// SPDX-License-Identifier: GPL-2.0
/*
 * NVM helpers
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "tb.h"

/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID			0x05
#define INTEL_NVM_VERSION		0x08
#define INTEL_NVM_CSS			0x10
#define INTEL_NVM_FLASH_SIZE		0x45

static DEFINE_IDA(nvm_ida);

/**
 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
 * @read_version: Reads out NVM version from the flash
 * @validate: Validates the NVM image before update (optional)
 * @write_headers: Writes headers before the rest of the image (optional)
 */
struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);
	int (*validate)(struct tb_nvm *nvm);
	int (*write_headers)(struct tb_nvm *nvm);
};

/**
 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
 * @vendor: Vendor ID
 * @vops: Vendor specific NVM operations
 *
 * Maps vendor ID to NVM vendor operations. If there is no mapping then
 * NVM firmware upgrade is disabled for the device.
 */
struct tb_nvm_vendor {
	u16 vendor;
	const struct tb_nvm_vendor_ops *vops;
};

static int intel_switch_nvm_version(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u32 val, nvm_size, hdr_size;
	int ret;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (sw->safe_mode)
		return 0;

	ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;

	hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - hdr_size) / 2;
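	/*
	 * Worked example (illustrative reading of the math above): the
	 * low three bits of the flash size register give the flash size
	 * as 1 << (val & 7) Mbit. With val & 7 == 6 the flash is
	 * 64 Mbit (8 MB); after subtracting the 16k header of a gen 3+
	 * router and splitting the remainder between the active and
	 * non-active regions, each region is (8M - 16k) / 2 bytes.
	 */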
	ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;
	nvm->active_size = nvm_size;

	return 0;
}

static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	unsigned int image_size, hdr_size;
	u16 ds_size, device_id;
	u8 *buf = nvm->buf;

	image_size = nvm->buf_data_size;
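	/*
	 * Illustrative sketch of the image layout assumed by the checks
	 * below (derived from this parser, not from a specification):
	 *
	 *	0x0		FARB pointer; low 24 bits give the start
	 *			of the digital section (hdr_size)
	 *	hdr_size	digital section size (u16); the section
	 *			start is 4k aligned
	 *	hdr_size + 0x5	device ID (u16, INTEL_NVM_DEVID)
	 */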
	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (sw->safe_mode)
		return 0;

	/*
	 * Make sure the device ID in the image matches the one
	 * we read from the switch config space.
	 */
	device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device_id != sw->config.device_id)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}

static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	if (sw->generation < 3) {
		int ret;

		/* Write CSS headers first */
		ret = dma_port_flash_write(sw->dma_port, DMA_PORT_CSS_ADDRESS,
					   nvm->buf + INTEL_NVM_CSS,
					   DMA_PORT_CSS_MAX_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version = intel_switch_nvm_version,
	.validate = intel_switch_nvm_validate,
	.write_headers = intel_switch_nvm_write_headers,
};

/* Router vendor NVM support table */
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
	{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
	{ 0x8087, &intel_switch_nvm_ops },
};
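/*
 * Supporting another router vendor means implementing a
 * &struct tb_nvm_vendor_ops and adding an entry to the table above.
 * Hypothetical sketch only (the vendor ID and ops names below are made
 * up for illustration):
 *
 *	static const struct tb_nvm_vendor_ops example_switch_nvm_ops = {
 *		.read_version = example_switch_nvm_version,
 *	};
 *
 *	{ 0x1234, &example_switch_nvm_ops },
 */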
static int intel_retimer_nvm_version(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	u32 val, nvm_size;
	int ret;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
	if (ret)
		return ret;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;

	ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
	if (ret)
		return ret;

	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;
	nvm->active_size = nvm_size;

	return 0;
}

static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	unsigned int image_size, hdr_size;
	u8 *buf = nvm->buf;
	u16 ds_size, device;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}

static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
	.read_version = intel_retimer_nvm_version,
	.validate = intel_retimer_nvm_validate,
};

/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
	{ 0x8087, &intel_retimer_nvm_ops },
};
/**
 * tb_nvm_alloc() - Allocate new NVM structure
 * @dev: Device owning the NVM
 *
 * Allocates new NVM structure with unique @id and returns it. In case
 * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
 * NVM format of the @dev is not known by the kernel.
 */
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
	const struct tb_nvm_vendor_ops *vops = NULL;
	struct tb_nvm *nvm;
	int ret, i;

	if (tb_is_switch(dev)) {
		const struct tb_switch *sw = tb_to_switch(dev);

		for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];

			if (v->vendor == sw->config.vendor_id) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
				  sw->config.vendor_id);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else if (tb_is_retimer(dev)) {
		const struct tb_retimer *rt = tb_to_retimer(dev);

		for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
			const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];

			if (v->vendor == rt->vendor) {
				vops = v->vops;
				break;
			}
		}

		if (!vops) {
			dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
				rt->vendor);
			return ERR_PTR(-EOPNOTSUPP);
		}
	} else {
		return ERR_PTR(-EOPNOTSUPP);
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return ERR_PTR(-ENOMEM);

	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(nvm);
		return ERR_PTR(ret);
	}

	nvm->id = ret;
	nvm->dev = dev;
	nvm->vops = vops;

	return nvm;
}
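/*
 * Illustrative call flow only, simplified from the switch/retimer code
 * that consumes these helpers (the nvm_read/nvm_write callback names
 * are hypothetical):
 *
 *	nvm = tb_nvm_alloc(&sw->dev);
 *	if (IS_ERR(nvm))
 *		return PTR_ERR(nvm);
 *
 *	ret = tb_nvm_read_version(nvm);
 *	if (ret)
 *		goto err_nvm;
 *
 *	ret = tb_nvm_add_active(nvm, nvm_read);
 *	if (ret)
 *		goto err_nvm;
 *
 *	ret = tb_nvm_add_non_active(nvm, nvm_write);
 *	if (ret)
 *		goto err_nvm;
 */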
/**
 * tb_nvm_read_version() - Read and populate NVM version
 * @nvm: NVM structure
 *
 * Uses vendor specific means to read out and fill in the existing
 * active NVM version. Returns %0 in case of success and negative errno
 * otherwise.
 */
int tb_nvm_read_version(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	if (vops && vops->read_version)
		return vops->read_version(nvm);

	return -EOPNOTSUPP;
}

/**
 * tb_nvm_validate() - Validate new NVM image
 * @nvm: NVM structure
 *
 * Runs vendor specific validation over the new NVM image and if all
 * checks pass returns %0. As a side effect updates @nvm->buf_data_start
 * and @nvm->buf_data_size fields to match the actual data to be written
 * to the NVM.
 *
 * If the validation does not pass then returns negative errno.
 */
int tb_nvm_validate(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;
	unsigned int image_size;
	u8 *buf = nvm->buf;

	if (!buf)
		return -EINVAL;
	if (!vops)
		return -EOPNOTSUPP;

	/* Just do basic image size checks */
	image_size = nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * Set the default data start in the buffer. The validate method
	 * below can change this if needed.
	 */
	nvm->buf_data_start = buf;

	return vops->validate ? vops->validate(nvm) : 0;
}

/**
 * tb_nvm_write_headers() - Write headers before the rest of the image
 * @nvm: NVM structure
 *
 * If the vendor NVM format requires writing headers before the rest of
 * the image, this function does that. Can be called even if the device
 * does not need this.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	return vops->write_headers ? vops->write_headers(nvm) : 0;
}
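/*
 * Illustrative update sequence (simplified; the real flow lives in the
 * callers of these helpers): once userspace has written the new image
 * through the non-active NVMem device, the driver typically does
 * something like:
 *
 *	ret = tb_nvm_validate(nvm);
 *	if (ret)
 *		return ret;
 *
 *	ret = tb_nvm_write_headers(nvm);
 *	if (ret)
 *		return ret;
 *
 * and then flushes nvm->buf_data_size bytes starting from
 * nvm->buf_data_start to the flash.
 */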
/**
 * tb_nvm_add_active() - Adds active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_read: Pointer to the function to read the NVM (passed directly to
 *	      the NVMem device)
 *
 * Registers new active NVMem device for @nvm. The @reg_read is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_read is @nvm structure.
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_active";
	config.reg_read = reg_read;
	config.read_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = nvm->active_size;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->active = nvmem;
	return 0;
}
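/*
 * A minimal sketch of a @reg_read callback (illustrative only; the
 * function name is hypothetical). NVMem invokes the callback with
 * config.priv, which tb_nvm_add_active() sets to the tb_nvm structure:
 *
 *	static int example_nvm_read(void *priv, unsigned int offset,
 *				    void *val, size_t bytes)
 *	{
 *		struct tb_nvm *nvm = priv;
 *		struct tb_switch *sw = tb_to_switch(nvm->dev);
 *
 *		return tb_switch_nvm_read(sw, offset, val, bytes);
 *	}
 */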
/**
 * tb_nvm_write_buf() - Write data to @nvm buffer
 * @nvm: NVM structure
 * @offset: Offset where to write the data
 * @val: Data buffer to write
 * @bytes: Number of bytes to write
 *
 * Helper function to cache the new NVM image before it is actually
 * written to the flash. Copies @bytes from @val to @nvm->buf starting
 * from @offset. Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes)
{
	if (!nvm->buf) {
		nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!nvm->buf)
			return -ENOMEM;
	}

	nvm->flushed = false;
	nvm->buf_data_size = offset + bytes;
	memcpy(nvm->buf + offset, val, bytes);
	return 0;
}

/**
 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
 * @nvm: NVM structure
 * @reg_write: Pointer to the function to write the NVM (passed directly
 *	       to the NVMem device)
 *
 * Registers new non-active NVMem device for @nvm. The @reg_write is
 * called directly from NVMem so it must handle possible concurrent
 * access if needed. The first parameter passed to @reg_write is @nvm
 * structure. The size of the NVMem device is set to %NVM_MAX_SIZE.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
	struct nvmem_config config;
	struct nvmem_device *nvmem;

	memset(&config, 0, sizeof(config));

	config.name = "nvm_non_active";
	config.reg_write = reg_write;
	config.root_only = true;
	config.id = nvm->id;
	config.stride = 4;
	config.word_size = 4;
	config.size = NVM_MAX_SIZE;
	config.dev = nvm->dev;
	config.owner = THIS_MODULE;
	config.priv = nvm;

	nvmem = nvmem_register(&config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	nvm->non_active = nvmem;
	return 0;
}
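/*
 * A minimal sketch of a @reg_write callback (illustrative only; the
 * function name is hypothetical). A typical implementation just caches
 * the image with tb_nvm_write_buf() and flushes it to the flash later:
 *
 *	static int example_nvm_write(void *priv, unsigned int offset,
 *				     void *val, size_t bytes)
 *	{
 *		struct tb_nvm *nvm = priv;
 *
 *		return tb_nvm_write_buf(nvm, offset, val, bytes);
 *	}
 */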
/**
 * tb_nvm_free() - Release NVM and its resources
 * @nvm: NVM structure to release
 *
 * Releases NVM and the NVMem devices if they were registered.
 */
void tb_nvm_free(struct tb_nvm *nvm)
{
	if (nvm) {
		nvmem_unregister(nvm->non_active);
		nvmem_unregister(nvm->active);
		vfree(nvm->buf);
		ida_simple_remove(&nvm_ida, nvm->id);
	}
	kfree(nvm);
}

/**
 * tb_nvm_read_data() - Read data from NVM
 * @address: Start address on the flash
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if block read fails
 * @read_block: Function that reads block from the flash
 * @read_block_data: Data passed to @read_block
 *
 * This is a generic function that reads data from NVM or NVM like
 * device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data)
{
	do {
		unsigned int dwaddress, dwords, offset;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
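/*
 * Illustrative only: the block callbacks operate on dword (32-bit)
 * addresses and counts while tb_nvm_read_data() itself accepts
 * arbitrary byte addresses and sizes. A hypothetical caller backed by
 * a plain memory buffer could look like this:
 *
 *	static int example_read_block(void *data, unsigned int dwaddress,
 *				      void *buf, size_t dwords)
 *	{
 *		memcpy(buf, (const u8 *)data + dwaddress * 4, dwords * 4);
 *		return 0;
 *	}
 *
 *	ret = tb_nvm_read_data(address, buf, size, 2, example_read_block,
 *			       flash_buf);
 */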
/**
 * tb_nvm_write_data() - Write data to NVM
 * @address: Start address on the flash
 * @buf: Buffer where the data is copied from
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if the block write fails
 * @write_block: Function that writes block to the flash
 * @write_block_data: Data passed to @write_block
 *
 * This is a generic function that writes data to NVM or NVM like
 * device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_block,
		      void *write_block_data)
{
	do {
		unsigned int offset, dwaddress;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);

		memcpy(data + offset, buf, nbytes);

		dwaddress = address / 4;
		ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

void tb_nvm_exit(void)
{
	ida_destroy(&nvm_ida);
}