Lines matching "vendor-specific" (drivers/thunderbolt/nvm.c)
// SPDX-License-Identifier: GPL-2.0

/* Intel specific NVM offsets */

/* ASMedia specific NVM offsets */
/**
 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
 */
/**
 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
 * @vendor: Vendor ID
 * @vops: Vendor specific NVM operations
 *
 * Maps vendor ID to NVM vendor operations. If there is no mapping then
 * NVM firmware upgrade is disabled for the device.
 */
	u16 vendor;
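/*
 * A minimal sketch of how these two types fit together, inferred from the
 * fragments in this listing. The exact member layout is an assumption; the
 * callback names follow the vops->read_version, vops->validate and
 * vops->write_headers uses further down:
 */
struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);
	int (*validate)(struct tb_nvm *nvm);
	int (*write_headers)(struct tb_nvm *nvm);
};

struct tb_nvm_vendor {
	u16 vendor;				/* e.g. sw->config.vendor_id */
	const struct tb_nvm_vendor_ops *vops;
};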
/* in intel_switch_nvm_version() */
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write the new functional NVM.
	 */
	if (sw->safe_mode)
		return 0;

	hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
	nvm_size = (nvm_size - hdr_size) / 2;

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;
	nvm->active_size = nvm_size;
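/*
 * Worked example for the size math above. The raw flash-size read that
 * seeds nvm_size is not part of this listing, so the 1 MiB input is an
 * assumption: with a 1 MiB part on a generation 3+ router,
 * hdr_size = SZ_16K and
 *   nvm_size = (SZ_1M - SZ_16K) / 2 = (1048576 - 16384) / 2 = 516096
 * i.e. each of the two NVM regions (active and non-active) is 504 KiB.
 */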
/* in intel_switch_nvm_validate() */
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	u8 *buf = nvm->buf;

	image_size = nvm->buf_data_size;

		return -EINVAL;
		return -EINVAL;
		return -EINVAL;

	if (sw->safe_mode)
		return 0;

	if (device_id != sw->config.device_id)
		return -EINVAL;

	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;
/* in intel_switch_nvm_write_headers() */
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	if (sw->generation < 3) {
			/* Thunderbolt 1-2 routers: CSS headers go out through the DMA port */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
/* in asmedia_switch_nvm_version() */
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	nvm->major = (val << 16) & 0xff0000;
	nvm->major |= val & 0x00ff00;
	nvm->major |= (val >> 16) & 0x0000ff;

	nvm->minor = (val << 16) & 0xff0000;
	nvm->minor |= val & 0x00ff00;
	nvm->minor |= (val >> 16) & 0x0000ff;

	/* ASMedia NVM size is fixed to 512k */
	nvm->active_size = SZ_512K;
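/*
 * The OR chains above only reverse the byte order of the 32-bit register
 * value. Worked example (the value itself is made up): if val = 0x00112233,
 *   (val << 16) & 0xff0000 = 0x330000
 *   val         & 0x00ff00 = 0x002200
 *   (val >> 16) & 0x0000ff = 0x000011
 * giving nvm->major = 0x332211, the low three bytes swapped end for end.
 */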
/* Router vendor NVM support table */
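/*
 * A sketch of what the router support table could look like: an array of
 * tb_nvm_vendor entries that tb_nvm_alloc() walks, matching v->vendor
 * against sw->config.vendor_id. The table and ops names are assumptions;
 * which callbacks each vendor fills in is inferred from which functions
 * appear in this listing, and the vendor IDs are the standard PCI IDs:
 */
static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version = intel_switch_nvm_version,
	.validate = intel_switch_nvm_validate,
	.write_headers = intel_switch_nvm_write_headers,
};

static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
	.read_version = asmedia_switch_nvm_version,
};

static const struct tb_nvm_vendor switch_nvm_vendors[] = {
	{ 0x174c, &asmedia_switch_nvm_ops },		/* ASMedia */
	{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },	/* Intel */
};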
/* in intel_retimer_nvm_version() */
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);

	nvm->major = (val >> 16) & 0xff;
	nvm->minor = (val >> 8) & 0xff;

	nvm_size = (nvm_size - SZ_16K) / 2;
	nvm->active_size = nvm_size;
/* in intel_retimer_nvm_validate() */
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	u8 *buf = nvm->buf;

	image_size = nvm->buf_data_size;

		return -EINVAL;
		return -EINVAL;
		return -EINVAL;

	if (device != rt->device)
		return -EINVAL;

	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;
/* Retimer vendor NVM support table */
/**
 * tb_nvm_alloc() - Allocate new NVM structure
 *
 * Allocates new NVM structure with unique @id and returns it. In case
 * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
 * NVM format of the @dev is not known by the kernel.
 */
		if (v->vendor == sw->config.vendor_id) {
			vops = v->vops;

		tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
			  sw->config.vendor_id);
		return ERR_PTR(-EOPNOTSUPP);

		if (v->vendor == rt->vendor) {
			vops = v->vops;

		dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
			rt->vendor);
		return ERR_PTR(-EOPNOTSUPP);

	return ERR_PTR(-EOPNOTSUPP);

		return ERR_PTR(-ENOMEM);

	nvm->id = ret;
	nvm->dev = dev;
	nvm->vops = vops;
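/*
 * A sketch of how a driver might drive this allocation API, pieced together
 * from the fragments above. The error handling policy, the example_* names
 * and the NVMem callbacks are assumptions, not taken from the listing:
 */
static int example_reg_read(void *priv, unsigned int offset, void *val,
			    size_t bytes);
static int example_reg_write(void *priv, unsigned int offset, void *val,
			     size_t bytes);

static int example_add_switch_nvm(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm))
		/* -EOPNOTSUPP: vendor not in the table, no NVM upgrade offered */
		return PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_free;

	ret = tb_nvm_add_active(nvm, example_reg_read);
	if (ret)
		goto err_free;

	ret = tb_nvm_add_non_active(nvm, example_reg_write);
	if (ret)
		goto err_free;

	sw->nvm = nvm;
	return 0;

err_free:
	tb_nvm_free(nvm);
	return ret;
}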
/**
 * tb_nvm_read_version() - Read and populate NVM version
 *
 * Uses vendor specific means to read out and fill in the existing
 * active NVM version.
 */
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	if (vops && vops->read_version)
		return vops->read_version(nvm);

	return -EOPNOTSUPP;
/**
 * tb_nvm_validate() - Validate new NVM image
 *
 * Runs vendor specific validation over the new NVM image and if all
 * checks pass returns %0. As side effect updates @nvm->buf_data_start
 * and @nvm->buf_data_size fields to match the actual data to be written
 * to the NVM.
 */
	const struct tb_nvm_vendor_ops *vops = nvm->vops;
	u8 *buf = nvm->buf;

	if (!buf)
		return -EINVAL;
	if (!vops)
		return -EOPNOTSUPP;

	image_size = nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	nvm->buf_data_start = buf;

	return vops->validate ? vops->validate(nvm) : 0;
/**
 * tb_nvm_write_headers() - Write headers before the rest of the image
 *
 * If the vendor NVM format requires writing headers before the rest of
 * the image, this function does that.
 */
	const struct tb_nvm_vendor_ops *vops = nvm->vops;

	return vops->write_headers ? vops->write_headers(nvm) : 0;
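/*
 * Sketch of the order in which the helpers are meant to be used when the
 * cached image is flushed to the flash: validate first (which also sets
 * buf_data_start/buf_data_size), then vendor headers, then the payload.
 * The block-writer prototype, the retry count and the zero flash address
 * are illustrative assumptions:
 */
static int example_write_block(void *data, unsigned int dwaddress,
			       const void *buf, size_t dwords);

static int example_nvm_flush(struct tb_nvm *nvm, void *write_block_data)
{
	int ret;

	ret = tb_nvm_validate(nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(nvm);
	if (ret)
		return ret;

	return tb_nvm_write_data(0, nvm->buf_data_start, nvm->buf_data_size,
				 10, example_write_block, write_block_data);
}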
/**
 * tb_nvm_add_active() - Adds active NVMem device to NVM
 */
	config.id = nvm->id;
	config.size = nvm->active_size;
	config.dev = nvm->dev;

	nvm->active = nvmem;
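/*
 * tb_nvm_add_active() takes an NVMem read callback. A sketch of such a
 * callback: NVMem hands back its priv pointer (assumed here to be the
 * tb_nvm itself) and the read is forwarded to tb_nvm_read_data() with a
 * device specific block reader. The example_* names and the retry count
 * are assumptions:
 */
static int example_read_block(void *data, unsigned int dwaddress, void *buf,
			      size_t dwords);

static int example_active_reg_read(void *priv, unsigned int offset, void *val,
				   size_t bytes)
{
	struct tb_nvm *nvm = priv;

	/* Reads go straight to the hardware, there is no caching here */
	return tb_nvm_read_data(offset, val, bytes, 5, example_read_block,
				nvm->dev);
}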
/**
 * tb_nvm_write_buf() - Write data to @nvm buffer
 *
 * Helper function to cache the new NVM image before it is actually
 * written to the flash. Copies @bytes from @val to @nvm->buf starting
 * from @offset.
 */
	if (!nvm->buf) {
		nvm->buf = vmalloc(NVM_MAX_SIZE);
		if (!nvm->buf)
			return -ENOMEM;
	}

	nvm->flushed = false;
	nvm->buf_data_size = offset + bytes;
	memcpy(nvm->buf + offset, val, bytes);
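/*
 * Sketch of the matching non-active NVMem write callback built on
 * tb_nvm_write_buf(): userspace writes only land in the vmalloc()ed buffer
 * and are flushed to the flash later. The callback name is an assumption:
 */
static int example_non_active_reg_write(void *priv, unsigned int offset,
					void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;

	/* Cache only; nothing touches the hardware at this point */
	return tb_nvm_write_buf(nvm, offset, val, bytes);
}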
/**
 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
 *
 * Registers new non-active NVMem device for @nvm. The @reg_write is
 * called directly from NVMem so it must handle possible concurrent
 * access if needed.
 */
	config.id = nvm->id;
	config.dev = nvm->dev;
	nvm->non_active = nvmem;
/**
 * tb_nvm_free() - Release NVM and its resources
 */
		nvmem_unregister(nvm->non_active);
		nvmem_unregister(nvm->active);
		vfree(nvm->buf);
		ida_simple_remove(&nvm_ida, nvm->id);
/**
 * tb_nvm_read_data() - Read data from NVM
 */
			if (ret != -ENODEV && retries--)
				continue;
		nbytes -= offset;
		size -= nbytes;
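/*
 * Worked example of the read loop chunking (NVM_DATA_DWORDS is not part of
 * this listing; the usual 16, i.e. 64-byte chunks, is assumed). Reading
 * 100 bytes starting at byte address 6:
 *   pass 1: offset = 6 & 3 = 2, dwaddress = 1, 16 dwords (64 bytes) are
 *           read and, after "nbytes -= offset", 62 bytes are copied out
 *   pass 2: address = 68, offset = 0, the remaining 38 bytes come back
 *           in a single 10-dword access
 */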
/**
 * tb_nvm_write_data() - Write data to NVM
 */
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
		size -= nbytes;
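/*
 * Worked example of the write loop with an aligned start address
 * (NVM_DATA_DWORDS again assumed to be 16): writing 136 bytes at address 0
 * results in three write_block() calls,
 *   dwaddress 0,  16 dwords (64 bytes)
 *   dwaddress 16, 16 dwords (64 bytes)
 *   dwaddress 32,  2 dwords (8 bytes)
 * and a chunk that fails with -ETIMEDOUT is retried up to @retries times
 * before the whole write gives up with -EIO.
 */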