Lines Matching refs:dev

49 struct mei_device *dev; in mei_open() local
54 dev = container_of(inode->i_cdev, struct mei_device, cdev); in mei_open()
56 mutex_lock(&dev->device_lock); in mei_open()
58 if (dev->dev_state != MEI_DEV_ENABLED) { in mei_open()
59 dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n", in mei_open()
60 mei_dev_state_str(dev->dev_state)); in mei_open()
65 cl = mei_cl_alloc_linked(dev); in mei_open()
74 mutex_unlock(&dev->device_lock); in mei_open()
79 mutex_unlock(&dev->device_lock); in mei_open()
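The mei_open() hits above trace a single flow: recover the struct mei_device from the cdev embedded in the inode, take device_lock, refuse the open while the device is not MEI_DEV_ENABLED, and allocate a linked client before unlocking (lines 74 and 79 are the success and error unlocks). A minimal sketch of that flow, assuming the error codes, the file->private_data assignment and the nonseekable_open() tail, none of which are in the matched lines:

static int mei_open(struct inode *inode, struct file *file)
{
        struct mei_device *dev;
        struct mei_cl *cl;
        int err;

        /* the cdev is embedded in struct mei_device, so container_of()
         * recovers the owning device from inode->i_cdev */
        dev = container_of(inode->i_cdev, struct mei_device, cdev);

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED) {
                dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
                        mei_dev_state_str(dev->dev_state));
                err = -ENODEV;                  /* assumed error code */
                goto err_unlock;
        }

        cl = mei_cl_alloc_linked(dev);
        if (IS_ERR(cl)) {
                err = PTR_ERR(cl);
                goto err_unlock;
        }

        file->private_data = cl;                /* assumed hand-off to read/write/ioctl */

        mutex_unlock(&dev->device_lock);
        return nonseekable_open(inode, file);   /* assumed tail */

err_unlock:
        mutex_unlock(&dev->device_lock);
        return err;
}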
115 struct mei_device *dev; in mei_release() local
118 if (WARN_ON(!cl || !cl->dev)) in mei_release()
121 dev = cl->dev; in mei_release()
123 mutex_lock(&dev->device_lock); in mei_release()
128 cl_dbg(dev, cl, "not the last vtag\n"); in mei_release()
140 cl_dbg(dev, cl, "not the last vtag after disconnect\n"); in mei_release()
146 cl_dbg(dev, cl, "removing\n"); in mei_release()
154 mutex_unlock(&dev->device_lock); in mei_release()
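mei_release() and the remaining file operations start from the client rather than from the inode, so they share one preamble: fetch the struct mei_cl (presumably from file->private_data, which is not in the matched lines), guard against a half-initialized client, and follow the back-pointer to the device before taking device_lock. A sketch of that shared prologue:

        struct mei_cl *cl = file->private_data; /* assumed source of cl */
        struct mei_device *dev;

        /* bail out loudly if the client or its device back-pointer is missing */
        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
        /* ... per-operation work, e.g. the vtag accounting and disconnect
         * that the cl_dbg() messages at lines 128-146 belong to ... */
        mutex_unlock(&dev->device_lock);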
173 struct mei_device *dev; in mei_read() local
178 if (WARN_ON(!cl || !cl->dev)) in mei_read()
181 dev = cl->dev; in mei_read()
184 mutex_lock(&dev->device_lock); in mei_read()
185 if (dev->dev_state != MEI_DEV_ENABLED) { in mei_read()
209 cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets); in mei_read()
218 mutex_unlock(&dev->device_lock); in mei_read()
226 mutex_lock(&dev->device_lock); in mei_read()
243 cl_dbg(dev, cl, "read operation failed %zd\n", rets); in mei_read()
247 cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n", in mei_read()
259 dev_dbg(dev->dev, "failed to copy data to userland\n"); in mei_read()
275 cl_dbg(dev, cl, "end mei read rets = %zd\n", rets); in mei_read()
276 mutex_unlock(&dev->device_lock); in mei_read()
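The unlock at line 218 followed by the re-lock at 226 is the driver's blocking pattern: a read with nothing buffered queues a receive, drops device_lock, sleeps until data or a disconnect arrives, and re-takes the lock before touching shared state again. A sketch of that middle section of mei_read(); the wait queue, the wake condition and the error mapping are assumptions:

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        /* ... no completed read callback yet: start a read ("mei start read
         * failure" at line 209 is the error path of that step) ... */

        /* never sleep with device_lock held */
        mutex_unlock(&dev->device_lock);
        if (wait_event_interruptible(cl->rx_wait,                /* assumed queue */
                                     mei_cl_read_cb(cl, file) || /* assumed condition */
                                     !mei_cl_is_connected(cl)))
                return -ERESTARTSYS;
        mutex_lock(&dev->device_lock);

        /* ... copy_to_user() of the completed buffer, index book-keeping ... */
out:
        cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
        mutex_unlock(&dev->device_lock);
        return rets;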
316 struct mei_device *dev; in mei_write() local
319 if (WARN_ON(!cl || !cl->dev)) in mei_write()
322 dev = cl->dev; in mei_write()
324 mutex_lock(&dev->device_lock); in mei_write()
326 if (dev->dev_state != MEI_DEV_ENABLED) { in mei_write()
332 cl_dbg(dev, cl, "is not connected"); in mei_write()
352 while (cl->tx_cb_queued >= dev->tx_queue_limit) { in mei_write()
357 mutex_unlock(&dev->device_lock); in mei_write()
361 mutex_lock(&dev->device_lock); in mei_write()
382 dev_dbg(dev->dev, "failed to copy data from userland\n"); in mei_write()
390 mutex_unlock(&dev->device_lock); in mei_write()
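Lines 352-361 are the write-side flow control: once the client has dev->tx_queue_limit callbacks outstanding, mei_write() drops device_lock, sleeps until the TX queue drains (or the client disconnects), and re-checks under the lock. A sketch of that loop; the limit test and the lock choreography come from the matched lines, while the wait queue, the O_NONBLOCK branch and the error mapping are assumptions:

        /* throttle writers once the per-client TX queue is full */
        while (cl->tx_cb_queued >= dev->tx_queue_limit) {
                if (file->f_flags & O_NONBLOCK) {       /* assumed non-blocking path */
                        rets = -EAGAIN;
                        goto out;
                }

                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,    /* assumed queue */
                                cl->tx_cb_queued < dev->tx_queue_limit ||
                                !mei_cl_is_connected(cl));
                mutex_lock(&dev->device_lock);

                if (rets) {
                        rets = -EINTR;          /* interrupted by a signal */
                        goto out;
                }
                if (!mei_cl_is_connected(cl)) {
                        rets = -ENODEV;
                        goto out;
                }
        }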
409 struct mei_device *dev; in mei_ioctl_connect_client() local
415 dev = cl->dev; in mei_ioctl_connect_client()
422 me_cl = mei_me_cl_by_uuid(dev, in_client_uuid); in mei_ioctl_connect_client()
424 dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", in mei_ioctl_connect_client()
431 bool forbidden = dev->override_fixed_address ? in mei_ioctl_connect_client()
432 !dev->allow_fixed_address : !dev->hbm_f_fa_supported; in mei_ioctl_connect_client()
434 dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", in mei_ioctl_connect_client()
441 dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", in mei_ioctl_connect_client()
443 dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n", in mei_ioctl_connect_client()
445 dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n", in mei_ioctl_connect_client()
451 dev_dbg(dev->dev, "Can connect?\n"); in mei_ioctl_connect_client()
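The connect path first resolves the requested UUID to a firmware client and then applies the fixed-address policy at lines 431-432: connecting to a fixed-address client is forbidden unless the firmware advertises the capability, and override_fixed_address/allow_fixed_address can override that decision. A condensed sketch; the props.fixed_address field, the refcount drop and the error codes are assumptions on top of the matched lines:

        struct mei_me_client *me_cl;

        me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
        if (!me_cl) {
                dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
                        in_client_uuid);
                return -ENOTTY;                         /* assumed error code */
        }

        if (me_cl->props.fixed_address) {               /* assumed field name */
                bool forbidden = dev->override_fixed_address ?
                                 !dev->allow_fixed_address :
                                 !dev->hbm_f_fa_supported;

                if (forbidden) {
                        dev_dbg(dev->dev,
                                "Connection forbidden to FW Client UUID = %pUl\n",
                                in_client_uuid);
                        mei_me_cl_put(me_cl);           /* assumed refcount drop */
                        return -ENOTTY;
                }
        }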
473 static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid) in mei_vt_support_check() argument
478 if (!dev->hbm_f_vt_supported) in mei_vt_support_check()
481 me_cl = mei_me_cl_by_uuid(dev, uuid); in mei_vt_support_check()
483 dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", in mei_vt_support_check()
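mei_vt_support_check() is small enough that the hits give most of it away: fail fast when the firmware has no vtag support at all, otherwise look up the firmware client and check its per-client capability. A hedged reconstruction; props.vt_supported and mei_me_cl_put() are filled in from the driver's usual conventions, not from the matched lines:

static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
{
        struct mei_me_client *me_cl;
        int ret;

        /* no HBM vtag support at all -> nothing to check per client */
        if (!dev->hbm_f_vt_supported)
                return -EOPNOTSUPP;

        me_cl = mei_me_cl_by_uuid(dev, uuid);
        if (!me_cl) {
                dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
                        uuid);
                return -ENOTTY;
        }

        ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;      /* assumed field */
        mei_me_cl_put(me_cl);                                   /* assumed */

        return ret;
}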
510 struct mei_device *dev; in mei_ioctl_connect_vtag() local
516 dev = cl->dev; in mei_ioctl_connect_vtag()
518 dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag); in mei_ioctl_connect_vtag()
523 dev_err(dev->dev, "reconnect with different vtag\n"); in mei_ioctl_connect_vtag()
530 dev_err(dev->dev, "vtag already filled\n"); in mei_ioctl_connect_vtag()
534 list_for_each_entry(pos, &dev->file_list, link) { in mei_ioctl_connect_vtag()
549 dev_dbg(dev->dev, "replacing with existing cl\n"); in mei_ioctl_connect_vtag()
570 mutex_unlock(&dev->device_lock); in mei_ioctl_connect_vtag()
576 dev->timeouts.cl_connect); in mei_ioctl_connect_vtag()
577 mutex_lock(&dev->device_lock); in mei_ioctl_connect_vtag()
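The vtag connect path differs from the plain connect in two visible ways: it walks dev->file_list looking for an already-linked client that talks to the same firmware client, so the new file/vtag can be attached to it, and it later lets the connection settle with device_lock dropped, bounded by dev->timeouts.cl_connect (lines 570-577). A sketch of the list walk only; mei_cl_matches_uuid() is a hypothetical stand-in for the actual UUID comparison:

        struct mei_cl *cl = NULL, *pos;

        /* reuse an existing client that already talks to this FW client */
        list_for_each_entry(pos, &dev->file_list, link) {
                /* hypothetical helper: does this client match the requested UUID? */
                if (!mei_cl_matches_uuid(pos, in_client_uuid))
                        continue;

                dev_dbg(dev->dev, "replacing with existing cl\n");
                cl = pos;
                break;
        }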
643 struct mei_device *dev; in mei_ioctl() local
654 if (WARN_ON(!cl || !cl->dev)) in mei_ioctl()
657 dev = cl->dev; in mei_ioctl()
659 dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd); in mei_ioctl()
661 mutex_lock(&dev->device_lock); in mei_ioctl()
662 if (dev->dev_state != MEI_DEV_ENABLED) { in mei_ioctl()
669 dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); in mei_ioctl()
671 dev_dbg(dev->dev, "failed to copy data from userland\n"); in mei_ioctl()
679 rets = mei_vt_support_check(dev, cl_uuid); in mei_ioctl()
692 dev_dbg(dev->dev, "failed to copy data to userland\n"); in mei_ioctl()
700 dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n"); in mei_ioctl()
703 dev_dbg(dev->dev, "failed to copy data from userland\n"); in mei_ioctl()
712 rets = mei_vt_support_check(dev, cl_uuid); in mei_ioctl()
714 dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n", in mei_ioctl()
720 dev_dbg(dev->dev, "vtag can't be zero\n"); in mei_ioctl()
732 dev_dbg(dev->dev, "failed to copy data to userland\n"); in mei_ioctl()
740 dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n"); in mei_ioctl()
743 dev_dbg(dev->dev, "failed to copy data from userland\n"); in mei_ioctl()
751 dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n"); in mei_ioctl()
756 dev_dbg(dev->dev, "copy connect data to user\n"); in mei_ioctl()
759 dev_dbg(dev->dev, "failed to copy data to userland\n"); in mei_ioctl()
771 mutex_unlock(&dev->device_lock); in mei_ioctl()
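All of the mei_ioctl() hits fit one skeleton: take device_lock, refuse anything while the device is not enabled, copy the request in from user space, dispatch on the command, and copy the reply back before unlocking. A reduced sketch showing only the CONNECT_CLIENT leg; the connect-data struct, its in_client_uuid field and the exact error codes are assumptions:

static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
        struct mei_cl *cl = file->private_data;         /* assumed */
        struct mei_device *dev;
        struct mei_connect_client_data conn;            /* assumed uapi struct */
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;
        dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        switch (cmd) {
        case IOCTL_MEI_CONNECT_CLIENT:
                if (copy_from_user(&conn, (void __user *)data, sizeof(conn))) {
                        dev_dbg(dev->dev, "failed to copy data from userland\n");
                        rets = -EFAULT;
                        goto out;
                }
                /* the result decides between the plain and the vtag connect path */
                rets = mei_vt_support_check(dev, &conn.in_client_uuid);
                /* ... mei_ioctl_connect_client() / mei_ioctl_connect_vtag() ... */
                if (!rets &&
                    copy_to_user((void __user *)data, &conn, sizeof(conn))) {
                        dev_dbg(dev->dev, "failed to copy data to userland\n");
                        rets = -EFAULT;
                }
                break;
        default:
                rets = -ENOIOCTLCMD;
                break;
        }
out:
        mutex_unlock(&dev->device_lock);
        return rets;
}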
787 struct mei_device *dev; in mei_poll() local
791 if (WARN_ON(!cl || !cl->dev)) in mei_poll()
794 dev = cl->dev; in mei_poll()
796 mutex_lock(&dev->device_lock); in mei_poll()
800 if (dev->dev_state != MEI_DEV_ENABLED || in mei_poll()
823 if (cl->tx_cb_queued < dev->tx_queue_limit) in mei_poll()
828 mutex_unlock(&dev->device_lock); in mei_poll()
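The poll hits show the same enabled-state gate plus the write-side readiness rule: the file is writable as long as the client is below dev->tx_queue_limit. A sketch; the wait-queue registration and the read/notification readiness checks are assumptions:

static __poll_t mei_poll(struct file *file, poll_table *wait)
{
        struct mei_cl *cl = file->private_data;         /* assumed */
        struct mei_device *dev;
        __poll_t mask = 0;

        if (WARN_ON(!cl || !cl->dev))
                return EPOLLERR;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
                mask = EPOLLERR;
                goto out;
        }

        poll_wait(file, &cl->tx_wait, wait);            /* assumed wait queue */

        /* writable while the per-client TX queue still has room */
        if (cl->tx_cb_queued < dev->tx_queue_limit)
                mask |= EPOLLOUT | EPOLLWRNORM;

        /* ... read-side and notification readiness not shown in the hits ... */
out:
        mutex_unlock(&dev->device_lock);
        return mask;
}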
841 struct mei_device *dev = cl->dev; in mei_cl_is_write_queued() local
844 list_for_each_entry(cb, &dev->write_list, list) in mei_cl_is_write_queued()
847 list_for_each_entry(cb, &dev->write_waiting_list, list) in mei_cl_is_write_queued()
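mei_cl_is_write_queued() needs only the two lists named in the hits; a likely-complete reconstruction (the cb->cl field of the callback structure is an assumption):

static bool mei_cl_is_write_queued(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;

        /* writes queued but not yet handed to the hardware */
        list_for_each_entry(cb, &dev->write_list, list)
                if (cb->cl == cl)
                        return true;
        /* writes sent and waiting for completion */
        list_for_each_entry(cb, &dev->write_waiting_list, list)
                if (cb->cl == cl)
                        return true;
        return false;
}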
866 struct mei_device *dev; in mei_fsync() local
869 if (WARN_ON(!cl || !cl->dev)) in mei_fsync()
872 dev = cl->dev; in mei_fsync()
874 mutex_lock(&dev->device_lock); in mei_fsync()
876 if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) { in mei_fsync()
882 mutex_unlock(&dev->device_lock); in mei_fsync()
886 mutex_lock(&dev->device_lock); in mei_fsync()
899 mutex_unlock(&dev->device_lock); in mei_fsync()
937 struct mei_device *dev = dev_get_drvdata(device); in trc_show() local
941 ret = mei_trc_status(dev, &trc); in trc_show()
960 struct mei_device *dev = dev_get_drvdata(device); in fw_status_show() local
965 mutex_lock(&dev->device_lock); in fw_status_show()
966 err = mei_fw_status(dev, &fw_status); in fw_status_show()
967 mutex_unlock(&dev->device_lock); in fw_status_show()
992 struct mei_device *dev = dev_get_drvdata(device); in hbm_ver_show() local
995 mutex_lock(&dev->device_lock); in hbm_ver_show()
996 ver = dev->version; in hbm_ver_show()
997 mutex_unlock(&dev->device_lock); in hbm_ver_show()
1022 struct mei_device *dev = dev_get_drvdata(device); in tx_queue_limit_show() local
1025 mutex_lock(&dev->device_lock); in tx_queue_limit_show()
1026 size = dev->tx_queue_limit; in tx_queue_limit_show()
1027 mutex_unlock(&dev->device_lock); in tx_queue_limit_show()
1036 struct mei_device *dev = dev_get_drvdata(device); in tx_queue_limit_store() local
1048 mutex_lock(&dev->device_lock); in tx_queue_limit_store()
1049 dev->tx_queue_limit = limit; in tx_queue_limit_store()
1050 mutex_unlock(&dev->device_lock); in tx_queue_limit_store()
1068 struct mei_device *dev = dev_get_drvdata(device); in fw_ver_show() local
1073 ver = dev->fw_ver; in fw_ver_show()
1095 struct mei_device *dev = dev_get_drvdata(device); in dev_state_show() local
1098 mutex_lock(&dev->device_lock); in dev_state_show()
1099 dev_state = dev->dev_state; in dev_state_show()
1100 mutex_unlock(&dev->device_lock); in dev_state_show()
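Most of the sysfs attributes above follow one pattern: snapshot the field of interest under device_lock, drop the lock, and only then format the value; the store side parses outside the lock and takes it just for the assignment. A combined sketch modeled on dev_state_show() and tx_queue_limit_store(); sysfs_emit(), the lower bound on the limit and the attribute wiring are assumptions:

static ssize_t dev_state_show(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        enum mei_dev_state dev_state;

        mutex_lock(&dev->device_lock);
        dev_state = dev->dev_state;             /* snapshot under the lock */
        mutex_unlock(&dev->device_lock);

        return sysfs_emit(buf, "%s\n", mei_dev_state_str(dev_state));
}
static DEVICE_ATTR_RO(dev_state);               /* assumed wiring */

static ssize_t tx_queue_limit_store(struct device *device,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct mei_device *dev = dev_get_drvdata(device);
        u8 limit;
        int ret;

        ret = kstrtou8(buf, 0, &limit);         /* parse outside the lock */
        if (ret)
                return ret;
        if (limit < 1)                          /* assumed lower bound */
                return -EINVAL;

        mutex_lock(&dev->device_lock);
        dev->tx_queue_limit = limit;
        mutex_unlock(&dev->device_lock);

        return count;
}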
1112 void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state) in mei_set_devstate() argument
1116 if (dev->dev_state == state) in mei_set_devstate()
1119 dev->dev_state = state; in mei_set_devstate()
1121 clsdev = class_find_device_by_devt(&mei_class, dev->cdev.dev); in mei_set_devstate()
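mei_set_devstate() is the writer side of the dev_state attribute above: when the state actually changes it looks up the class device created for this minor and pokes sysfs so that pollers of dev_state wake up. The tail of the function is not in the hits; the sysfs_notify()/put_device() pair below is a plausible completion, not a quote:

void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
{
        struct device *clsdev;

        if (dev->dev_state == state)
                return;

        dev->dev_state = state;

        clsdev = class_find_device_by_devt(&mei_class, dev->cdev.dev);
        if (clsdev) {
                /* assumed: wake user-space pollers of the dev_state attribute */
                sysfs_notify(&clsdev->kobj, NULL, "dev_state");
                put_device(clsdev);     /* drop the ref taken by the lookup */
        }
}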
1140 struct mei_device *dev = dev_get_drvdata(device); in kind_show() local
1143 if (dev->kind) in kind_show()
1144 ret = sprintf(buf, "%s\n", dev->kind); in kind_show()
1189 static int mei_minor_get(struct mei_device *dev) in mei_minor_get() argument
1194 ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL); in mei_minor_get()
1196 dev->minor = ret; in mei_minor_get()
1198 dev_err(dev->dev, "too many mei devices\n"); in mei_minor_get()
1209 static void mei_minor_free(struct mei_device *dev) in mei_minor_free() argument
1212 idr_remove(&mei_idr, dev->minor); in mei_minor_free()
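Minor numbers come from a driver-global IDR capped at MEI_MAX_DEVS. A sketch of the pair; the mutex guarding the IDR (mei_minor_lock here) is an assumed name, only the idr_alloc()/idr_remove() calls and the error message appear in the hits:

static DEFINE_MUTEX(mei_minor_lock);    /* assumed name */
static DEFINE_IDR(mei_idr);

static int mei_minor_get(struct mei_device *dev)
{
        int ret;

        mutex_lock(&mei_minor_lock);
        ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
        if (ret >= 0)
                dev->minor = ret;
        else if (ret == -ENOSPC)
                dev_err(dev->dev, "too many mei devices\n");
        mutex_unlock(&mei_minor_lock);

        return ret;
}

static void mei_minor_free(struct mei_device *dev)
{
        mutex_lock(&mei_minor_lock);
        idr_remove(&mei_idr, dev->minor);
        mutex_unlock(&mei_minor_lock);
}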
1216 int mei_register(struct mei_device *dev, struct device *parent) in mei_register() argument
1221 ret = mei_minor_get(dev); in mei_register()
1226 devno = MKDEV(MAJOR(mei_devt), dev->minor); in mei_register()
1227 cdev_init(&dev->cdev, &mei_fops); in mei_register()
1228 dev->cdev.owner = parent->driver->owner; in mei_register()
1231 ret = cdev_add(&dev->cdev, devno, 1); in mei_register()
1234 MAJOR(mei_devt), dev->minor); in mei_register()
1239 dev, mei_groups, in mei_register()
1240 "mei%d", dev->minor); in mei_register()
1244 MAJOR(mei_devt), dev->minor); in mei_register()
1249 mei_dbgfs_register(dev, dev_name(clsdev)); in mei_register()
1254 cdev_del(&dev->cdev); in mei_register()
1256 mei_minor_free(dev); in mei_register()
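mei_register() strings the helpers together: reserve a minor, initialize and add the cdev under the parent driver's module owner, create the class device (which also instantiates the sysfs groups above), then register debugfs, unwinding each step on failure. A sketch; the label names and error messages are assumptions, the calls and their arguments come from the matched lines:

int mei_register(struct mei_device *dev, struct device *parent)
{
        struct device *clsdev;
        dev_t devno;
        int ret;

        ret = mei_minor_get(dev);
        if (ret < 0)
                return ret;

        /* export the character device */
        devno = MKDEV(MAJOR(mei_devt), dev->minor);
        cdev_init(&dev->cdev, &mei_fops);
        dev->cdev.owner = parent->driver->owner;

        ret = cdev_add(&dev->cdev, devno, 1);
        if (ret) {
                dev_err(parent, "unable to add cdev %d:%d\n",       /* assumed text */
                        MAJOR(mei_devt), dev->minor);
                goto err_minor;
        }

        /* /dev node plus the sysfs attribute groups */
        clsdev = device_create_with_groups(&mei_class, parent, devno,
                                           dev, mei_groups,
                                           "mei%d", dev->minor);
        if (IS_ERR(clsdev)) {
                dev_err(parent, "unable to create device %d:%d\n",  /* assumed text */
                        MAJOR(mei_devt), dev->minor);
                ret = PTR_ERR(clsdev);
                goto err_cdev;
        }

        mei_dbgfs_register(dev, dev_name(clsdev));

        return 0;

err_cdev:
        cdev_del(&dev->cdev);
err_minor:
        mei_minor_free(dev);
        return ret;
}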
1261 void mei_deregister(struct mei_device *dev) in mei_deregister() argument
1265 devno = dev->cdev.dev; in mei_deregister()
1266 cdev_del(&dev->cdev); in mei_deregister()
1268 mei_dbgfs_deregister(dev); in mei_deregister()
1272 mei_minor_free(dev); in mei_deregister()
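Teardown mirrors registration in reverse; only the device_destroy() call is missing from the hits and is filled in here as the obvious counterpart of device_create_with_groups():

void mei_deregister(struct mei_device *dev)
{
        dev_t devno;

        devno = dev->cdev.dev;          /* remember the dev_t before the cdev goes away */
        cdev_del(&dev->cdev);

        mei_dbgfs_deregister(dev);

        device_destroy(&mei_class, devno);      /* assumed counterpart of device_create */

        mei_minor_free(dev);
}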