Lines matching refs:ipmi_device in drivers/acpi/acpi_ipmi.c (each entry: source line number, matching line, enclosing function, and whether the symbol is a local or an argument there):

113 struct acpi_ipmi_device *ipmi_device; in ipmi_dev_alloc() local
117 ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); in ipmi_dev_alloc()
118 if (!ipmi_device) in ipmi_dev_alloc()
121 kref_init(&ipmi_device->kref); in ipmi_dev_alloc()
122 INIT_LIST_HEAD(&ipmi_device->head); in ipmi_dev_alloc()
123 INIT_LIST_HEAD(&ipmi_device->tx_msg_list); in ipmi_dev_alloc()
124 spin_lock_init(&ipmi_device->tx_msg_lock); in ipmi_dev_alloc()
125 ipmi_device->handle = handle; in ipmi_dev_alloc()
126 ipmi_device->dev = get_device(dev); in ipmi_dev_alloc()
127 ipmi_device->ipmi_ifnum = iface; in ipmi_dev_alloc()
130 ipmi_device, &user); in ipmi_dev_alloc()
133 kfree(ipmi_device); in ipmi_dev_alloc()
136 ipmi_device->user_interface = user; in ipmi_dev_alloc()
138 return ipmi_device; in ipmi_dev_alloc()
141 static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device) in ipmi_dev_release() argument
143 ipmi_destroy_user(ipmi_device->user_interface); in ipmi_dev_release()
144 put_device(ipmi_device->dev); in ipmi_dev_release()
145 kfree(ipmi_device); in ipmi_dev_release()
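
The ipmi_dev_alloc()/ipmi_dev_release() fragments above pair every resource taken at allocation (the kzalloc'd struct, the get_device() reference, the IPMI user created with ipmi_create_user()) with a release in reverse order. A minimal sketch of that pattern follows, assuming a reduced struct layout; the ipmi_hndlrs table is an illustrative name (it does not appear in the listing), and ipmi_msg_handler is only declared here and sketched further below.

/*
 * Minimal sketch (not the driver's verbatim code) of the alloc/release
 * pairing suggested by the ipmi_dev_alloc()/ipmi_dev_release() lines above.
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/ipmi.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct acpi_ipmi_device {
	struct kref kref;		/* lifetime of this ACPI/IPMI binding */
	struct list_head head;		/* entry on the global device list */
	struct list_head tx_msg_list;	/* requests waiting for a response */
	spinlock_t tx_msg_lock;		/* protects tx_msg_list */
	acpi_handle handle;
	struct device *dev;		/* SMI device, reference held */
	struct ipmi_user *user_interface;
	int ipmi_ifnum;
	bool dead;
};

static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);

static const struct ipmi_user_hndl ipmi_hndlrs = {	/* assumed name */
	.ipmi_recv_hndl = ipmi_msg_handler,
};

static struct acpi_ipmi_device *
ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
{
	struct acpi_ipmi_device *ipmi_device;
	struct ipmi_user *user;
	int err;

	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
	if (!ipmi_device)
		return NULL;

	kref_init(&ipmi_device->kref);
	INIT_LIST_HEAD(&ipmi_device->head);
	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
	spin_lock_init(&ipmi_device->tx_msg_lock);
	ipmi_device->handle = handle;
	ipmi_device->dev = get_device(dev);	/* pin the SMI device */
	ipmi_device->ipmi_ifnum = iface;

	err = ipmi_create_user(iface, &ipmi_hndlrs, ipmi_device, &user);
	if (err) {
		put_device(dev);
		kfree(ipmi_device);
		return NULL;
	}
	ipmi_device->user_interface = user;

	return ipmi_device;
}

/* Undo ipmi_dev_alloc() in reverse order once the last reference is gone. */
static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
	ipmi_destroy_user(ipmi_device->user_interface);
	put_device(ipmi_device->dev);
	kfree(ipmi_device);
}
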
156 static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device) in __ipmi_dev_kill() argument
158 list_del(&ipmi_device->head); in __ipmi_dev_kill()
159 if (driver_data.selected_smi == ipmi_device) in __ipmi_dev_kill()
166 ipmi_device->dead = true; in __ipmi_dev_kill()
171 struct acpi_ipmi_device *ipmi_device = NULL; in acpi_ipmi_dev_get() local
175 ipmi_device = driver_data.selected_smi; in acpi_ipmi_dev_get()
176 kref_get(&ipmi_device->kref); in acpi_ipmi_dev_get()
180 return ipmi_device; in acpi_ipmi_dev_get()
183 static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device) in acpi_ipmi_dev_put() argument
185 kref_put(&ipmi_device->kref, ipmi_dev_release_kref); in acpi_ipmi_dev_put()
375 struct acpi_ipmi_device *ipmi_device = user_msg_data; in ipmi_msg_handler() local
377 struct device *dev = ipmi_device->dev; in ipmi_msg_handler()
380 if (msg->user != ipmi_device->user_interface) { in ipmi_msg_handler()
383 msg->user, ipmi_device->user_interface); in ipmi_msg_handler()
387 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); in ipmi_msg_handler()
388 list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) { in ipmi_msg_handler()
395 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); in ipmi_msg_handler()
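
The ipmi_msg_handler() fragments show the response path: take tx_msg_lock with spin_lock_irqsave() and walk tx_msg_list with list_for_each_entry_safe() to find the request the response belongs to. A hedged sketch of that step follows, reusing the reduced structures and includes from the sketch above; the acpi_ipmi_msg layout, the tx_msgid field, and the completion are assumptions for illustration, not the driver's exact definitions.

#include <linux/completion.h>

struct acpi_ipmi_msg {			/* assumed, reduced layout */
	struct list_head head;		/* entry on tx_msg_list */
	long tx_msgid;			/* matched against ipmi_recv_msg->msgid */
	struct completion tx_complete;	/* signalled when the response arrives */
	struct acpi_ipmi_device *device;
};

static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	struct acpi_ipmi_device *ipmi_device = user_msg_data;
	struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
	struct device *dev = ipmi_device->dev;
	unsigned long flags;

	/* A response handed to the wrong user would indicate a driver bug. */
	if (msg->user != ipmi_device->user_interface) {
		dev_warn(dev, "response for unexpected user %p (expected %p)\n",
			 msg->user, ipmi_device->user_interface);
		goto out;
	}

	/* irqsave: the IPMI core may invoke this callback in atomic context */
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
		if (msg->msgid == iter->tx_msgid) {
			tx_msg = iter;
			list_del(&iter->head);	/* claim the matching request */
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

	if (tx_msg)
		complete(&tx_msg->tx_complete);
out:
	ipmi_free_recv_msg(msg);
}
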
436 struct acpi_ipmi_device *ipmi_device, *temp; in ipmi_register_bmc() local
451 ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle); in ipmi_register_bmc()
452 if (!ipmi_device) { in ipmi_register_bmc()
467 driver_data.selected_smi = ipmi_device; in ipmi_register_bmc()
468 list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); in ipmi_register_bmc()
476 ipmi_dev_release(ipmi_device); in ipmi_register_bmc()
483 struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp; in ipmi_bmc_gone() local
489 ipmi_device = iter; in ipmi_bmc_gone()
500 if (ipmi_device) { in ipmi_bmc_gone()
501 ipmi_flush_tx_msg(ipmi_device); in ipmi_bmc_gone()
502 acpi_ipmi_dev_put(ipmi_device); in ipmi_bmc_gone()
524 struct acpi_ipmi_device *ipmi_device; in acpi_ipmi_space_handler() local
541 ipmi_device = tx_msg->device; in acpi_ipmi_space_handler()
551 if (ipmi_device->dead) { in acpi_ipmi_space_handler()
556 spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); in acpi_ipmi_space_handler()
557 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); in acpi_ipmi_space_handler()
558 spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); in acpi_ipmi_space_handler()
561 err = ipmi_request_settime(ipmi_device->user_interface, in acpi_ipmi_space_handler()
576 ipmi_cancel_tx_msg(ipmi_device, tx_msg); in acpi_ipmi_space_handler()
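
The acpi_ipmi_space_handler() fragments show the submission ordering: a request is linked onto tx_msg_list before being handed to the IPMI layer, so the response handler above can always find it by msgid, and it is unlinked again via ipmi_cancel_tx_msg() if the device is already marked dead or submission fails. A sketch of that ordering under the same assumptions; acpi_ipmi_send() is a hypothetical wrapper name (the driver's real entry point is the ACPI operation region handler), and the retry arguments passed to ipmi_request_settime() are illustrative.

/* Simplified stand-in for the driver's ipmi_cancel_tx_msg() helper. */
static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi_device,
			       struct acpi_ipmi_msg *msg)
{
	struct acpi_ipmi_msg *iter, *temp;
	unsigned long flags;

	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
		if (iter == msg) {
			list_del(&iter->head);
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
}

/* Hypothetical wrapper: queue first, submit, unlink again on failure. */
static int acpi_ipmi_send(struct acpi_ipmi_device *ipmi_device,
			  struct acpi_ipmi_msg *tx_msg,
			  struct ipmi_addr *addr,
			  struct kernel_ipmi_msg *request)
{
	unsigned long flags;
	int err;

	/* An interface marked dead by __ipmi_dev_kill() takes no new work. */
	if (ipmi_device->dead)
		return -ENODEV;

	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

	err = ipmi_request_settime(ipmi_device->user_interface, addr,
				   tx_msg->tx_msgid, request,
				   NULL,	/* user_msg_data */
				   0,		/* priority */
				   0,		/* max_retries: illustrative */
				   0);		/* retry_time_ms: illustrative */
	if (err)
		ipmi_cancel_tx_msg(ipmi_device, tx_msg);

	return err;
}
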
611 struct acpi_ipmi_device *ipmi_device; in acpi_ipmi_exit() local
626 ipmi_device = list_first_entry(&driver_data.ipmi_devices, in acpi_ipmi_exit()
629 __ipmi_dev_kill(ipmi_device); in acpi_ipmi_exit()
632 ipmi_flush_tx_msg(ipmi_device); in acpi_ipmi_exit()
633 acpi_ipmi_dev_put(ipmi_device); in acpi_ipmi_exit()