Lines Matching refs:dev

54 	struct mlx4_ib_dev     *dev ;  member
71 static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
74 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num, in mlx4_ib_update_cache_on_guid_change() argument
82 if (!mlx4_is_master(dev->dev)) in mlx4_ib_update_cache_on_guid_change()
85 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. in mlx4_ib_update_cache_on_guid_change()
95 if (slave_id >= dev->dev->num_slaves) { in mlx4_ib_update_cache_on_guid_change()
101 memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id], in mlx4_ib_update_cache_on_guid_change()
110 static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index) in get_cached_alias_guid() argument
116 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index]; in get_cached_alias_guid()
125 void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave, in mlx4_ib_slave_alias_guid_event() argument
135 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_slave_alias_guid_event()
136 if (dev->sriov.alias_guid.ports_guid[port_index].state_flags & in mlx4_ib_slave_alias_guid_event()
140 curr_guid = *(__be64 *)&dev->sriov. in mlx4_ib_slave_alias_guid_event()
149 required_guid = mlx4_get_admin_guid(dev->dev, slave, port); in mlx4_ib_slave_alias_guid_event()
153 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
156 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
159 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
163 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
165 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
170 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_slave_alias_guid_event()
173 mlx4_ib_init_alias_guid_work(dev, port_index); in mlx4_ib_slave_alias_guid_event()
185 void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev, in mlx4_ib_notify_slaves_on_guid_change() argument
200 if (!mlx4_is_master(dev->dev)) in mlx4_ib_notify_slaves_on_guid_change()
203 rec = &dev->sriov.alias_guid.ports_guid[port_num - 1]. in mlx4_ib_notify_slaves_on_guid_change()
205 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. in mlx4_ib_notify_slaves_on_guid_change()
217 if (slave_id >= dev->dev->persist->num_vfs + 1) in mlx4_ib_notify_slaves_on_guid_change()
220 slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num); in mlx4_ib_notify_slaves_on_guid_change()
225 form_cache_ag = get_cached_alias_guid(dev, port_num, in mlx4_ib_notify_slaves_on_guid_change()
235 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_notify_slaves_on_guid_change()
247 spin_unlock_irqrestore(&dev->sriov. in mlx4_ib_notify_slaves_on_guid_change()
252 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, in mlx4_ib_notify_slaves_on_guid_change()
254 mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num); in mlx4_ib_notify_slaves_on_guid_change()
258 prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num); in mlx4_ib_notify_slaves_on_guid_change()
259 new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num, in mlx4_ib_notify_slaves_on_guid_change()
268 mlx4_gen_port_state_change_eqe(dev->dev, slave_id, in mlx4_ib_notify_slaves_on_guid_change()
272 set_and_calc_slave_port_state(dev->dev, slave_id, port_num, in mlx4_ib_notify_slaves_on_guid_change()
278 mlx4_gen_port_state_change_eqe(dev->dev, in mlx4_ib_notify_slaves_on_guid_change()
291 struct mlx4_ib_dev *dev; in aliasguid_query_handler() local
304 dev = cb_ctx->dev; in aliasguid_query_handler()
306 rec = &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
326 rec = &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
329 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in aliasguid_query_handler()
356 mlx4_ib_warn(&dev->ib_dev, in aliasguid_query_handler()
370 mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set" in aliasguid_query_handler()
385 mlx4_set_admin_guid(dev->dev, in aliasguid_query_handler()
423 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in aliasguid_query_handler()
429 mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num, in aliasguid_query_handler()
433 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in aliasguid_query_handler()
434 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in aliasguid_query_handler()
435 if (!dev->sriov.is_going_down) { in aliasguid_query_handler()
436 get_low_record_time_index(dev, port_index, &resched_delay_sec); in aliasguid_query_handler()
437 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, in aliasguid_query_handler()
438 &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
447 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in aliasguid_query_handler()
448 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in aliasguid_query_handler()
451 static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index) in invalidate_guid_record() argument
457 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status in invalidate_guid_record()
463 *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
476 dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
478 if (dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
480 dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
489 struct mlx4_ib_dev *dev = to_mdev(ibdev); in set_guid_rec() local
499 &dev->sriov.alias_guid.ports_guid[port - 1].cb_list; in set_guid_rec()
523 callback_context->dev = dev; in set_guid_rec()
539 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
541 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
544 ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client, in set_guid_rec()
554 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
557 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
566 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in set_guid_rec()
567 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
568 invalidate_guid_record(dev, port, index); in set_guid_rec()
569 if (!dev->sriov.is_going_down) { in set_guid_rec()
570 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in set_guid_rec()
571 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, in set_guid_rec()
574 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
575 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in set_guid_rec()
581 static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port) in mlx4_ib_guid_port_init() argument
591 if (!entry || entry > dev->dev->persist->num_vfs || in mlx4_ib_guid_port_init()
592 !mlx4_is_slave_active(dev->dev, entry)) in mlx4_ib_guid_port_init()
594 guid = mlx4_get_admin_guid(dev->dev, entry, port); in mlx4_ib_guid_port_init()
595 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. in mlx4_ib_guid_port_init()
605 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port) in mlx4_ib_invalidate_all_guid_record() argument
612 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_invalidate_all_guid_record()
613 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_invalidate_all_guid_record()
615 if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags & in mlx4_ib_invalidate_all_guid_record()
617 mlx4_ib_guid_port_init(dev, port); in mlx4_ib_invalidate_all_guid_record()
618 dev->sriov.alias_guid.ports_guid[port - 1].state_flags &= in mlx4_ib_invalidate_all_guid_record()
622 invalidate_guid_record(dev, port, i); in mlx4_ib_invalidate_all_guid_record()
624 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) { in mlx4_ib_invalidate_all_guid_record()
630 cancel_delayed_work(&dev->sriov.alias_guid. in mlx4_ib_invalidate_all_guid_record()
632 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in mlx4_ib_invalidate_all_guid_record()
633 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, in mlx4_ib_invalidate_all_guid_record()
636 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_invalidate_all_guid_record()
637 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_invalidate_all_guid_record()
640 static void set_required_record(struct mlx4_ib_dev *dev, u8 port, in set_required_record() argument
650 &dev->sriov.alias_guid.ports_guid[port]. in set_required_record()
690 static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, in get_low_record_time_index() argument
699 rec = dev->sriov.alias_guid.ports_guid[port]. in get_low_record_time_index()
722 static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port, in get_next_record_to_update() argument
729 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in get_next_record_to_update()
730 record_index = get_low_record_time_index(dev, port, NULL); in get_next_record_to_update()
737 set_required_record(dev, port, rec, record_index); in get_next_record_to_update()
739 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in get_next_record_to_update()
755 struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); in alias_guid_work() local
762 ret = get_next_record_to_update(dev, sriov_alias_port->port, rec); in alias_guid_work()
768 set_guid_rec(&dev->ib_dev, rec); in alias_guid_work()
774 void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port) in mlx4_ib_init_alias_guid_work() argument
778 if (!mlx4_is_master(dev->dev)) in mlx4_ib_init_alias_guid_work()
780 spin_lock_irqsave(&dev->sriov.going_down_lock, flags); in mlx4_ib_init_alias_guid_work()
781 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_init_alias_guid_work()
782 if (!dev->sriov.is_going_down) { in mlx4_ib_init_alias_guid_work()
787 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port]. in mlx4_ib_init_alias_guid_work()
789 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, in mlx4_ib_init_alias_guid_work()
790 &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0); in mlx4_ib_init_alias_guid_work()
792 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_init_alias_guid_work()
793 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); in mlx4_ib_init_alias_guid_work()
796 void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev) in mlx4_ib_destroy_alias_guid_service() argument
799 struct mlx4_ib_sriov *sriov = &dev->sriov; in mlx4_ib_destroy_alias_guid_service()
805 for (i = 0 ; i < dev->num_ports; i++) { in mlx4_ib_destroy_alias_guid_service()
824 for (i = 0 ; i < dev->num_ports; i++) in mlx4_ib_destroy_alias_guid_service()
825 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service()
826 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_destroy_alias_guid_service()
827 kfree(dev->sriov.alias_guid.sa_client); in mlx4_ib_destroy_alias_guid_service()
830 int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) in mlx4_ib_init_alias_guid_service() argument
837 if (!mlx4_is_master(dev->dev)) in mlx4_ib_init_alias_guid_service()
839 dev->sriov.alias_guid.sa_client = in mlx4_ib_init_alias_guid_service()
840 kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL); in mlx4_ib_init_alias_guid_service()
841 if (!dev->sriov.alias_guid.sa_client) in mlx4_ib_init_alias_guid_service()
844 ib_sa_register_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
846 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock); in mlx4_ib_init_alias_guid_service()
848 for (i = 1; i <= dev->num_ports; ++i) { in mlx4_ib_init_alias_guid_service()
849 if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) { in mlx4_ib_init_alias_guid_service()
855 for (i = 0 ; i < dev->num_ports; i++) { in mlx4_ib_init_alias_guid_service()
856 memset(&dev->sriov.alias_guid.ports_guid[i], 0, in mlx4_ib_init_alias_guid_service()
858 dev->sriov.alias_guid.ports_guid[i].state_flags |= in mlx4_ib_init_alias_guid_service()
862 memset(dev->sriov.alias_guid.ports_guid[i]. in mlx4_ib_init_alias_guid_service()
864 sizeof(dev->sriov.alias_guid.ports_guid[i]. in mlx4_ib_init_alias_guid_service()
867 INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list); in mlx4_ib_init_alias_guid_service()
871 mlx4_set_admin_guid(dev->dev, 0, j, i + 1); in mlx4_ib_init_alias_guid_service()
873 invalidate_guid_record(dev, i + 1, j); in mlx4_ib_init_alias_guid_service()
875 dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; in mlx4_ib_init_alias_guid_service()
876 dev->sriov.alias_guid.ports_guid[i].port = i; in mlx4_ib_init_alias_guid_service()
879 dev->sriov.alias_guid.ports_guid[i].wq = in mlx4_ib_init_alias_guid_service()
881 if (!dev->sriov.alias_guid.ports_guid[i].wq) { in mlx4_ib_init_alias_guid_service()
885 INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work, in mlx4_ib_init_alias_guid_service()
892 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_init_alias_guid_service()
893 dev->sriov.alias_guid.ports_guid[i].wq = NULL; in mlx4_ib_init_alias_guid_service()
897 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
898 kfree(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
899 dev->sriov.alias_guid.sa_client = NULL; in mlx4_ib_init_alias_guid_service()
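
The matches in aliasguid_query_handler() (kernel lines 433-438), set_guid_rec() (566-575), mlx4_ib_invalidate_all_guid_record() (612-637) and mlx4_ib_init_alias_guid_work() (780-793) all show the same guard pattern before touching the per-port delayed work: take dev->sriov.going_down_lock, then dev->sriov.alias_guid.ag_work_lock, test is_going_down, and only then (re)queue alias_guid_work. The sketch below is a minimal userspace model of that pattern, not driver code: pthread mutexes stand in for the kernel spinlocks, a boolean stands in for the workqueue calls, and every name is a hypothetical stand-in.

/* Userspace model of the going_down_lock -> ag_work_lock -> is_going_down guard. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_alias_guid {
	pthread_mutex_t going_down_lock;  /* models dev->sriov.going_down_lock         */
	pthread_mutex_t ag_work_lock;     /* models dev->sriov.alias_guid.ag_work_lock */
	bool is_going_down;               /* models dev->sriov.is_going_down           */
	bool work_queued;                 /* stands in for the per-port delayed work   */
};

/* Mirrors the shape of mlx4_ib_init_alias_guid_work(): once teardown has
 * started (is_going_down set), no new alias-GUID work is scheduled. */
static void fake_init_alias_guid_work(struct fake_alias_guid *ag)
{
	pthread_mutex_lock(&ag->going_down_lock);
	pthread_mutex_lock(&ag->ag_work_lock);
	if (!ag->is_going_down)
		ag->work_queued = true;   /* the driver does cancel_delayed_work() + queue_delayed_work() here */
	pthread_mutex_unlock(&ag->ag_work_lock);
	pthread_mutex_unlock(&ag->going_down_lock);
}

int main(void)
{
	struct fake_alias_guid ag = {
		.going_down_lock = PTHREAD_MUTEX_INITIALIZER,
		.ag_work_lock    = PTHREAD_MUTEX_INITIALIZER,
	};

	fake_init_alias_guid_work(&ag);
	printf("work queued while up: %d\n", ag.work_queued);         /* prints 1 */

	ag.is_going_down = true;   /* teardown begun, as when the service is being destroyed */
	ag.work_queued = false;
	fake_init_alias_guid_work(&ag);
	printf("work queued while going down: %d\n", ag.work_queued); /* prints 0 */
	return 0;
}

Build with "cc -pthread" and run; the second call queues nothing because the guard sees is_going_down set, which is the behavior the locked checks in the listing above enforce.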