// SPDX-License-Identifier: GPL-2.0
/*
 * Serial Attached SCSI (SAS) Event processing
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"

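/*
 * Queue @sw on the HA event workqueue.  Returns false if the sas_ha is no
 * longer registered or if @sw was already queued.  While SAS_HA_DRAINING is
 * set the work is parked on ha->defer_q instead (unless it is already pending
 * there) and is requeued later by sas_queue_deferred_work().  Callers must
 * hold ha->lock.
 */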
bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
		return false;

	if (test_bit(SAS_HA_DRAINING, &ha->state)) {
		/* add it to the defer list, if not already pending */
		if (list_empty(&sw->drain_node))
			list_add_tail(&sw->drain_node, &ha->defer_q);
		return true;
	}

	return queue_work(ha->event_q, &sw->work);
}

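/*
 * sas_queue_work() under ha->lock.  The @event argument is not used here;
 * routing to the right handler is done by the worker functions through the
 * asd_sas_event that wraps @work.
 */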
static bool sas_queue_event(int event, struct sas_work *work,
			    struct sas_ha_struct *ha)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(&ha->lock, flags);
	rc = sas_queue_work(ha, work);
	spin_unlock_irqrestore(&ha->lock, flags);

	return rc;
}

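/*
 * Requeue events that were parked on ha->defer_q while the HA was draining or
 * resuming.  If queueing fails (e.g. the HA has been unregistered in the
 * meantime), drop the runtime PM reference taken in sas_notify_*_event() and
 * free the event.
 */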
void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
	struct sas_work *sw, *_sw;

	spin_lock_irq(&ha->lock);
	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
		list_del_init(&sw->drain_node);

		if (!sas_queue_work(ha, sw)) {
			pm_runtime_put(ha->dev);
			sas_free_event(to_asd_sas_event(&sw->work));
		}
	}
	spin_unlock_irq(&ha->lock);
}

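/*
 * Drain all pending event work.  Setting SAS_HA_DRAINING first and then
 * cycling ha->lock guarantees that any notifier already inside
 * sas_queue_event() has finished, so everything submitted from now on goes to
 * ha->defer_q rather than to the workqueues being drained.  Once both
 * workqueues are empty the flag is cleared and the deferred events are queued
 * for real.
 */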
void __sas_drain_work(struct sas_ha_struct *ha)
{
	set_bit(SAS_HA_DRAINING, &ha->state);
	/* flush submitters */
	spin_lock_irq(&ha->lock);
	spin_unlock_irq(&ha->lock);

	drain_workqueue(ha->event_q);
	drain_workqueue(ha->disco_q);

	clear_bit(SAS_HA_DRAINING, &ha->state);
	sas_queue_deferred_work(ha);
}

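/*
 * Interruptible wrapper around __sas_drain_work().  Serialized by
 * ha->drain_mutex and skipped entirely if the HA is not registered.  Returns
 * 0 on success or -EINTR if the caller was interrupted while waiting for the
 * mutex.
 */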
int sas_drain_work(struct sas_ha_struct *ha)
{
	int err;

	err = mutex_lock_interruptible(&ha->drain_mutex);
	if (err)
		return err;
	if (test_bit(SAS_HA_REGISTERED, &ha->state))
		__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(sas_drain_work);

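/*
 * sas_disable_revalidation()/sas_enable_revalidation() bracket ATA error
 * handling.  While SAS_HA_ATA_EH_ACTIVE is set, domain revalidation is held
 * off; on re-enable, every port that still has DISCE_REVALIDATE_DOMAIN
 * pending gets a PORTE_BROADCAST_RCVD event on its first phy so the deferred
 * revalidation is carried out once EH has finished.
 */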
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
	mutex_lock(&ha->disco_mutex);
	set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	mutex_unlock(&ha->disco_mutex);
}

void sas_enable_revalidation(struct sas_ha_struct *ha)
{
	int i;

	mutex_lock(&ha->disco_mutex);
	clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		const int ev = DISCE_REVALIDATE_DOMAIN;
		struct sas_discovery *d = &port->disc;
		struct asd_sas_phy *sas_phy;

		if (!test_and_clear_bit(ev, &d->pending))
			continue;

		spin_lock(&port->phy_list_lock);
		if (list_empty(&port->phy_list)) {
			spin_unlock(&port->phy_list_lock);
			continue;
		}

		sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
				port_phy_el);
		spin_unlock(&port->phy_list_lock);
		sas_notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD, GFP_KERNEL);
	}
	mutex_unlock(&ha->disco_mutex);
}

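/*
 * Event workers: look up the handler for the event in the port/phy function
 * table, run it, then balance the pm_runtime_get_noresume() taken in
 * sas_notify_port_event()/sas_notify_phy_event() and free the event.
 */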
static void sas_port_event_worker(struct work_struct *work)
{
	struct asd_sas_event *ev = to_asd_sas_event(work);
	struct asd_sas_phy *phy = ev->phy;
	struct sas_ha_struct *ha = phy->ha;

	sas_port_event_fns[ev->event](work);
	pm_runtime_put(ha->dev);
	sas_free_event(ev);
}

static void sas_phy_event_worker(struct work_struct *work)
{
	struct asd_sas_event *ev = to_asd_sas_event(work);
	struct asd_sas_phy *phy = ev->phy;
	struct sas_ha_struct *ha = phy->ha;

	sas_phy_event_fns[ev->event](work);
	pm_runtime_put(ha->dev);
	sas_free_event(ev);
}

/*
 * Defer events raised by phys that are not suspended (e.g. newly attached
 * phys) while the HA itself is still resuming; sas_queue_deferred_work()
 * queues them once resume completes.
 */
static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
{
	struct sas_ha_struct *ha = phy->ha;
	unsigned long flags;
	bool deferred = false;

	spin_lock_irqsave(&ha->lock, flags);
	if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
		struct sas_work *sw = &ev->work;

		list_add_tail(&sw->drain_node, &ha->defer_q);
		deferred = true;
	}
	spin_unlock_irqrestore(&ha->lock, flags);
	return deferred;
}

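/*
 * sas_notify_port_event - notify libsas of a port event on @phy
 *
 * Safe to call from atomic context; LLDDs reporting from their interrupt
 * handlers pass GFP_ATOMIC for @gfp_flags, e.g. (illustrative only, "my_phy"
 * is a hypothetical LLDD structure):
 *
 *	sas_notify_port_event(&my_phy->sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
 *
 * A runtime PM reference is taken here and released by
 * sas_port_event_worker() once the event has been handled.  If the HA is
 * still resuming, the event is parked on ha->defer_q instead; if it cannot
 * be queued at all, the reference is dropped and the event freed right away.
 */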
void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
			   gfp_t gfp_flags)
{
	struct sas_ha_struct *ha = phy->ha;
	struct asd_sas_event *ev;

	BUG_ON(event >= PORT_NUM_EVENTS);

	ev = sas_alloc_event(phy, gfp_flags);
	if (!ev)
		return;

	/* Paired with the pm_runtime_put() in sas_port_event_worker() */
	pm_runtime_get_noresume(ha->dev);

	INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);

	if (sas_defer_event(phy, ev))
		return;

	if (!sas_queue_event(event, &ev->work, ha)) {
		pm_runtime_put(ha->dev);
		sas_free_event(ev);
	}
}
EXPORT_SYMBOL_GPL(sas_notify_port_event);

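/*
 * sas_notify_phy_event - notify libsas of a phy event on @phy
 *
 * Same rules as sas_notify_port_event(): callable from atomic context with
 * GFP_ATOMIC, runtime PM reference released by sas_phy_event_worker(), and
 * the same defer/cleanup handling if the HA is resuming or the event cannot
 * be queued.
 */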
void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
			  gfp_t gfp_flags)
{
	struct sas_ha_struct *ha = phy->ha;
	struct asd_sas_event *ev;

	BUG_ON(event >= PHY_NUM_EVENTS);

	ev = sas_alloc_event(phy, gfp_flags);
	if (!ev)
		return;

	/* Paired with the pm_runtime_put() in sas_phy_event_worker() */
	pm_runtime_get_noresume(ha->dev);

	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);

	if (sas_defer_event(phy, ev))
		return;

	if (!sas_queue_event(event, &ev->work, ha)) {
		pm_runtime_put(ha->dev);
		sas_free_event(ev);
	}
}
EXPORT_SYMBOL_GPL(sas_notify_phy_event);
217