/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_cfg;

/**
 * DOC: Operational mode - what is it?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how
 * the underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: e.g. different fw APIs will require
 * different op_modes. This is why the op_mode is virtualized.
 */

/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 *	1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *	   capabilities advertised by the fw file (in TLV format).
 *	2) The driver layer starts the op_mode (ops->start)
 *	3) The op_mode registers with mac80211
 *	4) The op_mode is governed by mac80211
 *	5) The driver layer stops the op_mode
 *
 * A rough sketch of this sequence, seen from the driver layer's side, is
 * shown below.
 */
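/*
 * Example - the life cycle as driven by the driver layer. This is an
 * illustrative sketch only, not the actual iwl-drv.c code; "chosen_ops" and
 * the surrounding error handling are assumed:
 *
 *	struct iwl_op_mode *op_mode;
 *
 *	op_mode = chosen_ops->start(trans, cfg, fw, dbgfs_dir);
 *	if (!op_mode)
 *		goto err;
 *
 *	(the op_mode registers with mac80211 and runs under it)
 *
 *	iwl_op_mode_stop(op_mode);
 */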

/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers; the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss, which can be left
 * out *iff* the op_mode will never run on hardware with multi-queue
 * capability.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. cmd is
 *	the HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data)
 *	notifications received on the RSS queue(s). The queue parameter
 *	indicates which of the RSS queues received this frame; it will
 *	always be non-zero. This method must not sleep.
 * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set
 *	completes. Must be atomic.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW RF kill switch. True means
 *	that the radio is killed. Return %true if the device should be
 *	stopped by the transport immediately after the call. May sleep.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed
 *	and there are Tx packets pending in the transport layer.
 *	Must be atomic
 * @nic_error: error notification. Must be atomic and must be called with BH
 *	disabled.
 * @cmd_queue_full: called when the command queue gets full. Must be atomic
 *	and called with BH disabled.
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep
 * @wimax_active: invoked when WiMax becomes active. May sleep
 * @time_point: called when the transport layer wants to collect debug data
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*async_cb)(struct iwl_op_mode *op_mode,
			 const struct iwl_device_cmd *cmd);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
};
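
/*
 * Example - a minimal ops table. Illustrative only; every "my_*" handler
 * below is hypothetical. @rx_rss is omitted on the assumption that this
 * op_mode never runs on multi-queue capable hardware; @time_point may also
 * be left out since its caller checks for NULL.
 *
 *	static const struct iwl_op_mode_ops my_op_mode_ops = {
 *		.start		= my_op_mode_start,
 *		.stop		= my_op_mode_stop,
 *		.rx		= my_op_mode_rx,
 *		.async_cb	= my_op_mode_async_cb,
 *		.queue_full	= my_op_mode_queue_full,
 *		.queue_not_full	= my_op_mode_queue_not_full,
 *		.hw_rf_kill	= my_op_mode_hw_rf_kill,
 *		.free_skb	= my_op_mode_free_skb,
 *		.nic_error	= my_op_mode_nic_error,
 *		.cmd_queue_full	= my_op_mode_cmd_queue_full,
 *		.nic_config	= my_op_mode_nic_config,
 *		.wimax_active	= my_op_mode_wimax_active,
 *	};
 */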

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);
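
/*
 * Example - hooking the ops into the driver layer from a module's init/exit
 * (illustrative sketch; the "my_op_mode" name and functions are
 * hypothetical):
 *
 *	static int __init my_op_mode_init(void)
 *	{
 *		return iwl_opmode_register("my_op_mode", &my_op_mode_ops);
 *	}
 *	module_init(my_op_mode_init);
 *
 *	static void __exit my_op_mode_exit(void)
 *	{
 *		iwl_opmode_deregister("my_op_mode");
 *	}
 *	module_exit(my_op_mode_exit);
 */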

/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 * @op_mode_specific: per op_mode private data area, aligned to a pointer;
 *	its layout is known only to the specific op_mode implementation
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};
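
/*
 * Example - carving private state out of the trailing op_mode_specific[]
 * area. Illustrative only: struct my_priv is hypothetical, and real
 * op_modes allocate and embed their state in their own way (e.g. behind
 * ieee80211_alloc_hw()):
 *
 *	struct my_priv {
 *		struct iwl_trans *trans;
 *	};
 *
 *	struct iwl_op_mode *op_mode;
 *	struct my_priv *priv;
 *
 *	op_mode = kzalloc(sizeof(*op_mode) + sizeof(*priv), GFP_KERNEL);
 *	if (!op_mode)
 *		return NULL;
 *	op_mode->ops = &my_op_mode_ops;
 *	priv = (struct my_priv *)op_mode->op_mode_specific;
 *	priv->trans = trans;
 */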

static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode,
					const struct iwl_device_cmd *cmd)
{
	if (op_mode->ops->async_cb)
		op_mode->ops->async_cb(op_mode, cmd);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}

static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
{
	op_mode->ops->nic_error(op_mode);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}

#endif /* __iwl_op_mode_h__ */