v6.8
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_cfg;

/**
 * DOC: Operational mode - what is it?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how the
 * underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: e.g. different fw APIs will require
 * different op_modes. This is why the op_mode is virtualized.
 */

/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 *	1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *	   capabilities advertised by the fw file (in TLV format).
 *	2) The driver layer starts the op_mode (ops->start)
 *	3) The op_mode registers with mac80211
 *	4) The op_mode is governed by mac80211
 *	5) The driver layer stops the op_mode
 */

/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers, the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss which can be left
 * out *iff* the opmode will never run on hardware with multi-queue capability.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 *	HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
 *	received on the RSS queue(s). The queue parameter indicates which of the
 *	RSS queues received this frame; it will always be non-zero.
 *	This method must not sleep.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
 *	the radio is killed. Return %true if the device should be stopped by
 *	the transport immediately after the call. May sleep.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed and
 *	there are Tx packets pending in the transport layer.
 *	Must be atomic
 * @nic_error: error notification. Must be atomic and must be called with BH
 *	disabled, unless the sync parameter is true.
 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
 *	called with BH disabled.
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep
 * @wimax_active: invoked when WiMax becomes active. May sleep
 * @time_point: called when transport layer wants to collect debug data
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
};

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);

/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};

static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	return op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}

static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	op_mode->ops->nic_error(op_mode, sync);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}

#endif /* __iwl_op_mode_h__ */
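For illustration, here is a minimal, hedged sketch of how an op_mode could fill in a struct iwl_op_mode_ops table and register it with iwl_opmode_register(), following the v6.8 callback signatures above. Every name prefixed with my_ and the "my-opmode" string are hypothetical, invented only for this sketch; the real op_modes live elsewhere in the driver (e.g. iwlmvm) and must implement every mandatory handler.

/*
 * Hedged sketch, not driver code: a hypothetical op_mode ("my-opmode",
 * my_* names invented here) defining and registering its ops using the
 * v6.8 signatures above. Only a few handlers are shown; a real op_mode
 * must implement all mandatory handlers.
 */
#include <linux/module.h>
#include "iwl-op-mode.h"

static struct iwl_op_mode *my_start(struct iwl_trans *trans,
				    const struct iwl_cfg *cfg,
				    const struct iwl_fw *fw,
				    struct dentry *dbgfs_dir)
{
	/* allocate struct iwl_op_mode (private state goes in
	 * op_mode_specific[]), configure the transport, start the fw
	 * and register with mac80211; return the new op_mode */
	return NULL;	/* placeholder for the sketch */
}

static void my_stop(struct iwl_op_mode *op_mode)
{
	/* unregister from mac80211 and free everything allocated in start */
}

static void my_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		  struct iwl_rx_cmd_buffer *rxb)
{
	/* dispatch the notification/response; must not sleep */
}

static void my_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	/* collect debug data and schedule a fw restart */
}

static const struct iwl_op_mode_ops my_ops = {
	.start		= my_start,
	.stop		= my_stop,
	.rx		= my_rx,
	.nic_error	= my_nic_error,
	/* .queue_full, .queue_not_full, .hw_rf_kill, .free_skb,
	 * .cmd_queue_full, .nic_config, .wimax_active, ... also required */
};

static int __init my_opmode_init(void)
{
	/* iwl-drv.c will later call my_ops.start() if it selects this
	 * op_mode based on the fw file's TLV capabilities */
	return iwl_opmode_register("my-opmode", &my_ops);
}
module_init(my_opmode_init);

static void __exit my_opmode_exit(void)
{
	iwl_opmode_deregister("my-opmode");
}
module_exit(my_opmode_exit);

The indirection through struct iwl_op_mode_ops is what lets iwl-drv.c choose between registered op_modes at runtime, as described in the life-cycle DOC above.
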
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_cfg;

/**
 * DOC: Operational mode - what is it?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how the
 * underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: e.g. different fw APIs will require
 * different op_modes. This is why the op_mode is virtualized.
 */

/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 *	1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *	   capabilities advertised by the fw file (in TLV format).
 *	2) The driver layer starts the op_mode (ops->start)
 *	3) The op_mode registers with mac80211
 *	4) The op_mode is governed by mac80211
 *	5) The driver layer stops the op_mode
 */

/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers, the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss which can be left
 * out *iff* the opmode will never run on hardware with multi-queue capability.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 *	HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
 *	received on the RSS queue(s). The queue parameter indicates which of the
 *	RSS queues received this frame; it will always be non-zero.
 *	This method must not sleep.
 * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set
 *	completes. Must be atomic.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
 *	the radio is killed. Return %true if the device should be stopped by
 *	the transport immediately after the call. May sleep.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed and
 *	there are Tx packets pending in the transport layer.
 *	Must be atomic
 * @nic_error: error notification. Must be atomic and must be called with BH
 *	disabled.
 * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
 *	called with BH disabled.
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep
 * @wimax_active: invoked when WiMax becomes active. May sleep
 * @time_point: called when transport layer wants to collect debug data
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*async_cb)(struct iwl_op_mode *op_mode,
			 const struct iwl_device_cmd *cmd);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
};

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);

/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};

static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	return op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode,
					const struct iwl_device_cmd *cmd)
{
	if (op_mode->ops->async_cb)
		op_mode->ops->async_cb(op_mode, cmd);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}

static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
{
	op_mode->ops->nic_error(op_mode);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}

#endif /* __iwl_op_mode_h__ */
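On the transport side, a small hedged sketch of how the __must_check rf-kill wrapper above is meant to be used: the transport reports the new switch state and, per the @hw_rf_kill documentation, stops the device if the op_mode returns true. my_trans_report_rfkill() and my_trans_stop_device() are hypothetical names for this sketch, not functions from the driver.

/*
 * Hedged sketch: transport-side handling of an rf-kill change using the
 * iwl_op_mode_hw_rf_kill() wrapper above. my_trans_stop_device() is a
 * hypothetical stand-in for the transport-specific stop routine.
 */
static void my_trans_stop_device(struct iwl_trans *trans);

static void my_trans_report_rfkill(struct iwl_trans *trans,
				   struct iwl_op_mode *op_mode,
				   bool radio_killed)
{
	/* the wrapper is __must_check: a true return means the op_mode
	 * wants the transport to stop the device immediately */
	if (iwl_op_mode_hw_rf_kill(op_mode, radio_killed))
		my_trans_stop_device(trans);
}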