// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_vf_mbx.h"

/**
 * ice_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: VF ID to send msg
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send a message to the VF driver (0x0802) over the mailbox queue. The
 * message is sent asynchronously via ice_sq_send_cmd().
 */
int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
		      u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
	struct ice_aqc_pf_vf_msg *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);

	cmd = &desc.params.virt;
	cmd->id = cpu_to_le32(vfid);

	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);

	if (msglen)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
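
/* Illustrative example (not part of the driver): a PF completing a VF's
 * VIRTCHNL_OP_GET_VF_RESOURCES request could reply roughly as follows,
 * where vf and vfres are assumed to come from the caller's own VF
 * bookkeeping:
 *
 *	err = ice_aq_send_msg_to_vf(hw, vf->vf_id,
 *				    VIRTCHNL_OP_GET_VF_RESOURCES,
 *				    VIRTCHNL_STATUS_SUCCESS, (u8 *)vfres,
 *				    sizeof(*vfres), NULL);
 */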

static const u32 ice_legacy_aq_to_vc_speed[] = {
	VIRTCHNL_LINK_SPEED_100MB,	/* BIT(0) */
	VIRTCHNL_LINK_SPEED_100MB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_1GB,
	VIRTCHNL_LINK_SPEED_10GB,
	VIRTCHNL_LINK_SPEED_20GB,
	VIRTCHNL_LINK_SPEED_25GB,
	VIRTCHNL_LINK_SPEED_40GB,
	VIRTCHNL_LINK_SPEED_40GB,
	VIRTCHNL_LINK_SPEED_40GB,
};

/**
 * ice_conv_link_speed_to_virtchnl
 * @adv_link_support: determines the format of the returned link speed
 * @link_speed: variable containing the link_speed to be converted
 *
 * Convert link speed supported by HW to link speed supported by virtchnl.
 * If adv_link_support is true, then return link speed in Mbps. Else return
 * link speed as a VIRTCHNL_LINK_SPEED_* value cast to a u32. Note that the
 * caller needs to cast back to an enum virtchnl_link_speed when
 * adv_link_support is false, but when adv_link_support is true the caller can
 * expect the speed in Mbps.
 */
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	/* convert a BIT() value into an array index */
	u32 index = fls(link_speed) - 1;

	if (adv_link_support)
		return ice_get_link_speed(index);
	else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
		/* Virtchnl speeds are not defined for every speed supported by
		 * the hardware. To maintain compatibility with older AVF
		 * drivers, newer speed values are resolved to the closest
		 * known virtchnl speed when reporting the link speed.
		 */
		return ice_legacy_aq_to_vc_speed[index];

	return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
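
/* Worked example (illustrative, assuming the ICE_AQ_LINK_SPEED_* bit layout
 * where BIT(9) encodes 50 Gbps): with adv_link_support == false and
 * link_speed == BIT(9), fls() yields index 9 and the legacy table above
 * reports VIRTCHNL_LINK_SPEED_40GB, the closest speed older AVF drivers
 * understand. With adv_link_support == true the same input is returned in
 * Mbps via ice_get_link_speed().
 */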

/* The mailbox overflow detection algorithm helps to check if there
 * is a possibility of a malicious VF transmitting too many MBX messages to the
 * PF.
 * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
 * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
 * The struct ice_mbx_snapshot helps to track and traverse a static window of
 * messages within the mailbox queue while looking for a malicious VF.
 *
 * 2. When the caller starts processing its mailbox queue in response to an
 * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
 * the algorithm can be run for the first time for that interrupt. This
 * requires calling ice_mbx_reset_snapshot() as well as calling
 * ice_mbx_reset_vf_info() for each VF tracking structure.
 *
 * 3. For every message read by the caller from the MBX Queue, the caller must
 * call the detection algorithm's entry function ice_mbx_vf_state_handler().
 * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
 * filled as it is required to be passed to the algorithm.
 *
 * 4. Every time a message is read from the MBX queue, a tracking structure
 * for the VF must be passed to the state handler. The boolean output
 * report_malvf from ice_mbx_vf_state_handler() serves as an indicator to the
 * caller whether it must report this VF as malicious or not.
 *
 * 5. When a VF is identified to be malicious, the caller can send a message
 * to the system administrator.
 *
 * 6. The PF is responsible for maintaining the struct ice_mbx_vf_info
 * structure for each VF. The PF should clear the VF tracking structure if the
 * VF is reset. When a VF is shut down and brought back up, we will then
 * assume that the new VF is not malicious and may report it again if we
 * detect it again.
 *
 * 7. The function ice_mbx_reset_snapshot() is called to reset the information
 * in ice_mbx_snapshot for every new mailbox interrupt handled.
 */
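
/* Minimal caller sketch (illustrative only, not part of this file): one way
 * the mailbox service loop could drive the algorithm described above. The
 * names dev, event, i, pending, watermark and ice_get_vf_mbx_info() are
 * hypothetical placeholders for the caller's own receive-event handling and
 * VF lookup.
 *
 *	struct ice_mbx_data mbx_data = {
 *		.num_msg_proc = i,
 *		.num_pending_arq = pending,
 *		.max_num_msgs_mbx = hw->mailboxq.num_rq_entries,
 *		.async_watermark_val = watermark,
 *	};
 *	struct ice_mbx_vf_info *vf_info;
 *	bool report_malvf = false;
 *
 *	vf_info = ice_get_vf_mbx_info(hw, &event);	// hypothetical lookup
 *	if (!ice_mbx_vf_state_handler(hw, &mbx_data, vf_info, &report_malvf) &&
 *	    report_malvf)
 *		dev_warn(dev, "VF flagged as potentially malicious\n");
 */
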
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
 * the max messages check must be ignored in the algorithm
 */
#define ICE_IGNORE_MAX_MSG_CNT	0xFFFF

/**
 * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
 * @snap: pointer to the mailbox snapshot
 */
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
	struct ice_mbx_vf_info *vf_info;

	/* Clear mbx_buf in the mailbox snapshot structure and set the
	 * mailbox snapshot state to a new capture.
	 */
	memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

	/* Reset message counts for all VFs to zero */
	list_for_each_entry(vf_info, &snap->mbx_vf, list_entry)
		vf_info->msg_count = 0;
}

/**
 * ice_mbx_traverse - Pass through mailbox snapshot
 * @hw: pointer to the HW struct
 * @new_state: new algorithm state
 *
 * Traverse the static mailbox snapshot without checking for malicious VFs.
 */
static void
ice_mbx_traverse(struct ice_hw *hw,
		 enum ice_mbx_snapshot_state *new_state)
{
	struct ice_mbx_snap_buffer_data *snap_buf;
	u32 num_iterations;

	snap_buf = &hw->mbx_snapshot.mbx_buf;

	/* Because the mailbox buffer is circular, apply a mask to the
	 * incremented iteration count.
	 */
	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);

	/* Check either of the below conditions to exit snapshot traversal:
	 * Condition-1: The number of iterations in the mailbox equals the
	 * mailbox head, which indicates that we have reached the end of the
	 * static snapshot.
	 * Condition-2: If the maximum number of messages serviced in the
	 * mailbox for a given interrupt is the highest possible value, the
	 * messages-processed check is skipped. Otherwise, exit once the number
	 * of messages processed is greater than or equal to the maximum number
	 * of mailbox entries serviced in the current work item.
	 */
	if (num_iterations == snap_buf->head ||
	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}
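
/* Illustrative example: assuming PF_MBX_ARQH_ARQH_M is the receive-ring index
 * mask, a snapshot whose tail was captured at the last descriptor of the ring
 * wraps to index 0 on the next increment and keeps iterating until
 * num_iterations equals the captured head (or the per-interrupt message
 * budget is exhausted), at which point the state machine returns to
 * ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT.
 */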

/**
 * ice_mbx_detect_malvf - Detect malicious VF in snapshot
 * @hw: pointer to the HW struct
 * @vf_info: mailbox tracking structure for a VF
 * @new_state: new algorithm state
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * This function tracks the number of asynchronous messages
 * sent per VF and marks the VF as malicious if it exceeds
 * the permissible number of messages to send.
 */
static int
ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
		     enum ice_mbx_snapshot_state *new_state,
		     bool *is_malvf)
{
	/* increment the message count for this VF */
	vf_info->msg_count++;

	if (vf_info->msg_count >= ICE_ASYNC_VF_MSG_THRESHOLD)
		*is_malvf = true;

	/* continue to iterate through the mailbox snapshot */
	ice_mbx_traverse(hw, new_state);

	return 0;
}
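
/* Illustrative note: a VF is flagged once its msg_count within a single
 * snapshot reaches ICE_ASYNC_VF_MSG_THRESHOLD (defined in ice_vf_mbx.h). The
 * count is zeroed by ice_mbx_reset_snapshot() whenever a new snapshot is
 * captured, so only bursts of messages within one snapshot window count
 * toward the threshold.
 */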

/**
 * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
 * @hw: pointer to the HW struct
 * @event: pointer to the control queue receive event
 *
 * This function triggers a decrement of the counter
 * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
 * the buffers at the PF mailbox queue.
 */
void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
			      const struct ice_rq_event_info *event)
{
	u16 vfid = le16_to_cpu(event->desc.retval);

	wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
}

/**
 * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
 * @hw: pointer to the HW struct
 * @vf_id: VF ID in the PF space
 *
 * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
 * be called when a VF is created and on VF reset.
 */
void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
{
	u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));

	wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
}
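
/* Illustrative usage (a sketch, assuming the caller gates these calls on
 * E830 hardware support): after a mailbox receive event is cleaned and its
 * buffer returned to the PF mailbox queue, the in-flight counter for the
 * sending VF is dropped by one, e.g.
 *
 *	ice_mbx_vf_dec_trig_e830(hw, &event);
 *
 * and when a VF is created or reset the counter is cleared with
 *
 *	ice_mbx_vf_clear_cnt_e830(hw, vf->vf_id);
 */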

/**
 * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
 * @hw: pointer to the HW struct
 * @mbx_data: pointer to structure containing mailbox data
 * @vf_info: mailbox tracking structure for the VF in question
 * @report_malvf: boolean output to indicate whether VF should be reported
 *
 * The function serves as an entry point for the malicious VF
 * detection algorithm by handling the different states and state
 * transitions of the algorithm:
 * New snapshot: This state is entered when creating a new static
 * snapshot. The data from any previous mailbox snapshot is
 * cleared and a new capture of the mailbox head and tail is
 * logged. This will be the new static snapshot to detect
 * asynchronous messages sent by VFs. On capturing the snapshot,
 * and depending on whether the number of pending messages in that
 * snapshot exceeds the watermark value, the state machine enters
 * the traverse or detect state.
 * Traverse: If the pending message count is below the watermark, iterate
 * through the snapshot without any action on the VF.
 * Detect: If the pending message count exceeds the watermark, traverse
 * the static snapshot and look for a malicious VF.
 */
int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
			 struct ice_mbx_vf_info *vf_info, bool *report_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	struct ice_mbx_snap_buffer_data *snap_buf;
	struct ice_ctl_q_info *cq = &hw->mailboxq;
	enum ice_mbx_snapshot_state new_state;
	bool is_malvf = false;
	int status = 0;

	if (!report_malvf || !mbx_data || !vf_info)
		return -EINVAL;

	/* When entering the mailbox state machine assume that the VF
	 * is not malicious until detected.
	 */
	*report_malvf = false;

	/* Check that the maximum number of messages allowed to be processed
	 * while servicing the current interrupt is not less than the defined
	 * AVF message threshold.
	 */
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return -EINVAL;

	/* The watermark value should not be less than the threshold limit
	 * set for the number of asynchronous messages a VF can send to the
	 * mailbox, nor should it be greater than the maximum number of
	 * messages in the mailbox serviced in the current interrupt.
	 */
	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return -EINVAL;

	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
	snap_buf = &snap->mbx_buf;

	switch (snap_buf->state) {
	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
		/* Clear any previously held data in mailbox snapshot structure. */
		ice_mbx_reset_snapshot(snap);

		/* Collect the pending ARQ count, number of messages processed and
		 * the maximum number of messages allowed to be processed from the
		 * Mailbox for current interrupt.
		 */
		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;

		/* Capture a new static snapshot of the mailbox by logging the
		 * head and tail of snapshot and set num_iterations to the tail
		 * value to mark the start of the iteration through the snapshot.
		 */
		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
						  mbx_data->num_pending_arq);
		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
		snap_buf->num_iterations = snap_buf->tail;

		/* The pending ARQ message count returned by ice_clean_rq_elem()
		 * is the difference between the head and tail of the
		 * mailbox queue. Comparing this value against the watermark
		 * helps to check if we potentially have malicious VFs.
		 */
		if (snap_buf->num_pending_arq >=
		    mbx_data->async_watermark_val) {
			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
			status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
		} else {
			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
			ice_mbx_traverse(hw, &new_state);
		}
		break;

	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
		ice_mbx_traverse(hw, &new_state);
		break;

	case ICE_MAL_VF_DETECT_STATE_DETECT:
		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
		status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
		break;

	default:
		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
		status = -EIO;
	}

	snap_buf->state = new_state;

	/* Only report a VF as malicious the first time we detect it */
	if (is_malvf && !vf_info->malicious) {
		vf_info->malicious = 1;
		*report_malvf = true;
	}

	return status;
}

/**
 * ice_mbx_clear_malvf - Clear VF mailbox info
 * @vf_info: the mailbox tracking structure for a VF
 *
 * In case of a VF reset, this function shall be called to clear the VF's
 * current mailbox tracking state.
 */
void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info)
{
	vf_info->malicious = 0;
	vf_info->msg_count = 0;
}

/**
 * ice_mbx_init_vf_info - Initialize a new VF mailbox tracking info
 * @hw: pointer to the hardware structure
 * @vf_info: the mailbox tracking info structure for a VF
 *
 * Initialize a VF mailbox tracking info structure and insert it into the
 * snapshot list.
 *
 * If you remove the VF, you must also delete the associated VF info structure
 * from the linked list.
 */
void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	ice_mbx_clear_malvf(vf_info);
	list_add(&vf_info->list_entry, &snap->mbx_vf);
}
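
/* Illustrative VF lifecycle (a sketch of the caller's responsibilities, not
 * code in this file): on VF creation the tracking info is registered, on VF
 * reset it is cleared, and on VF removal it is unlinked again. The member
 * name mbx_info in the caller's VF structure is an assumption for
 * illustration.
 *
 *	ice_mbx_init_vf_info(hw, &vf->mbx_info);	// VF created
 *	ice_mbx_clear_malvf(&vf->mbx_info);		// VF reset
 *	list_del(&vf->mbx_info.list_entry);		// VF removed
 */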

/**
 * ice_mbx_init_snapshot - Initialize mailbox snapshot data
 * @hw: pointer to the hardware structure
 *
 * Clear the mailbox snapshot structure and initialize the VF mailbox list.
 */
void ice_mbx_init_snapshot(struct ice_hw *hw)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	INIT_LIST_HEAD(&snap->mbx_vf);
	ice_mbx_reset_snapshot(snap);
}