/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500

static u32 pxp_global_win[] = {
	0,
	0,
	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};

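/* Attach the firmware-generated IRO (internal RAM offsets) table, defined in
 * the HSI headers, to the device so other code can resolve storm RAM offsets.
 */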
void qed_init_iro_array(struct qed_dev *cdev)
{
	cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
			   u32 rt_offset, u32 *p_val, size_t size)
{
	size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

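/* Flush the runtime entries staged at [rt_offset, rt_offset + size) to the
 * chip: valid entries are written one by one over GRC, or as contiguous
 * segments via DMAE when the destination requires the wide bus.
 */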
static int qed_init_rt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	int rc = 0;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(p_init_val + i),
				       addr + (i << 2), segment, 0);
		if (rc)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

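/* Allocate the per-hwfn runtime (RT) shadow arrays; VFs carry no RT data of
 * their own, so nothing is allocated for them.
 */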
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
				   GFP_KERNEL);
	if (!rt_data->b_valid)
		return -ENOMEM;

	rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
				    GFP_KERNEL);
	if (!rt_data->init_val) {
		kfree(rt_data->b_valid);
		return -ENOMEM;
	}

	return 0;
}

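/* Release the RT shadow arrays allocated by qed_init_alloc() */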
void qed_init_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->rt_data.init_val);
	kfree(p_hwfn->rt_data.b_valid);
}

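/* Write 'size' dwords taken from 'buf + dmae_data_offset' to the chip at
 * 'addr'. Short runs fall back to individual register writes; longer runs,
 * or wide-bus destinations, go through DMAE.
 */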
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u32 addr,
			       u32 dmae_data_offset,
			       u32 size,
			       const u32 *buf,
			       bool b_must_dmae,
			       bool b_can_dmae)
{
	int rc = 0;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
		const u32 *data = buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
				       (uintptr_t)(buf + dmae_data_offset),
				       addr, size, 0);
	}

	return rc;
}

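/* Zero 'fill_count' dwords at 'addr' with a single DMAE transaction that
 * keeps replicating the same zeroed source buffer. The 'fill' value itself
 * is ignored here; only zero-fill is implemented.
 */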
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u32 addr, u32 fill, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	/* invoke the DMAE virtual/physical buffer API with
	 * 1. DMAE init channel
	 * 2. addr,
	 * 3. zero_buffer (the replicated source),
	 * 4. fill_count
	 */

	return qed_dmae_host2grc(p_hwfn, p_ptt,
				 (uintptr_t)(&zero_buffer[0]),
				 addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC);
}

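/* Fill 'fill_count' consecutive dwords at 'addr' with 'fill' using plain
 * register writes (the non-DMAE path).
 */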
static void qed_init_fill(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		qed_wr(p_hwfn, p_ptt, addr, fill);
}

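/* Handle the INIT_SRC_ARRAY variant of a write command: the payload lives in
 * the firmware's value array and may be zipped, a repeated pattern, or a
 * plain (standard) run of dwords.
 */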
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct init_write_op *cmd,
			      bool b_must_dmae, bool b_can_dmae)
{
	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	u32 offset, output_len, input_len, max_size;
	struct qed_dev *cdev = p_hwfn->cdev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	int rc = 0;
	u32 size;

	array_data = cdev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
	data = le32_to_cpu(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		memset(p_hwfn->unzip_buf, 0, max_size);

		output_len = qed_unzip_data(p_hwfn, input_len,
					    (u8 *)&array_data[offset],
					    max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						 output_len,
						 p_hwfn->unzip_buf,
						 b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
			rc = -EINVAL;
		}
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
						 dmae_array_offset + 1,
						 size, array_data,
						 b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
					 dmae_array_offset + 1,
					 size, array_data,
					 b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *p_cmd, bool b_can_dmae)
{
	u32 data = le32_to_cpu(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	union init_write_args *arg = &p_cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = le32_to_cpu(p_cmd->args.inline_val);
		qed_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = le32_to_cpu(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}

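/* Comparison callbacks used by the poll loop in qed_init_cmd_rd() below */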
static inline bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = QED_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = le32_to_cpu(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

	val = qed_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = le32_to_cpu(cmd->expected_val);
	for (i = 0;
	     i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		udelay(delay);
		val = qed_rd(p_hwfn, p_ptt, addr);
	}

	if (i == QED_INIT_MAX_POLL_COUNT) {
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, le32_to_cpu(cmd->expected_val),
		       val, le32_to_cpu(cmd->op_data));
	}
}

/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}

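/* Recursively evaluate the firmware's mode-tree expression at *p_offset
 * against the active 'modes' bitmap; returns 1 on a match, 0 otherwise, and
 * advances *p_offset past the consumed sub-expression.
 */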
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
				  u16 *p_offset, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = cdev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & BIT(tree_val)) ? 1 : 0;
	}
}

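/* If the IF_MODE condition matches, continue with the next command (return
 * 0); otherwise return the number of commands to skip.
 */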
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
			     struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

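/* Same idea for IF_PHASE: return 0 when the command block applies to the
 * current phase/phase_id, or the number of commands to skip when it does not.
 */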
static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
			      struct init_if_phase_op *p_cmd,
			      u32 phase, u32 phase_id)
{
	u32 data = le32_to_cpu(p_cmd->phase_data);
	u32 op_data = le32_to_cpu(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

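/* Main init-tool entry point: walk the firmware's init_ops command stream for
 * the given phase and execute each write/read/branch/delay command in turn.
 */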
int qed_init_run(struct qed_hwfn *p_hwfn,
		 struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	int rc = 0;

	num_init_ops = cdev->fw_data->init_ops_size;
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = le32_to_cpu(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					     b_dmae);
			break;
		case INIT_OP_READ:
			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;
		case INIT_OP_IF_MODE:
			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
						     modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
						      phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* qed_init_run is always invoked from
			 * sleep-able context
			 */
			udelay(le32_to_cpu(cmd->delay.delay));
			break;

		case INIT_OP_CALLBACK:
			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}

	kfree(p_hwfn->unzip_buf);
	return rc;
}

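/* Program the non-zero entries of pxp_global_win[] into the PXP admin global
 * window (GTT) registers; zero entries are left untouched.
 */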
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

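/* Parse the firmware file's binary buffer table and cache pointers to the
 * version info, init_ops commands, value array and modes tree in
 * cdev->fw_data.
 */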
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	/* First Dword contains metadata and should be skipped */
	buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}