/*
 * Scrape artifact removed. The code below is Linux v6.8:
 * drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
 */
  1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2/*
  3 * Copyright (C) 2017 Intel Deutschland GmbH
  4 * Copyright (C) 2018-2022 Intel Corporation
  5 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  6#include "iwl-trans.h"
  7#include "iwl-fh.h"
  8#include "iwl-context-info.h"
  9#include "internal.h"
 10#include "iwl-prph.h"
 11
/*
 * _iwl_pcie_ctxt_info_dma_alloc_coherent - coherent DMA alloc that must
 * not cross a 2^32 boundary
 *
 * Allocates @size bytes of coherent DMA memory and retries (recursively,
 * up to @depth == 2) when the returned region straddles a 4 GB boundary.
 * Returns the CPU address and fills *@phys, or NULL on failure.
 */
static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
						    size_t size,
						    dma_addr_t *phys,
						    int depth)
{
	void *result;

	/* give up after three attempts (depth 0, 1, 2) all crossed 4 GB */
	if (WARN(depth > 2,
		 "failed to allocate DMA memory not crossing 2^32 boundary"))
		return NULL;

	result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);

	if (!result)
		return NULL;

	if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
		void *old = result;
		dma_addr_t oldphys = *phys;

		/*
		 * Retry while still holding the unsuitable buffer so the
		 * allocator hands back a different region; the old buffer
		 * is freed only after the recursive attempt returns.
		 */
		result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
								phys,
								depth + 1);
		dma_free_coherent(trans->dev, size, old, oldphys);
	}

	return result;
}
 40
 41void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
 42					    size_t size,
 43					    dma_addr_t *phys)
 44{
 45	return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
 46}
 47
 48int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
 49				 const void *data, u32 len,
 50				 struct iwl_dram_data *dram)
 51{
 52	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
 53							    &dram->physical);
 54	if (!dram->block)
 55		return -ENOMEM;
 56
 57	dram->size = len;
 58	memcpy(dram->block, data, len);
 59
 60	return 0;
 61}
 62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 63void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 64{
 65	struct iwl_self_init_dram *dram = &trans->init_dram;
 
 66	int i;
 67
 68	if (!dram->paging) {
 69		WARN_ON(dram->paging_cnt);
 70		return;
 71	}
 72
 73	/* free paging*/
 74	for (i = 0; i < dram->paging_cnt; i++)
 75		dma_free_coherent(trans->dev, dram->paging[i].size,
 76				  dram->paging[i].block,
 77				  dram->paging[i].physical);
 78
 79	kfree(dram->paging);
 80	dram->paging_cnt = 0;
 81	dram->paging = NULL;
 82}
 83
/*
 * iwl_pcie_init_fw_sec - load firmware sections into DMA memory
 *
 * Copies the lmac, umac and paging sections of @fw into coherent DMA
 * buffers (tracked in trans->init_dram) and writes their physical
 * addresses into @ctxt_dram so the device can fetch them during
 * self-load.
 *
 * Returns 0 on success or a negative errno. NOTE(review): on error the
 * buffers already allocated here stay recorded in trans->init_dram -
 * presumably the caller's error/teardown path releases them; confirm.
 */
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
			 const struct fw_img *fw,
			 struct iwl_context_info_dram *ctxt_dram)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	/* a stale paging array means a previous init wasn't torn down */
	if (WARN(dram->paging,
		 "paging shouldn't already be initialized (%d pages)\n",
		 dram->paging_cnt))
		iwl_pcie_ctxt_info_free_paging(trans);

	/*
	 * Section layout in fw->sec:
	 *   [lmac...][separator][umac...][separator][paging...]
	 */
	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
	/* add 1 due to separator */
	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);

	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;
	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
	if (!dram->paging)
		return -ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
						   fw->sec[i].len,
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						   fw->sec[dram->fw_cnt + 1].data,
						   fw->sec[dram->fw_cnt + 1].len,
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
						   fw->sec[fw_idx].len,
						   &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] =
			cpu_to_le64(dram->paging[i].physical);
		dram->paging_cnt++;
	}

	return 0;
}
162
/*
 * iwl_pcie_ctxt_info_init - build the context info and kick FW self-load
 *
 * Allocates the context info structure in boundary-safe coherent DMA
 * memory, fills in version, RX/TX queue configuration and the firmware
 * section addresses, then writes its DMA address to CSR_CTXT_INFO_BA so
 * the device starts loading the firmware.
 *
 * Returns 0 on success or a negative errno; on failure the context info
 * buffer allocated here is freed again.
 */
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0, rb_size;
	dma_addr_t phys;
	int ret;

	/* the device reads this via DMA; must not cross a 2^32 boundary */
	ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
							  sizeof(*ctxt_info),
							  &phys);
	if (!ctxt_info)
		return -ENOMEM;

	trans_pcie->ctxt_info_dma_addr = phys;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	/*
	 * Translate the configured RX buffer size into the context-info
	 * encoding. NOTE(review): IWL_AMSDU_12K maps to the 16K setting -
	 * presumably only these RB sizes are encodable; confirm against
	 * the context info definitions.
	 */
	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_16K;
		break;
	default:
		WARN_ON(1);
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
	}

	/* NOTE(review): values above 12 appear unencodable here - confirm */
	WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
	control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
	control_flags |=
		u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
				IWL_CTXT_INFO_RB_CB_SIZE);
	control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	/*
	 * NOTE(review): the FW-load interrupt is enabled before the BA
	 * write below - ordering looks intentional so the interrupt
	 * isn't missed; confirm.
	 */
	iwl_enable_fw_load_int_ctx_info(trans);

	/* Configure debug, if exists */
	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
248
249void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
250{
251	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
252
253	if (!trans_pcie->ctxt_info)
254		return;
255
256	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
257			  trans_pcie->ctxt_info,
258			  trans_pcie->ctxt_info_dma_addr);
259	trans_pcie->ctxt_info_dma_addr = 0;
260	trans_pcie->ctxt_info = NULL;
261
262	iwl_pcie_ctxt_info_free_fw_img(trans);
263}
/*
 * Scrape artifact removed. Below is the older Linux v4.17 version of the
 * same file, captured by the same page scrape.
 */
  1/******************************************************************************
  2 *
  3 * This file is provided under a dual BSD/GPLv2 license.  When using or
  4 * redistributing this file, you may do so under either license.
  5 *
  6 * GPL LICENSE SUMMARY
  7 *
  8 * Copyright(c) 2017 Intel Deutschland GmbH
  9 *
 10 * This program is free software; you can redistribute it and/or modify
 11 * it under the terms of version 2 of the GNU General Public License as
 12 * published by the Free Software Foundation.
 13 *
 14 * This program is distributed in the hope that it will be useful, but
 15 * WITHOUT ANY WARRANTY; without even the implied warranty of
 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 17 * General Public License for more details.
 18 *
 19 * BSD LICENSE
 20 *
 21 * Copyright(c) 2017 Intel Deutschland GmbH
 22 * All rights reserved.
 23 *
 24 * Redistribution and use in source and binary forms, with or without
 25 * modification, are permitted provided that the following conditions
 26 * are met:
 27 *
 28 *  * Redistributions of source code must retain the above copyright
 29 *    notice, this list of conditions and the following disclaimer.
 30 *  * Redistributions in binary form must reproduce the above copyright
 31 *    notice, this list of conditions and the following disclaimer in
 32 *    the documentation and/or other materials provided with the
 33 *    distribution.
 34 *  * Neither the name Intel Corporation nor the names of its
 35 *    contributors may be used to endorse or promote products derived
 36 *    from this software without specific prior written permission.
 37 *
 38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 49 *
 50 *****************************************************************************/
 51
 52#include "iwl-trans.h"
 53#include "iwl-fh.h"
 54#include "iwl-context-info.h"
 55#include "internal.h"
 56#include "iwl-prph.h"
 57
 58static int iwl_pcie_get_num_sections(const struct fw_img *fw,
 59				     int start)
 
 
 60{
 61	int i = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 62
 63	while (start < fw->num_sec &&
 64	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
 65	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
 66		start++;
 67		i++;
 68	}
 69
 70	return i;
 71}
 72
 73static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
 74					const struct fw_desc *sec,
 75					struct iwl_dram_data *dram)
 76{
 77	dram->block = dma_alloc_coherent(trans->dev, sec->len,
 78					 &dram->physical,
 79					 GFP_KERNEL);
 
 
 
 
 
 
 80	if (!dram->block)
 81		return -ENOMEM;
 82
 83	dram->size = sec->len;
 84	memcpy(dram->block, sec->data, sec->len);
 85
 86	return 0;
 87}
 88
 89static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
 90{
 91	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 92	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
 93	int i;
 94
 95	if (!dram->fw) {
 96		WARN_ON(dram->fw_cnt);
 97		return;
 98	}
 99
100	for (i = 0; i < dram->fw_cnt; i++)
101		dma_free_coherent(trans->dev, dram->fw[i].size,
102				  dram->fw[i].block, dram->fw[i].physical);
103
104	kfree(dram->fw);
105	dram->fw_cnt = 0;
106	dram->fw = NULL;
107}
108
109void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
110{
111	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
112	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
113	int i;
114
115	if (!dram->paging) {
116		WARN_ON(dram->paging_cnt);
117		return;
118	}
119
120	/* free paging*/
121	for (i = 0; i < dram->paging_cnt; i++)
122		dma_free_coherent(trans->dev, dram->paging[i].size,
123				  dram->paging[i].block,
124				  dram->paging[i].physical);
125
126	kfree(dram->paging);
127	dram->paging_cnt = 0;
128	dram->paging = NULL;
129}
130
/*
 * iwl_pcie_ctxt_info_init_fw_sec - load firmware sections into DMA memory
 *
 * Copies the lmac, umac and paging sections of @fw into coherent DMA
 * buffers (tracked in trans_pcie->init_dram) and writes their physical
 * addresses into @ctxt_info->dram so the device can fetch them during
 * self-load.
 *
 * Returns 0 on success or a negative errno. NOTE(review): on error the
 * buffers already allocated here stay recorded in init_dram -
 * presumably the caller's error/teardown path releases them; confirm.
 */
static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
					  const struct fw_img *fw,
					  struct iwl_context_info *ctxt_info)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	/* a stale paging array means a previous init wasn't torn down */
	if (WARN(dram->paging,
		 "paging shouldn't already be initialized (%d pages)\n",
		 dram->paging_cnt))
		iwl_pcie_ctxt_info_free_paging(trans);

	/*
	 * Section layout in fw->sec:
	 *   [lmac...][separator][umac...][separator][paging...]
	 */
	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
	/* add 1 due to separator */
	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);

	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;
	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
	if (!dram->paging)
		return -ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						   &fw->sec[dram->fw_cnt + 1],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
	 * stored separately.
	 * This is since the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing so loop counter is added to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
						   &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] =
			cpu_to_le64(dram->paging[i].physical);
		dram->paging_cnt++;
	}

	return 0;
}
208
/*
 * iwl_pcie_ctxt_info_init - build the context info and kick FW self-load
 *
 * Allocates the context info structure in coherent DMA memory, fills in
 * version, RX/TX queue configuration and firmware section addresses,
 * then writes its DMA address to CSR_CTXT_INFO_BA and sets
 * UREG_CPU_INIT_RUN to start the device's firmware self-load.
 *
 * Returns 0 on success or a negative errno; on failure the context info
 * buffer allocated here is freed again.
 */
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0;
	int ret;

	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
				       &trans_pcie->ctxt_info_dma_addr,
				       GFP_KERNEL);
	if (!ctxt_info)
		return -ENOMEM;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	/* the RB CB size must fit in its 4-bit field */
	BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
	control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
			IWL_CTXT_INFO_TFD_FORMAT_LONG |
			RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
			IWL_CTXT_INFO_RB_CB_SIZE_POS;
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	/*
	 * NOTE(review): interrupts are enabled before the register writes
	 * below - ordering looks intentional so the firmware-load
	 * interrupt isn't missed; confirm.
	 */
	iwl_enable_interrupts(trans);

	/* Configure debug, if exists */
	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
273
274void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
275{
276	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
277
278	if (!trans_pcie->ctxt_info)
279		return;
280
281	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
282			  trans_pcie->ctxt_info,
283			  trans_pcie->ctxt_info_dma_addr);
284	trans_pcie->ctxt_info_dma_addr = 0;
285	trans_pcie->ctxt_info = NULL;
286
287	iwl_pcie_ctxt_info_free_fw_img(trans);
288}