Linux kernel source: drivers/thunderbolt/dma_port.c
Two revisions of the file follow: first the v6.8 version, then the v4.17 version.

=== v6.8 ===
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Thunderbolt DMA configuration based mailbox support
  4 *
  5 * Copyright (C) 2017, Intel Corporation
  6 * Authors: Michael Jamet <michael.jamet@intel.com>
  7 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 
 
 
 
  8 */
  9
 10#include <linux/delay.h>
 11#include <linux/slab.h>
 12
 13#include "dma_port.h"
 14#include "tb_regs.h"
 15
 16#define DMA_PORT_CAP			0x3e
 17
 18#define MAIL_DATA			1
 19#define MAIL_DATA_DWORDS		16
 20
 21#define MAIL_IN				17
 22#define MAIL_IN_CMD_SHIFT		28
 23#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
 24#define MAIL_IN_CMD_FLASH_WRITE		0x0
 25#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
 26#define MAIL_IN_CMD_FLASH_READ		0x2
 27#define MAIL_IN_CMD_POWER_CYCLE		0x4
 28#define MAIL_IN_DWORDS_SHIFT		24
 29#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
 30#define MAIL_IN_ADDRESS_SHIFT		2
 31#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
 32#define MAIL_IN_CSS			BIT(1)
 33#define MAIL_IN_OP_REQUEST		BIT(0)
 34
 35#define MAIL_OUT			18
 36#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
 37#define MAIL_OUT_STATUS_CMD_SHIFT	4
 38#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
 39#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
 40#define MAIL_OUT_STATUS_COMPLETED	0
 41#define MAIL_OUT_STATUS_ERR_AUTH	1
 42#define MAIL_OUT_STATUS_ERR_ACCESS	2
 43
 44#define DMA_PORT_TIMEOUT		5000 /* ms */
 45#define DMA_PORT_RETRIES		3
 46
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;	/* Found by dma_find_port(): 3, 5 or 7 */
	u32 base;	/* Currently always DMA_PORT_CAP (see dma_port_alloc()) */
	u8 *buf;	/* MAIL_DATA_DWORDS dwords, allocated in dma_port_alloc() */
};
 60
 61/*
 62 * When the switch is in safe mode it supports very little functionality
 63 * so we don't validate that much here.
 64 */
 65static bool dma_port_match(const struct tb_cfg_request *req,
 66			   const struct ctl_pkg *pkg)
 67{
 68	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
 69
 70	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
 71		return true;
 72	if (pkg->frame.eof != req->response_type)
 73		return false;
 74	if (route != tb_cfg_get_route(req->request))
 75		return false;
 76	if (pkg->frame.size != req->response_size)
 77		return false;
 78
 79	return true;
 80}
 81
/* Copy the full response packet into the request's response buffer */
static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}
 87
/*
 * Reads @length dwords from the config space of @port behind @route
 * using a raw control channel request (usable also when the switch is
 * in safe mode).
 */
static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	/* 12 byte packet header followed by the data dwords */
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}
128
/*
 * Writes @length dwords from @buffer to the config space of @port
 * behind @route. Mirror image of dma_port_read().
 */
static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	/* 12 byte packet header followed by the data dwords */
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}
167
/*
 * Returns the port number of the NHI (DMA) port of @sw, or -ENODEV if
 * none of the candidate ports identifies itself as an NHI port.
 */
static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		/* Dword 2 of port config space holds the port type in its low 24 bits */
		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}
189
190/**
191 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
192 * @sw: Switch from where find the DMA port
193 *
194 * Function checks if the switch NHI port supports DMA configuration
195 * based mailbox capability and if it does, allocates and initializes
196 * DMA port structure. Returns %NULL if the capabity was not found.
197 *
198 * The DMA control port is functional also when the switch is in safe
199 * mode.
200 */
201struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
202{
203	struct tb_dma_port *dma;
204	int port;
205
206	port = dma_find_port(sw);
207	if (port < 0)
208		return NULL;
209
210	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
211	if (!dma)
212		return NULL;
213
214	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
215	if (!dma->buf) {
216		kfree(dma);
217		return NULL;
218	}
219
220	dma->sw = sw;
221	dma->port = port;
222	dma->base = DMA_PORT_CAP;
223
224	return dma;
225}
226
227/**
228 * dma_port_free() - Release DMA control port structure
229 * @dma: DMA control port
230 */
231void dma_port_free(struct tb_dma_port *dma)
232{
233	if (dma) {
234		kfree(dma->buf);
235		kfree(dma);
236	}
237}
238
/*
 * Polls the MAIL_IN register until the hardware clears
 * MAIL_IN_OP_REQUEST (operation done) or @timeout ms have elapsed.
 * Individual register reads use a short 50 ms timeout; a timed out
 * read is retried until the overall deadline expires.
 */
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			/* Only a timed out read is retried */
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			/* Request bit cleared -> operation completed */
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}
263
264static int status_to_errno(u32 status)
265{
266	switch (status & MAIL_OUT_STATUS_MASK) {
267	case MAIL_OUT_STATUS_COMPLETED:
268		return 0;
269	case MAIL_OUT_STATUS_ERR_AUTH:
270		return -EINVAL;
271	case MAIL_OUT_STATUS_ERR_ACCESS:
272		return -EACCES;
273	}
274
275	return -EIO;
276}
277
/*
 * Posts command word @in to MAIL_IN, waits up to @timeout ms for the
 * operation to complete and converts the status the hardware put in
 * MAIL_OUT into an errno.
 */
static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	/* Kick off the operation */
	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	/* Completed operation's status is reported in MAIL_OUT */
	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}
301
/*
 * Block read callback for tb_nvm_read_data(). @dwaddress and @dwords
 * are in dword units; at most MAIL_DATA_DWORDS dwords per call.
 */
static int dma_port_flash_read_block(void *data, unsigned int dwaddress,
				     void *buf, size_t dwords)
{
	struct tb_dma_port *dma = data;
	struct tb_switch *sw = dma->sw;
	int ret;
	u32 in;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	/* Count field is left zero for a full block — presumably the HW default */
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Result is delivered through the MAIL_DATA registers */
	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
323
/*
 * Block write callback for tb_nvm_write_data(). @dwaddress and @dwords
 * are in dword units.
 */
static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
				      const void *buf, size_t dwords)
{
	struct tb_dma_port *dma = data;
	struct tb_switch *sw = dma->sw;
	int ret;
	u32 in;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			    dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (dwaddress >= DMA_PORT_CSS_ADDRESS)
		in |= MAIL_IN_CSS;

	/* Unlike the read side, the count field encodes dwords - 1 here */
	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}
350
/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 *
 * Return: %0 on success, negative errno otherwise.
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	/* Chunking, alignment and retries are handled by tb_nvm_read_data() */
	return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
				dma_port_flash_read_block, dma);
}
364
/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes block of data to the non-active flash region of the switch. If
 * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using CSS command.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	/* The CSS header area has a fixed maximum size */
	if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE)
		return -E2BIG;

	/* Chunking, alignment and retries are handled by tb_nvm_write_data() */
	return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES,
				 dma_port_flash_write_block, dma);
}
385
386/**
387 * dma_port_flash_update_auth() - Starts flash authenticate cycle
388 * @dma: DMA control port
389 *
390 * Starts the flash update authentication cycle. If the image in the
391 * non-active area was valid, the switch starts upgrade process where
392 * active and non-active area get swapped in the end. Caller should call
393 * dma_port_flash_update_auth_status() to get status of this command.
394 * This is because if the switch in question is root switch the
395 * thunderbolt host controller gets reset as well.
396 */
397int dma_port_flash_update_auth(struct tb_dma_port *dma)
398{
399	u32 in;
400
401	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
402	in |= MAIL_IN_OP_REQUEST;
403
404	return dma_port_request(dma, in, 150);
405}
406
/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation (may be %NULL)
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	/* MAIL_OUT carries the status of the last mailbox operation */
	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}
443
444/**
445 * dma_port_power_cycle() - Power cycles the switch
446 * @dma: DMA control port
447 *
448 * Triggers power cycle to the switch.
449 */
450int dma_port_power_cycle(struct tb_dma_port *dma)
451{
452	u32 in;
453
454	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
455	in |= MAIL_IN_OP_REQUEST;
456
457	return dma_port_request(dma, in, 150);
458}
=== v4.17 version of the same file follows ===
 
  1/*
  2 * Thunderbolt DMA configuration based mailbox support
  3 *
  4 * Copyright (C) 2017, Intel Corporation
  5 * Authors: Michael Jamet <michael.jamet@intel.com>
  6 *          Mika Westerberg <mika.westerberg@linux.intel.com>
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License version 2 as
 10 * published by the Free Software Foundation.
 11 */
 12
 13#include <linux/delay.h>
 14#include <linux/slab.h>
 15
 16#include "dma_port.h"
 17#include "tb_regs.h"
 18
 19#define DMA_PORT_CAP			0x3e
 20
 21#define MAIL_DATA			1
 22#define MAIL_DATA_DWORDS		16
 23
 24#define MAIL_IN				17
 25#define MAIL_IN_CMD_SHIFT		28
 26#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
 27#define MAIL_IN_CMD_FLASH_WRITE		0x0
 28#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
 29#define MAIL_IN_CMD_FLASH_READ		0x2
 30#define MAIL_IN_CMD_POWER_CYCLE		0x4
 31#define MAIL_IN_DWORDS_SHIFT		24
 32#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
 33#define MAIL_IN_ADDRESS_SHIFT		2
 34#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
 35#define MAIL_IN_CSS			BIT(1)
 36#define MAIL_IN_OP_REQUEST		BIT(0)
 37
 38#define MAIL_OUT			18
 39#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
 40#define MAIL_OUT_STATUS_CMD_SHIFT	4
 41#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
 42#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
 43#define MAIL_OUT_STATUS_COMPLETED	0
 44#define MAIL_OUT_STATUS_ERR_AUTH	1
 45#define MAIL_OUT_STATUS_ERR_ACCESS	2
 46
 47#define DMA_PORT_TIMEOUT		5000 /* ms */
 48#define DMA_PORT_RETRIES		3
 49
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;	/* Found by dma_find_port(): 3, 5 or 7 */
	u32 base;	/* Currently always DMA_PORT_CAP (see dma_port_alloc()) */
	u8 *buf;	/* Bounce buffer for flash read/write, MAIL_DATA_DWORDS dwords */
};
 63
 64/*
 65 * When the switch is in safe mode it supports very little functionality
 66 * so we don't validate that much here.
 67 */
 68static bool dma_port_match(const struct tb_cfg_request *req,
 69			   const struct ctl_pkg *pkg)
 70{
 71	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
 72
 73	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
 74		return true;
 75	if (pkg->frame.eof != req->response_type)
 76		return false;
 77	if (route != tb_cfg_get_route(req->request))
 78		return false;
 79	if (pkg->frame.size != req->response_size)
 80		return false;
 81
 82	return true;
 83}
 84
/* Copy the full response packet into the request's response buffer */
static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}
 90
/*
 * Reads @length dwords from the config space of @port behind @route
 * using a raw control channel request (usable also when the switch is
 * in safe mode).
 */
static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	/* 12 byte packet header followed by the data dwords */
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}
131
/*
 * Writes @length dwords from @buffer to the config space of @port
 * behind @route. Mirror image of dma_port_read().
 */
static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	/* 12 byte packet header followed by the data dwords */
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}
170
/*
 * Returns the port number of the NHI (DMA) port of @sw, or -ENODEV if
 * none of the candidate ports identifies itself as an NHI port.
 */
static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		/* Dword 2 of port config space holds the port type in its low 24 bits */
		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}
192
193/**
194 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
195 * @sw: Switch from where find the DMA port
196 *
197 * Function checks if the switch NHI port supports DMA configuration
198 * based mailbox capability and if it does, allocates and initializes
199 * DMA port structure. Returns %NULL if the capabity was not found.
200 *
201 * The DMA control port is functional also when the switch is in safe
202 * mode.
203 */
204struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
205{
206	struct tb_dma_port *dma;
207	int port;
208
209	port = dma_find_port(sw);
210	if (port < 0)
211		return NULL;
212
213	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
214	if (!dma)
215		return NULL;
216
217	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
218	if (!dma->buf) {
219		kfree(dma);
220		return NULL;
221	}
222
223	dma->sw = sw;
224	dma->port = port;
225	dma->base = DMA_PORT_CAP;
226
227	return dma;
228}
229
230/**
231 * dma_port_free() - Release DMA control port structure
232 * @dma: DMA control port
233 */
234void dma_port_free(struct tb_dma_port *dma)
235{
236	if (dma) {
237		kfree(dma->buf);
238		kfree(dma);
239	}
240}
241
/*
 * Polls the MAIL_IN register until the hardware clears
 * MAIL_IN_OP_REQUEST (operation done) or @timeout ms have elapsed.
 * Individual register reads use a short 50 ms timeout; a timed out
 * read is retried until the overall deadline expires.
 */
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			/* Only a timed out read is retried */
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			/* Request bit cleared -> operation completed */
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}
266
267static int status_to_errno(u32 status)
268{
269	switch (status & MAIL_OUT_STATUS_MASK) {
270	case MAIL_OUT_STATUS_COMPLETED:
271		return 0;
272	case MAIL_OUT_STATUS_ERR_AUTH:
273		return -EINVAL;
274	case MAIL_OUT_STATUS_ERR_ACCESS:
275		return -EACCES;
276	}
277
278	return -EIO;
279}
280
/*
 * Posts command word @in to MAIL_IN, waits up to @timeout ms for the
 * operation to complete and converts the status the hardware put in
 * MAIL_OUT into an errno.
 */
static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	/* Kick off the operation */
	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	/* Completed operation's status is reported in MAIL_OUT */
	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}
304
/*
 * Reads one block (at most MAIL_DATA_DWORDS dwords) from the active
 * flash region. @address and @size are in bytes and are divided by 4
 * without rounding, so callers must pass dword aligned values.
 */
static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	/* Count field is left zero for a full block — presumably the HW default */
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Result is delivered through the MAIL_DATA registers */
	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
328
329static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
330				      const void *buf, u32 size)
331{
 
332	struct tb_switch *sw = dma->sw;
333	u32 in, dwaddress, dwords;
334	int ret;
335
336	dwords = size / 4;
337
338	/* Write the block to MAIL_DATA registers */
339	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
340			    dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
 
 
341
342	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
343
344	/* CSS header write is always done to the same magic address */
345	if (address >= DMA_PORT_CSS_ADDRESS) {
346		dwaddress = DMA_PORT_CSS_ADDRESS;
347		in |= MAIL_IN_CSS;
348	} else {
349		dwaddress = address / 4;
350	}
351
352	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
353	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
354	in |= MAIL_IN_OP_REQUEST;
355
356	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
357}
358
359/**
360 * dma_port_flash_read() - Read from active flash region
361 * @dma: DMA control port
362 * @address: Address relative to the start of active region
363 * @buf: Buffer where the data is read
364 * @size: Size of the buffer
365 */
366int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
367			void *buf, size_t size)
368{
369	unsigned int retries = DMA_PORT_RETRIES;
370	unsigned int offset;
371
372	offset = address & 3;
373	address = address & ~3;
374
375	do {
376		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
377		int ret;
378
379		ret = dma_port_flash_read_block(dma, address, dma->buf,
380						ALIGN(nbytes, 4));
381		if (ret) {
382			if (ret == -ETIMEDOUT) {
383				if (retries--)
384					continue;
385				ret = -EIO;
386			}
387			return ret;
388		}
389
390		memcpy(buf, dma->buf + offset, nbytes);
391
392		size -= nbytes;
393		address += nbytes;
394		buf += nbytes;
395	} while (size > 0);
396
397	return 0;
398}
399
400/**
401 * dma_port_flash_write() - Write to non-active flash region
402 * @dma: DMA control port
403 * @address: Address relative to the start of non-active region
404 * @buf: Data to write
405 * @size: Size of the buffer
406 *
407 * Writes block of data to the non-active flash region of the switch. If
408 * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
409 * using CSS command.
410 */
411int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
412			 const void *buf, size_t size)
413{
414	unsigned int retries = DMA_PORT_RETRIES;
415	unsigned int offset;
416
417	if (address >= DMA_PORT_CSS_ADDRESS) {
418		offset = 0;
419		if (size > DMA_PORT_CSS_MAX_SIZE)
420			return -E2BIG;
421	} else {
422		offset = address & 3;
423		address = address & ~3;
424	}
425
426	do {
427		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
428		int ret;
429
430		memcpy(dma->buf + offset, buf, nbytes);
431
432		ret = dma_port_flash_write_block(dma, address, buf, nbytes);
433		if (ret) {
434			if (ret == -ETIMEDOUT) {
435				if (retries--)
436					continue;
437				ret = -EIO;
438			}
439			return ret;
440		}
441
442		size -= nbytes;
443		address += nbytes;
444		buf += nbytes;
445	} while (size > 0);
446
447	return 0;
448}
449
450/**
451 * dma_port_flash_update_auth() - Starts flash authenticate cycle
452 * @dma: DMA control port
453 *
454 * Starts the flash update authentication cycle. If the image in the
455 * non-active area was valid, the switch starts upgrade process where
456 * active and non-active area get swapped in the end. Caller should call
457 * dma_port_flash_update_auth_status() to get status of this command.
458 * This is because if the switch in question is root switch the
459 * thunderbolt host controller gets reset as well.
460 */
461int dma_port_flash_update_auth(struct tb_dma_port *dma)
462{
463	u32 in;
464
465	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
466	in |= MAIL_IN_OP_REQUEST;
467
468	return dma_port_request(dma, in, 150);
469}
470
/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation (may be %NULL)
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	/* MAIL_OUT carries the status of the last mailbox operation */
	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}
507
508/**
509 * dma_port_power_cycle() - Power cycles the switch
510 * @dma: DMA control port
511 *
512 * Triggers power cycle to the switch.
513 */
514int dma_port_power_cycle(struct tb_dma_port *dma)
515{
516	u32 in;
517
518	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
519	in |= MAIL_IN_OP_REQUEST;
520
521	return dma_port_request(dma, in, 150);
522}