// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
/**
 * tb_cfg_request_alloc() - Allocate a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 *
 * Return: Pointer to the allocated request or %NULL in case of failure.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
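
/*
 * Illustrative refcounting pattern (editor's sketch, not part of the
 * driver). A request starts with one reference from
 * tb_cfg_request_alloc(); each additional user takes its own reference
 * and drops it when done:
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	// ...fill in request/response buffers and match/copy callbacks...
 *	res = tb_cfg_request_sync(ctl, req, timeout_msec);
 *	tb_cfg_request_put(req);	// drop the initial reference
 */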

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}
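
/*
 * Editor's note on the frame layout (paraphrasing the TX/RX paths below):
 * tb_crc() computes standard CRC-32C (seed ~0, result bit-inverted) over
 * the payload, and the value is stored big-endian in the last four bytes
 * of the frame:
 *
 *	[ payload, len bytes (multiple of 4) ][ CRC-32C of payload, 4 bytes ]
 *
 * tb_ctl_tx() appends the checksum; tb_ctl_rx_callback() verifies it
 * before converting the payload back to CPU byte order.
 */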

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
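
/*
 * Example caller (editor's sketch, modeled on tb_cfg_ack_plug() below):
 * build a packet in CPU byte order and let tb_ctl_tx() do the LE->BE
 * conversion and append the CRC:
 *
 *	struct cfg_error_pkg pkg = {
 *		.header = tb_cfg_make_header(route),
 *		.port = port,
 *		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
 *	};
 *
 *	ret = tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
 */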

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	/*
	 * If the ring is stopped the packet is still referenced from
	 * ctl->rx_packets, so there is nothing to do here.
	 */
	if (canceled)
		return;

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after the timeout
	 * has triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
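
/*
 * Asynchronous usage sketch (editor's illustration; my_done and my_ctx
 * are hypothetical names). The callback runs from a work item once a
 * matching response arrives or the request is canceled:
 *
 *	static void my_done(void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		// req->result is valid here; check ctx->req->result.err
 *	}
 *
 *	ret = tb_cfg_request(ctl, req, my_done, ctx);
 *	if (ret)
 *		// the request was never queued or sent; handle the error
 */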

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
			    void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
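
/*
 * Typical lifecycle sketch (editor's illustration; tb_handle_event and
 * my_domain are hypothetical names, and the callback signature follows
 * the event_cb typedef in ctl.h as used by tb_ctl_handle_event() above):
 *
 *	static bool tb_handle_event(void *data, enum tb_cfg_pkg_type type,
 *				    const void *buf, size_t size)
 *	{
 *		// invoked for hotplug/XDomain packets from the RX path;
 *		// return true if the packet was consumed
 *		return false;
 *	}
 *
 *	ctl = tb_ctl_alloc(nhi, 100, tb_handle_event, my_domain);
 *	tb_ctl_start(ctl);
 *	// ...use tb_cfg_read()/tb_cfg_write() while the channel runs...
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */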

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as response for hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read to
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}
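
/*
 * Example (editor's sketch): read the first two dwords of a router's
 * switch config space, leaving any Thunderbolt error untranslated:
 *
 *	u32 data[2];
 *	struct tb_cfg_result res;
 *
 *	res = tb_cfg_read_raw(ctl, data, route, 0, TB_CFG_SWITCH, 0, 2,
 *			      ctl->timeout_msec);
 *	if (res.err == 1)
 *		// res.tb_error holds the TB_CFG_ERROR_* code
 */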

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}
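
/*
 * Typical read-modify-write sketch using the translating wrappers
 * (editor's illustration; the register offset and bit are made up):
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
 *	if (ret)
 *		return ret;
 *	val |= BIT(0);		// hypothetical bit to set
 *	return tb_cfg_write(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
 */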

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}