   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Greybus interface code
   4 *
   5 * Copyright 2014 Google Inc.
   6 * Copyright 2014 Linaro Ltd.
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/greybus.h>
  11
  12#include "greybus_trace.h"
  13
  14#define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000
  15
  16#define GB_INTERFACE_DEVICE_ID_BAD	0xff
  17
  18#define GB_INTERFACE_AUTOSUSPEND_MS			3000
  19
  20/* Time required for interface to enter standby before disabling REFCLK */
  21#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS			20
  22
  23/* Don't-care selector index */
  24#define DME_SELECTOR_INDEX_NULL		0
  25
  26/* DME attributes */
  27/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
  28#define DME_T_TST_SRC_INCREMENT		0x4083
  29
  30#define DME_DDBL1_MANUFACTURERID	0x5003
  31#define DME_DDBL1_PRODUCTID		0x5004
  32
  33#define DME_TOSHIBA_GMP_VID		0x6000
  34#define DME_TOSHIBA_GMP_PID		0x6001
  35#define DME_TOSHIBA_GMP_SN0		0x6002
  36#define DME_TOSHIBA_GMP_SN1		0x6003
  37#define DME_TOSHIBA_GMP_INIT_STATUS	0x6101
  38
  39/* DDBL1 Manufacturer and Product ids */
  40#define TOSHIBA_DMID			0x0126
  41#define TOSHIBA_ES2_BRIDGE_DPID		0x1000
  42#define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
  43#define TOSHIBA_ES3_GBPHY_DPID		0x1002
  44
  45static int gb_interface_hibernate_link(struct gb_interface *intf);
  46static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
  47
  48static int gb_interface_dme_attr_get(struct gb_interface *intf,
  49				     u16 attr, u32 *val)
  50{
  51	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
  52					attr, DME_SELECTOR_INDEX_NULL, val);
  53}
  54
  55static int gb_interface_read_ara_dme(struct gb_interface *intf)
  56{
  57	u32 sn0, sn1;
  58	int ret;
  59
  60	/*
  61	 * Unless this is a Toshiba bridge, bail out until we have defined
  62	 * standard GMP attributes.
  63	 */
  64	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
  65		dev_err(&intf->dev, "unknown manufacturer %08x\n",
  66			intf->ddbl1_manufacturer_id);
  67		return -ENODEV;
  68	}
  69
  70	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
  71					&intf->vendor_id);
  72	if (ret)
  73		return ret;
  74
  75	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
  76					&intf->product_id);
  77	if (ret)
  78		return ret;
  79
  80	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
  81	if (ret)
  82		return ret;
  83
  84	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
  85	if (ret)
  86		return ret;
  87
  88	intf->serial_number = (u64)sn1 << 32 | sn0;
  89
  90	return 0;
  91}
  92
  93static int gb_interface_read_dme(struct gb_interface *intf)
  94{
  95	int ret;
  96
  97	/* DME attributes have already been read */
  98	if (intf->dme_read)
  99		return 0;
 100
 101	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
 102					&intf->ddbl1_manufacturer_id);
 103	if (ret)
 104		return ret;
 105
 106	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
 107					&intf->ddbl1_product_id);
 108	if (ret)
 109		return ret;
 110
 111	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
 112	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
 113		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
 114		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
 115	}
 116
 117	ret = gb_interface_read_ara_dme(intf);
 118	if (ret)
 119		return ret;
 120
 121	intf->dme_read = true;
 122
 123	return 0;
 124}
 125
 126static int gb_interface_route_create(struct gb_interface *intf)
 127{
 128	struct gb_svc *svc = intf->hd->svc;
 129	u8 intf_id = intf->interface_id;
 130	u8 device_id;
 131	int ret;
 132
 133	/* Allocate an interface device id. */
 134	ret = ida_simple_get(&svc->device_id_map,
 135			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
 136			     GFP_KERNEL);
 137	if (ret < 0) {
 138		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
 139		return ret;
 140	}
 141	device_id = ret;
 142
 143	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
 144	if (ret) {
 145		dev_err(&intf->dev, "failed to set device id %u: %d\n",
 146			device_id, ret);
 147		goto err_ida_remove;
 148	}
 149
 150	/* FIXME: Hard-coded AP device id. */
 151	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
 152				  intf_id, device_id);
 153	if (ret) {
 154		dev_err(&intf->dev, "failed to create route: %d\n", ret);
 155		goto err_svc_id_free;
 156	}
 157
 158	intf->device_id = device_id;
 159
 160	return 0;
 161
 162err_svc_id_free:
 163	/*
 164	 * XXX Should we tell the SVC that this id no longer belongs to the
 165	 * XXX interface?
 166	 */
 167err_ida_remove:
 168	ida_simple_remove(&svc->device_id_map, device_id);
 169
 170	return ret;
 171}
 172
 173static void gb_interface_route_destroy(struct gb_interface *intf)
 174{
 175	struct gb_svc *svc = intf->hd->svc;
 176
 177	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
 178		return;
 179
 180	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
 181	ida_simple_remove(&svc->device_id_map, intf->device_id);
 182	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
 183}
 184
 185/* Locking: Caller holds the interface mutex. */
 186static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
 187{
 188	int ret;
 189
 190	dev_info(&intf->dev, "legacy mode switch detected\n");
 191
 192	/* Mark as disconnected to prevent I/O during disable. */
 193	intf->disconnected = true;
 194	gb_interface_disable(intf);
 195	intf->disconnected = false;
 196
 197	ret = gb_interface_enable(intf);
 198	if (ret) {
 199		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
 200		gb_interface_deactivate(intf);
 201	}
 202
 203	return ret;
 204}
 205
 206void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
 207				u32 mailbox)
 208{
 209	mutex_lock(&intf->mutex);
 210
 211	if (result) {
 212		dev_warn(&intf->dev,
 213			 "mailbox event with UniPro error: 0x%04x\n",
 214			 result);
 215		goto err_disable;
 216	}
 217
 218	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
 219		dev_warn(&intf->dev,
 220			 "mailbox event with unexpected value: 0x%08x\n",
 221			 mailbox);
 222		goto err_disable;
 223	}
 224
 225	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
 226		gb_interface_legacy_mode_switch(intf);
 227		goto out_unlock;
 228	}
 229
 230	if (!intf->mode_switch) {
 231		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
 232			 mailbox);
 233		goto err_disable;
 234	}
 235
 236	dev_info(&intf->dev, "mode switch detected\n");
 237
 238	complete(&intf->mode_switch_completion);
 239
 240out_unlock:
 241	mutex_unlock(&intf->mutex);
 242
 243	return;
 244
 245err_disable:
 246	gb_interface_disable(intf);
 247	gb_interface_deactivate(intf);
 248	mutex_unlock(&intf->mutex);
 249}
 250
 251static void gb_interface_mode_switch_work(struct work_struct *work)
 252{
 253	struct gb_interface *intf;
 254	struct gb_control *control;
 255	unsigned long timeout;
 256	int ret;
 257
 258	intf = container_of(work, struct gb_interface, mode_switch_work);
 259
 260	mutex_lock(&intf->mutex);
 261	/* Make sure interface is still enabled. */
 262	if (!intf->enabled) {
 263		dev_dbg(&intf->dev, "mode switch aborted\n");
 264		intf->mode_switch = false;
 265		mutex_unlock(&intf->mutex);
 266		goto out_interface_put;
 267	}
 268
 269	/*
 270	 * Prepare the control device for mode switch and make sure to get an
 271	 * extra reference before it goes away during interface disable.
 272	 */
 273	control = gb_control_get(intf->control);
 274	gb_control_mode_switch_prepare(control);
 275	gb_interface_disable(intf);
 276	mutex_unlock(&intf->mutex);
 277
 278	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
 279	ret = wait_for_completion_interruptible_timeout(
 280			&intf->mode_switch_completion, timeout);
 281
 282	/* Finalise control-connection mode switch. */
 283	gb_control_mode_switch_complete(control);
 284	gb_control_put(control);
 285
 286	if (ret < 0) {
 287		dev_err(&intf->dev, "mode switch interrupted\n");
 288		goto err_deactivate;
 289	} else if (ret == 0) {
 290		dev_err(&intf->dev, "mode switch timed out\n");
 291		goto err_deactivate;
 292	}
 293
 294	/* Re-enable (re-enumerate) interface if still active. */
 295	mutex_lock(&intf->mutex);
 296	intf->mode_switch = false;
 297	if (intf->active) {
 298		ret = gb_interface_enable(intf);
 299		if (ret) {
 300			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
 301				ret);
 302			gb_interface_deactivate(intf);
 303		}
 304	}
 305	mutex_unlock(&intf->mutex);
 306
 307out_interface_put:
 308	gb_interface_put(intf);
 309
 310	return;
 311
 312err_deactivate:
 313	mutex_lock(&intf->mutex);
 314	intf->mode_switch = false;
 315	gb_interface_deactivate(intf);
 316	mutex_unlock(&intf->mutex);
 317
 318	gb_interface_put(intf);
 319}
 320
 321int gb_interface_request_mode_switch(struct gb_interface *intf)
 322{
 323	int ret = 0;
 324
 325	mutex_lock(&intf->mutex);
 326	if (intf->mode_switch) {
 327		ret = -EBUSY;
 328		goto out_unlock;
 329	}
 330
 331	intf->mode_switch = true;
 332	reinit_completion(&intf->mode_switch_completion);
 333
 334	/*
 335	 * Get a reference to the interface device, which will be put once the
 336	 * mode switch is complete.
 337	 */
 338	get_device(&intf->dev);
 339
 340	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
 341		put_device(&intf->dev);
 342		ret = -EBUSY;
 343		goto out_unlock;
 344	}
 345
 346out_unlock:
 347	mutex_unlock(&intf->mutex);
 348
 349	return ret;
 350}
 351EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
 352
 353/*
 354 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 355 * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP needs to
 356 * clear it after reading a non-zero value from it.
 357 *
 358 * FIXME: This is module-hardware dependent and needs to be extended for every
 359 * type of module we want to support.
 360 */
 361static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
 362{
 363	struct gb_host_device *hd = intf->hd;
 364	unsigned long bootrom_quirks;
 365	unsigned long s2l_quirks;
 366	int ret;
 367	u32 value;
 368	u16 attr;
 369	u8 init_status;
 370
 371	/*
 372	 * ES2 bridges use T_TstSrcIncrement for the init status.
 373	 *
 374	 * FIXME: Remove ES2 support
 375	 */
 376	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
 377		attr = DME_T_TST_SRC_INCREMENT;
 378	else
 379		attr = DME_TOSHIBA_GMP_INIT_STATUS;
 380
 381	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
 382				  DME_SELECTOR_INDEX_NULL, &value);
 383	if (ret)
 384		return ret;
 385
 386	/*
 387	 * A nonzero init status indicates the module has finished
 388	 * initializing.
 389	 */
 390	if (!value) {
 391		dev_err(&intf->dev, "invalid init status\n");
 392		return -ENODEV;
 393	}
 394
 395	/*
 396	 * Extract the init status.
 397	 *
 398	 * For ES2: We need to check the lowest 8 bits of 'value'.
 399	 * For ES3: We need to check the highest 8 bits of the 32-bit 'value'.
 400	 *
 401	 * FIXME: Remove ES2 support
 402	 */
 403	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
 404		init_status = value & 0xff;
 405	else
 406		init_status = value >> 24;
 407
 408	/*
 409	 * Check if the interface is executing the quirky ES3 bootrom that,
 410	 * for example, requires E2EFC, CSD and CSV to be disabled.
 411	 */
 412	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
 413				GB_INTERFACE_QUIRK_FORCED_DISABLE |
 414				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
 415				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;
 416
 417	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;
 418
 419	switch (init_status) {
 420	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
 421	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
 422		intf->quirks |= bootrom_quirks;
 423		break;
 424	case GB_INIT_S2_LOADER_BOOT_STARTED:
 425		/* S2 Loader doesn't support runtime PM */
 426		intf->quirks &= ~bootrom_quirks;
 427		intf->quirks |= s2l_quirks;
 428		break;
 429	default:
 430		intf->quirks &= ~bootrom_quirks;
 431		intf->quirks &= ~s2l_quirks;
 432	}
 433
 434	/* Clear the init status. */
 435	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
 436				   DME_SELECTOR_INDEX_NULL, 0);
 437}
 438
 439/* interface sysfs attributes */
 440#define gb_interface_attr(field, type)					\
 441static ssize_t field##_show(struct device *dev,				\
 442			    struct device_attribute *attr,		\
 443			    char *buf)					\
 444{									\
 445	struct gb_interface *intf = to_gb_interface(dev);		\
 446	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
 447}									\
 448static DEVICE_ATTR_RO(field)
 449
 450gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
 451gb_interface_attr(ddbl1_product_id, "0x%08x");
 452gb_interface_attr(interface_id, "%u");
 453gb_interface_attr(vendor_id, "0x%08x");
 454gb_interface_attr(product_id, "0x%08x");
 455gb_interface_attr(serial_number, "0x%016llx");
 456
 457static ssize_t voltage_now_show(struct device *dev,
 458				struct device_attribute *attr, char *buf)
 459{
 460	struct gb_interface *intf = to_gb_interface(dev);
 461	int ret;
 462	u32 measurement;
 463
 464	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
 465					    GB_SVC_PWRMON_TYPE_VOL,
 466					    &measurement);
 467	if (ret) {
 468		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
 469		return ret;
 470	}
 471
 472	return sprintf(buf, "%u\n", measurement);
 473}
 474static DEVICE_ATTR_RO(voltage_now);
 475
 476static ssize_t current_now_show(struct device *dev,
 477				struct device_attribute *attr, char *buf)
 478{
 479	struct gb_interface *intf = to_gb_interface(dev);
 480	int ret;
 481	u32 measurement;
 482
 483	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
 484					    GB_SVC_PWRMON_TYPE_CURR,
 485					    &measurement);
 486	if (ret) {
 487		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
 488		return ret;
 489	}
 490
 491	return sprintf(buf, "%u\n", measurement);
 492}
 493static DEVICE_ATTR_RO(current_now);
 494
 495static ssize_t power_now_show(struct device *dev,
 496			      struct device_attribute *attr, char *buf)
 497{
 498	struct gb_interface *intf = to_gb_interface(dev);
 499	int ret;
 500	u32 measurement;
 501
 502	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
 503					    GB_SVC_PWRMON_TYPE_PWR,
 504					    &measurement);
 505	if (ret) {
 506		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
 507		return ret;
 508	}
 509
 510	return sprintf(buf, "%u\n", measurement);
 511}
 512static DEVICE_ATTR_RO(power_now);
 513
 514static ssize_t power_state_show(struct device *dev,
 515				struct device_attribute *attr, char *buf)
 516{
 517	struct gb_interface *intf = to_gb_interface(dev);
 518
 519	if (intf->active)
 520		return scnprintf(buf, PAGE_SIZE, "on\n");
 521	else
 522		return scnprintf(buf, PAGE_SIZE, "off\n");
 523}
 524
 525static ssize_t power_state_store(struct device *dev,
 526				 struct device_attribute *attr, const char *buf,
 527				 size_t len)
 528{
 529	struct gb_interface *intf = to_gb_interface(dev);
 530	bool activate;
 531	int ret = 0;
 532
 533	if (kstrtobool(buf, &activate))
 534		return -EINVAL;
 535
 536	mutex_lock(&intf->mutex);
 537
 538	if (activate == intf->active)
 539		goto unlock;
 540
 541	if (activate) {
 542		ret = gb_interface_activate(intf);
 543		if (ret) {
 544			dev_err(&intf->dev,
 545				"failed to activate interface: %d\n", ret);
 546			goto unlock;
 547		}
 548
 549		ret = gb_interface_enable(intf);
 550		if (ret) {
 551			dev_err(&intf->dev,
 552				"failed to enable interface: %d\n", ret);
 553			gb_interface_deactivate(intf);
 554			goto unlock;
 555		}
 556	} else {
 557		gb_interface_disable(intf);
 558		gb_interface_deactivate(intf);
 559	}
 560
 561unlock:
 562	mutex_unlock(&intf->mutex);
 563
 564	if (ret)
 565		return ret;
 566
 567	return len;
 568}
 569static DEVICE_ATTR_RW(power_state);
 570
 571static const char *gb_interface_type_string(struct gb_interface *intf)
 572{
 573	static const char * const types[] = {
 574		[GB_INTERFACE_TYPE_INVALID] = "invalid",
 575		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
 576		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
 577		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
 578		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
 579	};
 580
 581	return types[intf->type];
 582}
 583
 584static ssize_t interface_type_show(struct device *dev,
 585				   struct device_attribute *attr, char *buf)
 586{
 587	struct gb_interface *intf = to_gb_interface(dev);
 588
 589	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
 590}
 591static DEVICE_ATTR_RO(interface_type);
 592
 593static struct attribute *interface_unipro_attrs[] = {
 594	&dev_attr_ddbl1_manufacturer_id.attr,
 595	&dev_attr_ddbl1_product_id.attr,
 596	NULL
 597};
 598
 599static struct attribute *interface_greybus_attrs[] = {
 600	&dev_attr_vendor_id.attr,
 601	&dev_attr_product_id.attr,
 602	&dev_attr_serial_number.attr,
 603	NULL
 604};
 605
 606static struct attribute *interface_power_attrs[] = {
 607	&dev_attr_voltage_now.attr,
 608	&dev_attr_current_now.attr,
 609	&dev_attr_power_now.attr,
 610	&dev_attr_power_state.attr,
 611	NULL
 612};
 613
 614static struct attribute *interface_common_attrs[] = {
 615	&dev_attr_interface_id.attr,
 616	&dev_attr_interface_type.attr,
 617	NULL
 618};
 619
 620static umode_t interface_unipro_is_visible(struct kobject *kobj,
 621					   struct attribute *attr, int n)
 622{
 623	struct device *dev = kobj_to_dev(kobj);
 624	struct gb_interface *intf = to_gb_interface(dev);
 625
 626	switch (intf->type) {
 627	case GB_INTERFACE_TYPE_UNIPRO:
 628	case GB_INTERFACE_TYPE_GREYBUS:
 629		return attr->mode;
 630	default:
 631		return 0;
 632	}
 633}
 634
 635static umode_t interface_greybus_is_visible(struct kobject *kobj,
 636					    struct attribute *attr, int n)
 637{
 638	struct device *dev = kobj_to_dev(kobj);
 639	struct gb_interface *intf = to_gb_interface(dev);
 640
 641	switch (intf->type) {
 642	case GB_INTERFACE_TYPE_GREYBUS:
 643		return attr->mode;
 644	default:
 645		return 0;
 646	}
 647}
 648
 649static umode_t interface_power_is_visible(struct kobject *kobj,
 650					  struct attribute *attr, int n)
 651{
 652	struct device *dev = kobj_to_dev(kobj);
 653	struct gb_interface *intf = to_gb_interface(dev);
 654
 655	switch (intf->type) {
 656	case GB_INTERFACE_TYPE_UNIPRO:
 657	case GB_INTERFACE_TYPE_GREYBUS:
 658		return attr->mode;
 659	default:
 660		return 0;
 661	}
 662}
 663
 664static const struct attribute_group interface_unipro_group = {
 665	.is_visible	= interface_unipro_is_visible,
 666	.attrs		= interface_unipro_attrs,
 667};
 668
 669static const struct attribute_group interface_greybus_group = {
 670	.is_visible	= interface_greybus_is_visible,
 671	.attrs		= interface_greybus_attrs,
 672};
 673
 674static const struct attribute_group interface_power_group = {
 675	.is_visible	= interface_power_is_visible,
 676	.attrs		= interface_power_attrs,
 677};
 678
 679static const struct attribute_group interface_common_group = {
 680	.attrs		= interface_common_attrs,
 681};
 682
 683static const struct attribute_group *interface_groups[] = {
 684	&interface_unipro_group,
 685	&interface_greybus_group,
 686	&interface_power_group,
 687	&interface_common_group,
 688	NULL
 689};
 690
 691static void gb_interface_release(struct device *dev)
 692{
 693	struct gb_interface *intf = to_gb_interface(dev);
 694
 695	trace_gb_interface_release(intf);
 696
 697	kfree(intf);
 698}
 699
 700#ifdef CONFIG_PM
 701static int gb_interface_suspend(struct device *dev)
 702{
 703	struct gb_interface *intf = to_gb_interface(dev);
 704	int ret;
 705
 706	ret = gb_control_interface_suspend_prepare(intf->control);
 707	if (ret)
 708		return ret;
 709
 710	ret = gb_control_suspend(intf->control);
 711	if (ret)
 712		goto err_hibernate_abort;
 713
 714	ret = gb_interface_hibernate_link(intf);
 715	if (ret)
 716		return ret;
 717
 718	/* Delay to allow interface to enter standby before disabling refclk */
 719	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
 720
 721	ret = gb_interface_refclk_set(intf, false);
 722	if (ret)
 723		return ret;
 724
 725	return 0;
 726
 727err_hibernate_abort:
 728	gb_control_interface_hibernate_abort(intf->control);
 729
 730	return ret;
 731}
 732
 733static int gb_interface_resume(struct device *dev)
 734{
 735	struct gb_interface *intf = to_gb_interface(dev);
 736	struct gb_svc *svc = intf->hd->svc;
 737	int ret;
 738
 739	ret = gb_interface_refclk_set(intf, true);
 740	if (ret)
 741		return ret;
 742
 743	ret = gb_svc_intf_resume(svc, intf->interface_id);
 744	if (ret)
 745		return ret;
 746
 747	ret = gb_control_resume(intf->control);
 748	if (ret)
 749		return ret;
 750
 751	return 0;
 752}
 753
 754static int gb_interface_runtime_idle(struct device *dev)
 755{
 756	pm_runtime_mark_last_busy(dev);
 757	pm_request_autosuspend(dev);
 758
 759	return 0;
 760}
 761#endif
 762
 763static const struct dev_pm_ops gb_interface_pm_ops = {
 764	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
 765			   gb_interface_runtime_idle)
 766};
 767
 768struct device_type greybus_interface_type = {
 769	.name =		"greybus_interface",
 770	.release =	gb_interface_release,
 771	.pm =		&gb_interface_pm_ops,
 772};
 773
 774/*
 775 * A Greybus module represents a user-replaceable component on a GMP
 776 * phone.  An interface is the physical connection on that module.  A
 777 * module may have more than one interface.
 778 *
 779 * Create a gb_interface structure to represent a discovered interface.
 780 * The position of the interface within the Endo is encoded in the
 781 * "interface_id" argument.
 782 *
 783 * Returns a pointer to the new interface or a null pointer if a
 784 * failure occurs due to memory exhaustion.
 785 */
 786struct gb_interface *gb_interface_create(struct gb_module *module,
 787					 u8 interface_id)
 788{
 789	struct gb_host_device *hd = module->hd;
 790	struct gb_interface *intf;
 791
 792	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 793	if (!intf)
 794		return NULL;
 795
 796	intf->hd = hd;		/* XXX refcount? */
 797	intf->module = module;
 798	intf->interface_id = interface_id;
 799	INIT_LIST_HEAD(&intf->bundles);
 800	INIT_LIST_HEAD(&intf->manifest_descs);
 801	mutex_init(&intf->mutex);
 802	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
 803	init_completion(&intf->mode_switch_completion);
 804
 805	/* Invalid device id to start with */
 806	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
 807
 808	intf->dev.parent = &module->dev;
 809	intf->dev.bus = &greybus_bus_type;
 810	intf->dev.type = &greybus_interface_type;
 811	intf->dev.groups = interface_groups;
 812	intf->dev.dma_mask = module->dev.dma_mask;
 813	device_initialize(&intf->dev);
 814	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
 815		     interface_id);
 816
 817	pm_runtime_set_autosuspend_delay(&intf->dev,
 818					 GB_INTERFACE_AUTOSUSPEND_MS);
 819
 820	trace_gb_interface_create(intf);
 821
 822	return intf;
 823}
 824
 825static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
 826{
 827	struct gb_svc *svc = intf->hd->svc;
 828	int ret;
 829
 830	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
 831
 832	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
 833	if (ret) {
 834		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
 835		return ret;
 836	}
 837
 838	return 0;
 839}
 840
 841static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
 842{
 843	struct gb_svc *svc = intf->hd->svc;
 844	int ret;
 845
 846	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
 847
 848	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
 849	if (ret) {
 850		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
 851		return ret;
 852	}
 853
 854	return 0;
 855}
 856
 857static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
 858{
 859	struct gb_svc *svc = intf->hd->svc;
 860	int ret;
 861
 862	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
 863
 864	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
 865	if (ret) {
 866		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
 867		return ret;
 868	}
 869
 870	return 0;
 871}
 872
 873static int gb_interface_activate_operation(struct gb_interface *intf,
 874					   enum gb_interface_type *intf_type)
 875{
 876	struct gb_svc *svc = intf->hd->svc;
 877	u8 type;
 878	int ret;
 879
 880	dev_dbg(&intf->dev, "%s\n", __func__);
 881
 882	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
 883	if (ret) {
 884		dev_err(&intf->dev, "failed to activate: %d\n", ret);
 885		return ret;
 886	}
 887
 888	switch (type) {
 889	case GB_SVC_INTF_TYPE_DUMMY:
 890		*intf_type = GB_INTERFACE_TYPE_DUMMY;
 891		/* FIXME: handle as an error for now */
 892		return -ENODEV;
 893	case GB_SVC_INTF_TYPE_UNIPRO:
 894		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
 895		dev_err(&intf->dev, "interface type UniPro not supported\n");
 896		/* FIXME: handle as an error for now */
 897		return -ENODEV;
 898	case GB_SVC_INTF_TYPE_GREYBUS:
 899		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
 900		break;
 901	default:
 902		dev_err(&intf->dev, "unknown interface type: %u\n", type);
 903		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
 904		return -ENODEV;
 905	}
 906
 907	return 0;
 908}
 909
 910static int gb_interface_hibernate_link(struct gb_interface *intf)
 911{
 912	struct gb_svc *svc = intf->hd->svc;
 913
 914	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
 915}
 916
 917static int _gb_interface_activate(struct gb_interface *intf,
 918				  enum gb_interface_type *type)
 919{
 920	int ret;
 921
 922	*type = GB_INTERFACE_TYPE_UNKNOWN;
 923
 924	if (intf->ejected || intf->removed)
 925		return -ENODEV;
 926
 927	ret = gb_interface_vsys_set(intf, true);
 928	if (ret)
 929		return ret;
 930
 931	ret = gb_interface_refclk_set(intf, true);
 932	if (ret)
 933		goto err_vsys_disable;
 934
 935	ret = gb_interface_unipro_set(intf, true);
 936	if (ret)
 937		goto err_refclk_disable;
 938
 939	ret = gb_interface_activate_operation(intf, type);
 940	if (ret) {
 941		switch (*type) {
 942		case GB_INTERFACE_TYPE_UNIPRO:
 943		case GB_INTERFACE_TYPE_GREYBUS:
 944			goto err_hibernate_link;
 945		default:
 946			goto err_unipro_disable;
 947		}
 948	}
 949
 950	ret = gb_interface_read_dme(intf);
 951	if (ret)
 952		goto err_hibernate_link;
 953
 954	ret = gb_interface_route_create(intf);
 955	if (ret)
 956		goto err_hibernate_link;
 957
 958	intf->active = true;
 959
 960	trace_gb_interface_activate(intf);
 961
 962	return 0;
 963
 964err_hibernate_link:
 965	gb_interface_hibernate_link(intf);
 966err_unipro_disable:
 967	gb_interface_unipro_set(intf, false);
 968err_refclk_disable:
 969	gb_interface_refclk_set(intf, false);
 970err_vsys_disable:
 971	gb_interface_vsys_set(intf, false);
 972
 973	return ret;
 974}
 975
 976/*
 977 * At present, we assume a UniPro-only module to be a Greybus module that
 978 * failed to send its mailbox poke. There is some reason to believe that this
 979 * is because of a bug in the ES3 bootrom.
 980 *
 981 * FIXME: Check if this is a Toshiba bridge before retrying?
 982 */
 983static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
 984					   enum gb_interface_type *type)
 985{
 986	int retries = 3;
 987	int ret;
 988
 989	while (retries--) {
 990		ret = _gb_interface_activate(intf, type);
 991		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
 992			continue;
 993
 994		break;
 995	}
 996
 997	return ret;
 998}
 999
1000/*
1001 * Activate an interface.
1002 *
1003 * Locking: Caller holds the interface mutex.
1004 */
1005int gb_interface_activate(struct gb_interface *intf)
1006{
1007	enum gb_interface_type type;
1008	int ret;
1009
1010	switch (intf->type) {
1011	case GB_INTERFACE_TYPE_INVALID:
1012	case GB_INTERFACE_TYPE_GREYBUS:
1013		ret = _gb_interface_activate_es3_hack(intf, &type);
1014		break;
1015	default:
1016		ret = _gb_interface_activate(intf, &type);
1017	}
1018
1019	/* Make sure type is detected correctly during reactivation. */
1020	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
1021		if (type != intf->type) {
1022			dev_err(&intf->dev, "failed to detect interface type\n");
1023
1024			if (!ret)
1025				gb_interface_deactivate(intf);
1026
1027			return -EIO;
1028		}
1029	} else {
1030		intf->type = type;
1031	}
1032
1033	return ret;
1034}
1035
1036/*
1037 * Deactivate an interface.
1038 *
1039 * Locking: Caller holds the interface mutex.
1040 */
1041void gb_interface_deactivate(struct gb_interface *intf)
1042{
1043	if (!intf->active)
1044		return;
1045
1046	trace_gb_interface_deactivate(intf);
1047
1048	/* Abort any ongoing mode switch. */
1049	if (intf->mode_switch)
1050		complete(&intf->mode_switch_completion);
1051
1052	gb_interface_route_destroy(intf);
1053	gb_interface_hibernate_link(intf);
1054	gb_interface_unipro_set(intf, false);
1055	gb_interface_refclk_set(intf, false);
1056	gb_interface_vsys_set(intf, false);
1057
1058	intf->active = false;
1059}
1060
1061/*
1062 * Enable an interface by enabling its control connection, fetching the
1063 * manifest and other information over it, and finally registering its child
1064 * devices.
1065 *
1066 * Locking: Caller holds the interface mutex.
1067 */
1068int gb_interface_enable(struct gb_interface *intf)
1069{
1070	struct gb_control *control;
1071	struct gb_bundle *bundle, *tmp;
1072	int ret, size;
1073	void *manifest;
1074
1075	ret = gb_interface_read_and_clear_init_status(intf);
1076	if (ret) {
1077		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
1078		return ret;
1079	}
1080
1081	/* Establish control connection */
1082	control = gb_control_create(intf);
1083	if (IS_ERR(control)) {
1084		dev_err(&intf->dev, "failed to create control device: %ld\n",
1085			PTR_ERR(control));
1086		return PTR_ERR(control);
1087	}
1088	intf->control = control;
1089
1090	ret = gb_control_enable(intf->control);
1091	if (ret)
1092		goto err_put_control;
1093
1094	/* Get manifest size using control protocol on CPort */
1095	size = gb_control_get_manifest_size_operation(intf);
1096	if (size <= 0) {
1097		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
1098
1099		if (size)
1100			ret = size;
1101		else
1102			ret = -EINVAL;
1103
1104		goto err_disable_control;
1105	}
1106
1107	manifest = kmalloc(size, GFP_KERNEL);
1108	if (!manifest) {
1109		ret = -ENOMEM;
1110		goto err_disable_control;
1111	}
1112
1113	/* Get manifest using control protocol on CPort */
1114	ret = gb_control_get_manifest_operation(intf, manifest, size);
1115	if (ret) {
1116		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
1117		goto err_free_manifest;
1118	}
1119
1120	/*
1121	 * Parse the manifest and build up our data structures representing
1122	 * what's in it.
1123	 */
1124	if (!gb_manifest_parse(intf, manifest, size)) {
1125		dev_err(&intf->dev, "failed to parse manifest\n");
1126		ret = -EINVAL;
1127		goto err_destroy_bundles;
1128	}
1129
1130	ret = gb_control_get_bundle_versions(intf->control);
1131	if (ret)
1132		goto err_destroy_bundles;
1133
1134	/* Register the control device and any bundles */
1135	ret = gb_control_add(intf->control);
1136	if (ret)
1137		goto err_destroy_bundles;
1138
1139	pm_runtime_use_autosuspend(&intf->dev);
1140	pm_runtime_get_noresume(&intf->dev);
1141	pm_runtime_set_active(&intf->dev);
1142	pm_runtime_enable(&intf->dev);
1143
1144	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
1145		ret = gb_bundle_add(bundle);
1146		if (ret) {
1147			gb_bundle_destroy(bundle);
1148			continue;
1149		}
1150	}
1151
1152	kfree(manifest);
1153
1154	intf->enabled = true;
1155
1156	pm_runtime_put(&intf->dev);
1157
1158	trace_gb_interface_enable(intf);
1159
1160	return 0;
1161
1162err_destroy_bundles:
1163	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
1164		gb_bundle_destroy(bundle);
1165err_free_manifest:
1166	kfree(manifest);
1167err_disable_control:
1168	gb_control_disable(intf->control);
1169err_put_control:
1170	gb_control_put(intf->control);
1171	intf->control = NULL;
1172
1173	return ret;
1174}
1175
1176/*
1177 * Disable an interface and destroy its bundles.
1178 *
1179 * Locking: Caller holds the interface mutex.
1180 */
1181void gb_interface_disable(struct gb_interface *intf)
1182{
1183	struct gb_bundle *bundle;
1184	struct gb_bundle *next;
1185
1186	if (!intf->enabled)
1187		return;
1188
1189	trace_gb_interface_disable(intf);
1190
1191	pm_runtime_get_sync(&intf->dev);
1192
1193	/* Set disconnected flag to avoid I/O during connection tear down. */
1194	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
1195		intf->disconnected = true;
1196
1197	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
1198		gb_bundle_destroy(bundle);
1199
1200	if (!intf->mode_switch && !intf->disconnected)
1201		gb_control_interface_deactivate_prepare(intf->control);
1202
1203	gb_control_del(intf->control);
1204	gb_control_disable(intf->control);
1205	gb_control_put(intf->control);
1206	intf->control = NULL;
1207
1208	intf->enabled = false;
1209
1210	pm_runtime_disable(&intf->dev);
1211	pm_runtime_set_suspended(&intf->dev);
1212	pm_runtime_dont_use_autosuspend(&intf->dev);
1213	pm_runtime_put_noidle(&intf->dev);
1214}
1215
1216/* Register an interface. */
1217int gb_interface_add(struct gb_interface *intf)
1218{
1219	int ret;
1220
1221	ret = device_add(&intf->dev);
1222	if (ret) {
1223		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
1224		return ret;
1225	}
1226
1227	trace_gb_interface_add(intf);
1228
1229	dev_info(&intf->dev, "Interface added (%s)\n",
1230		 gb_interface_type_string(intf));
1231
1232	switch (intf->type) {
1233	case GB_INTERFACE_TYPE_GREYBUS:
1234		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
1235			 intf->vendor_id, intf->product_id);
1236		fallthrough;
1237	case GB_INTERFACE_TYPE_UNIPRO:
1238		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
1239			 intf->ddbl1_manufacturer_id,
1240			 intf->ddbl1_product_id);
1241		break;
1242	default:
1243		break;
1244	}
1245
1246	return 0;
1247}
1248
1249/* Deregister an interface. */
1250void gb_interface_del(struct gb_interface *intf)
1251{
1252	if (device_is_registered(&intf->dev)) {
1253		trace_gb_interface_del(intf);
1254
1255		device_del(&intf->dev);
1256		dev_info(&intf->dev, "Interface removed\n");
1257	}
1258}
1259
1260void gb_interface_put(struct gb_interface *intf)
1261{
1262	put_device(&intf->dev);
1263}