Linux v6.8: drivers/usb/dwc2/core.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * core.c - DesignWare HS OTG Controller common routines
   4 *
   5 * Copyright (C) 2004-2013 Synopsys, Inc.
   6 */
   7
   8/*
   9 * The Core code provides basic services for accessing and managing the
  10 * DWC_otg hardware. These services are used by both the Host Controller
  11 * Driver and the Peripheral Controller Driver.
  12 */
  13#include <linux/kernel.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/delay.h>
  20#include <linux/io.h>
  21#include <linux/slab.h>
  22#include <linux/usb.h>
  23
  24#include <linux/usb/hcd.h>
  25#include <linux/usb/ch11.h>
  26
  27#include "core.h"
  28#include "hcd.h"
  29
  30/**
  31 * dwc2_backup_global_registers() - Backup global controller registers.
   32 * When suspending the USB bus, the registers need to be backed up
  33 * if controller power is disabled once suspended.
  34 *
  35 * @hsotg: Programming view of the DWC_otg controller
  36 */
  37int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
  38{
  39	struct dwc2_gregs_backup *gr;
  40
  41	dev_dbg(hsotg->dev, "%s\n", __func__);
  42
  43	/* Backup global regs */
  44	gr = &hsotg->gr_backup;
  45
  46	gr->gotgctl = dwc2_readl(hsotg, GOTGCTL);
  47	gr->gintmsk = dwc2_readl(hsotg, GINTMSK);
  48	gr->gahbcfg = dwc2_readl(hsotg, GAHBCFG);
  49	gr->gusbcfg = dwc2_readl(hsotg, GUSBCFG);
  50	gr->grxfsiz = dwc2_readl(hsotg, GRXFSIZ);
  51	gr->gnptxfsiz = dwc2_readl(hsotg, GNPTXFSIZ);
  52	gr->gdfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
  53	gr->pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
  54	gr->glpmcfg = dwc2_readl(hsotg, GLPMCFG);
  55	gr->gi2cctl = dwc2_readl(hsotg, GI2CCTL);
  56	gr->pcgcctl = dwc2_readl(hsotg, PCGCTL);
  57
  58	gr->valid = true;
  59	return 0;
  60}
  61
  62/**
  63 * dwc2_restore_global_registers() - Restore controller global registers.
   64 * When resuming the USB bus, the device registers need to be restored
   65 * if controller power was disabled.
  66 *
  67 * @hsotg: Programming view of the DWC_otg controller
  68 */
  69int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
  70{
  71	struct dwc2_gregs_backup *gr;
  72
  73	dev_dbg(hsotg->dev, "%s\n", __func__);
  74
  75	/* Restore global regs */
  76	gr = &hsotg->gr_backup;
  77	if (!gr->valid) {
  78		dev_err(hsotg->dev, "%s: no global registers to restore\n",
  79			__func__);
  80		return -EINVAL;
  81	}
  82	gr->valid = false;
  83
  84	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
  85	dwc2_writel(hsotg, gr->gotgctl, GOTGCTL);
  86	dwc2_writel(hsotg, gr->gintmsk, GINTMSK);
  87	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
  88	dwc2_writel(hsotg, gr->gahbcfg, GAHBCFG);
  89	dwc2_writel(hsotg, gr->grxfsiz, GRXFSIZ);
  90	dwc2_writel(hsotg, gr->gnptxfsiz, GNPTXFSIZ);
  91	dwc2_writel(hsotg, gr->gdfifocfg, GDFIFOCFG);
  92	dwc2_writel(hsotg, gr->pcgcctl1, PCGCCTL1);
  93	dwc2_writel(hsotg, gr->glpmcfg, GLPMCFG);
  94	dwc2_writel(hsotg, gr->pcgcctl, PCGCTL);
  95	dwc2_writel(hsotg, gr->gi2cctl, GI2CCTL);
  96
  97	return 0;
  98}
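/*
 * Editor's note: an illustrative sketch (not part of the kernel file) of how
 * the backup/restore pair above is meant to bracket a controller power-down.
 * The function name below is hypothetical; the power gating itself is
 * platform specific and only hinted at in comments.
 */
static int dwc2_example_powerdown_cycle(struct dwc2_hsotg *hsotg)
{
	int ret;

	/* Save the global registers while the controller is still powered */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret)
		return ret;

	/* ... controller power is switched off and later switched back on
	 * here by whatever platform-specific mechanism gates it ... */

	/* Write the saved values back once power has returned */
	return dwc2_restore_global_registers(hsotg);
}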
  99
 100/**
 101 * dwc2_exit_partial_power_down() - Exit controller from Partial Power Down.
 102 *
 103 * @hsotg: Programming view of the DWC_otg controller
 104 * @rem_wakeup: indicates whether resume is initiated by Reset.
 105 * @restore: Controller registers need to be restored
 106 */
 107int dwc2_exit_partial_power_down(struct dwc2_hsotg *hsotg, int rem_wakeup,
 108				 bool restore)
 109{
 110	struct dwc2_gregs_backup *gr;
 111
 112	gr = &hsotg->gr_backup;
 113
 114	/*
  115	 * Restore the host or device registers with the same mode the core
  116	 * entered partial power down in, by checking the "GOTGCTL_CURMODE_HOST"
  117	 * backup value of the "gotgctl" register.
 118	 */
 119	if (gr->gotgctl & GOTGCTL_CURMODE_HOST)
 120		return dwc2_host_exit_partial_power_down(hsotg, rem_wakeup,
 121							 restore);
 122	else
 123		return dwc2_gadget_exit_partial_power_down(hsotg, restore);
 124}
 125
 126/**
 127 * dwc2_enter_partial_power_down() - Put controller in Partial Power Down.
 128 *
 129 * @hsotg: Programming view of the DWC_otg controller
 130 */
 131int dwc2_enter_partial_power_down(struct dwc2_hsotg *hsotg)
 132{
 133	if (dwc2_is_host_mode(hsotg))
 134		return dwc2_host_enter_partial_power_down(hsotg);
 135	else
 136		return dwc2_gadget_enter_partial_power_down(hsotg);
 137}
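/*
 * Editor's note: an illustrative sketch (not part of the kernel file) showing
 * that Partial Power Down is a paired enter/exit sequence. The exit helper
 * picks the host or gadget variant itself from the backed-up GOTGCTL mode, so
 * the caller only supplies the remote-wakeup and restore flags. The function
 * name below is hypothetical.
 */
static int dwc2_example_partial_power_down_cycle(struct dwc2_hsotg *hsotg,
						 int rem_wakeup)
{
	int ret;

	ret = dwc2_enter_partial_power_down(hsotg);
	if (ret)
		return ret;

	/* ... the bus stays suspended while the core sits in
	 * partial power down ... */

	return dwc2_exit_partial_power_down(hsotg, rem_wakeup, true);
}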
 138
 139/**
  140 * dwc2_restore_essential_regs() - Restore essential regs of core.
 141 *
 142 * @hsotg: Programming view of the DWC_otg controller
 143 * @rmode: Restore mode, enabled in case of remote-wakeup.
 144 * @is_host: Host or device mode.
 145 */
 146static void dwc2_restore_essential_regs(struct dwc2_hsotg *hsotg, int rmode,
 147					int is_host)
 148{
 149	u32 pcgcctl;
 150	struct dwc2_gregs_backup *gr;
 151	struct dwc2_dregs_backup *dr;
 152	struct dwc2_hregs_backup *hr;
 153
 154	gr = &hsotg->gr_backup;
 155	dr = &hsotg->dr_backup;
 156	hr = &hsotg->hr_backup;
 157
 158	dev_dbg(hsotg->dev, "%s: restoring essential regs\n", __func__);
 159
 160	/* Load restore values for [31:14] bits */
 161	pcgcctl = (gr->pcgcctl & 0xffffc000);
 162	/* If High Speed */
 163	if (is_host) {
 164		if (!(pcgcctl & PCGCTL_P2HD_PRT_SPD_MASK))
 165			pcgcctl |= BIT(17);
 166	} else {
 167		if (!(pcgcctl & PCGCTL_P2HD_DEV_ENUM_SPD_MASK))
 168			pcgcctl |= BIT(17);
 169	}
 170	dwc2_writel(hsotg, pcgcctl, PCGCTL);
 171
  172	/* Unmask global interrupt in GAHBCFG and restore it */
 173	dwc2_writel(hsotg, gr->gahbcfg | GAHBCFG_GLBL_INTR_EN, GAHBCFG);
 174
  175	/* Clear all pending interrupts */
 176	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
 177
 178	/* Unmask restore done interrupt */
 179	dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTMSK);
 180
 181	/* Restore GUSBCFG and HCFG/DCFG */
 182	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
 183
 184	if (is_host) {
 185		dwc2_writel(hsotg, hr->hcfg, HCFG);
 186		if (rmode)
 187			pcgcctl |= PCGCTL_RESTOREMODE;
 188		dwc2_writel(hsotg, pcgcctl, PCGCTL);
 189		udelay(10);
 190
 191		pcgcctl |= PCGCTL_ESS_REG_RESTORED;
 192		dwc2_writel(hsotg, pcgcctl, PCGCTL);
 193		udelay(10);
 194	} else {
 195		dwc2_writel(hsotg, dr->dcfg, DCFG);
 196		if (!rmode)
 197			pcgcctl |= PCGCTL_RESTOREMODE | PCGCTL_RSTPDWNMODULE;
 198		dwc2_writel(hsotg, pcgcctl, PCGCTL);
 199		udelay(10);
 200
 201		pcgcctl |= PCGCTL_ESS_REG_RESTORED;
 202		dwc2_writel(hsotg, pcgcctl, PCGCTL);
 203		udelay(10);
 204	}
 205}
 206
 207/**
 208 * dwc2_hib_restore_common() - Common part of restore routine.
 209 *
 210 * @hsotg: Programming view of the DWC_otg controller
 211 * @rem_wakeup: Remote-wakeup, enabled in case of remote-wakeup.
 212 * @is_host: Host or device mode.
 213 */
 214void dwc2_hib_restore_common(struct dwc2_hsotg *hsotg, int rem_wakeup,
 215			     int is_host)
 216{
 217	u32 gpwrdn;
 218
 219	/* Switch-on voltage to the core */
 220	gpwrdn = dwc2_readl(hsotg, GPWRDN);
 221	gpwrdn &= ~GPWRDN_PWRDNSWTCH;
 222	dwc2_writel(hsotg, gpwrdn, GPWRDN);
 223	udelay(10);
 224
 225	/* Reset core */
 226	gpwrdn = dwc2_readl(hsotg, GPWRDN);
 227	gpwrdn &= ~GPWRDN_PWRDNRSTN;
 228	dwc2_writel(hsotg, gpwrdn, GPWRDN);
 229	udelay(10);
 230
 231	/* Enable restore from PMU */
 232	gpwrdn = dwc2_readl(hsotg, GPWRDN);
 233	gpwrdn |= GPWRDN_RESTORE;
 234	dwc2_writel(hsotg, gpwrdn, GPWRDN);
 235	udelay(10);
 236
 237	/* Disable Power Down Clamp */
 238	gpwrdn = dwc2_readl(hsotg, GPWRDN);
 239	gpwrdn &= ~GPWRDN_PWRDNCLMP;
 240	dwc2_writel(hsotg, gpwrdn, GPWRDN);
 241	udelay(50);
 242
 243	if (!is_host && rem_wakeup)
 244		udelay(70);
 245
 246	/* Deassert reset core */
 247	gpwrdn = dwc2_readl(hsotg, GPWRDN);
 248	gpwrdn |= GPWRDN_PWRDNRSTN;
 249	dwc2_writel(hsotg, gpwrdn, GPWRDN);
 250	udelay(10);
 251
 252	/* Disable PMU interrupt */
 253	gpwrdn = dwc2_readl(hsotg, GPWRDN);
 254	gpwrdn &= ~GPWRDN_PMUINTSEL;
 255	dwc2_writel(hsotg, gpwrdn, GPWRDN);
 256	udelay(10);
 257
 258	/* Set Restore Essential Regs bit in PCGCCTL register */
 259	dwc2_restore_essential_regs(hsotg, rem_wakeup, is_host);
 260
 261	/*
 262	 * Wait For Restore_done Interrupt. This mechanism of polling the
 263	 * interrupt is introduced to avoid any possible race conditions
 264	 */
 265	if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_RESTOREDONE,
 266				    20000)) {
 267		dev_dbg(hsotg->dev,
 268			"%s: Restore Done wasn't generated here\n",
 269			__func__);
 270	} else {
  271		dev_dbg(hsotg->dev, "restore done generated here\n");
 272
 273		/*
  274		 * To avoid a restore done interrupt storm after restore is
  275		 * generated, clear the GINTSTS_RESTOREDONE bit.
 276		 */
 277		dwc2_writel(hsotg, GINTSTS_RESTOREDONE, GINTSTS);
 278	}
 279}
 280
 281/**
 282 * dwc2_wait_for_mode() - Waits for the controller mode.
 283 * @hsotg:	Programming view of the DWC_otg controller.
 284 * @host_mode:	If true, waits for host mode, otherwise device mode.
 285 */
 286static void dwc2_wait_for_mode(struct dwc2_hsotg *hsotg,
 287			       bool host_mode)
 288{
 289	ktime_t start;
 290	ktime_t end;
 291	unsigned int timeout = 110;
 292
 293	dev_vdbg(hsotg->dev, "Waiting for %s mode\n",
 294		 host_mode ? "host" : "device");
 295
 296	start = ktime_get();
 297
 298	while (1) {
 299		s64 ms;
 300
 301		if (dwc2_is_host_mode(hsotg) == host_mode) {
 302			dev_vdbg(hsotg->dev, "%s mode set\n",
 303				 host_mode ? "Host" : "Device");
 304			break;
 305		}
 306
 307		end = ktime_get();
 308		ms = ktime_to_ms(ktime_sub(end, start));
 309
 310		if (ms >= (s64)timeout) {
 311			dev_warn(hsotg->dev, "%s: Couldn't set %s mode\n",
 312				 __func__, host_mode ? "host" : "device");
 313			break;
 314		}
 315
 316		usleep_range(1000, 2000);
 317	}
 318}
 319
 320/**
 321 * dwc2_iddig_filter_enabled() - Returns true if the IDDIG debounce
 322 * filter is enabled.
 323 *
 324 * @hsotg: Programming view of DWC_otg controller
 325 */
 326static bool dwc2_iddig_filter_enabled(struct dwc2_hsotg *hsotg)
 327{
 328	u32 gsnpsid;
 329	u32 ghwcfg4;
 330
 331	if (!dwc2_hw_is_otg(hsotg))
 332		return false;
 333
 334	/* Check if core configuration includes the IDDIG filter. */
 335	ghwcfg4 = dwc2_readl(hsotg, GHWCFG4);
 336	if (!(ghwcfg4 & GHWCFG4_IDDIG_FILT_EN))
 337		return false;
 338
 339	/*
 340	 * Check if the IDDIG debounce filter is bypassed. Available
 341	 * in core version >= 3.10a.
 342	 */
 343	gsnpsid = dwc2_readl(hsotg, GSNPSID);
 344	if (gsnpsid >= DWC2_CORE_REV_3_10a) {
 345		u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
 346
 347		if (gotgctl & GOTGCTL_DBNCE_FLTR_BYPASS)
 348			return false;
 349	}
 350
 351	return true;
 352}
 353
 354/*
 355 * dwc2_enter_hibernation() - Common function to enter hibernation.
 356 *
 357 * @hsotg: Programming view of the DWC_otg controller
 358 * @is_host: True if core is in host mode.
 359 *
 360 * Return: 0 if successful, negative error code otherwise
 361 */
 362int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg, int is_host)
 363{
 364	if (is_host)
 365		return dwc2_host_enter_hibernation(hsotg);
 366	else
 367		return dwc2_gadget_enter_hibernation(hsotg);
 368}
 369
 370/*
 371 * dwc2_exit_hibernation() - Common function to exit from hibernation.
 372 *
 373 * @hsotg: Programming view of the DWC_otg controller
 374 * @rem_wakeup: Remote-wakeup, enabled in case of remote-wakeup.
 375 * @reset: Enabled in case of restore with reset.
 376 * @is_host: True if core is in host mode.
 377 *
 378 * Return: 0 if successful, negative error code otherwise
 379 */
 380int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
 381			  int reset, int is_host)
 382{
 383	if (is_host)
 384		return dwc2_host_exit_hibernation(hsotg, rem_wakeup, reset);
 385	else
 386		return dwc2_gadget_exit_hibernation(hsotg, rem_wakeup, reset);
 387}
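/*
 * Editor's note: an illustrative sketch (not part of the kernel file) of the
 * paired hibernation entry/exit, with the mode captured up front so both
 * calls agree on it. The function name below is hypothetical.
 */
static int dwc2_example_hibernation_cycle(struct dwc2_hsotg *hsotg,
					  int rem_wakeup, int reset)
{
	int is_host = dwc2_is_host_mode(hsotg);
	int ret;

	ret = dwc2_enter_hibernation(hsotg, is_host);
	if (ret)
		return ret;

	/* ... only the PMU-backed state survives while the core is
	 * powered down ... */

	return dwc2_exit_hibernation(hsotg, rem_wakeup, reset, is_host);
}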
 388
 389/*
  390 * Do a soft reset of the core. Be careful with this because it
 391 * resets all the internal state machines of the core.
 392 */
 393int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
 394{
 395	u32 greset;
 396	bool wait_for_host_mode = false;
 397
 398	dev_vdbg(hsotg->dev, "%s()\n", __func__);
 399
 400	/*
 401	 * If the current mode is host, either due to the force mode
 402	 * bit being set (which persists after core reset) or the
 403	 * connector id pin, a core soft reset will temporarily reset
 404	 * the mode to device. A delay from the IDDIG debounce filter
 405	 * will occur before going back to host mode.
 406	 *
 407	 * Determine whether we will go back into host mode after a
 408	 * reset and account for this delay after the reset.
 409	 */
 410	if (dwc2_iddig_filter_enabled(hsotg)) {
 411		u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
 412		u32 gusbcfg = dwc2_readl(hsotg, GUSBCFG);
 413
 414		if (!(gotgctl & GOTGCTL_CONID_B) ||
 415		    (gusbcfg & GUSBCFG_FORCEHOSTMODE)) {
 416			wait_for_host_mode = true;
 417		}
 418	}
 419
 420	/* Core Soft Reset */
 421	greset = dwc2_readl(hsotg, GRSTCTL);
 422	greset |= GRSTCTL_CSFTRST;
 423	dwc2_writel(hsotg, greset, GRSTCTL);
 424
 425	if ((hsotg->hw_params.snpsid & DWC2_CORE_REV_MASK) <
 426		(DWC2_CORE_REV_4_20a & DWC2_CORE_REV_MASK)) {
 427		if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL,
 428					      GRSTCTL_CSFTRST, 10000)) {
 429			dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST\n",
 430				 __func__);
 431			return -EBUSY;
 432		}
 433	} else {
 434		if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
 435					    GRSTCTL_CSFTRST_DONE, 10000)) {
 436			dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST_DONE\n",
 437				 __func__);
 438			return -EBUSY;
 439		}
 440		greset = dwc2_readl(hsotg, GRSTCTL);
 441		greset &= ~GRSTCTL_CSFTRST;
 442		greset |= GRSTCTL_CSFTRST_DONE;
 443		dwc2_writel(hsotg, greset, GRSTCTL);
 444	}
 445
 446	/*
  447	 * When switching from device mode to host mode by disconnecting the
  448	 * device cable, the core enters and exits hibernation, but the FIFO
  449	 * map is not cleared. That triggers a WARNING
  450	 * (WARNING: CPU: 5 PID: 0 at drivers/usb/dwc2/
  451	 * gadget.c:307 dwc2_hsotg_init_fifo+0x12/0x152 [dwc2])
  452	 * when the micro A-to-B host cable is disconnected in host mode,
  453	 * because a core reset occurs then.
  454	 * To avoid the WARNING, fifo_map should be cleared here in
  455	 * dwc2_core_reset(), taking the configuration into account:
  456	 * fifo_map must be cleared only if the driver is built in
  457	 * "CONFIG_USB_DWC2_PERIPHERAL" or "CONFIG_USB_DWC2_DUAL_ROLE"
  458	 * mode.
 459	 */
 460	dwc2_clear_fifo_map(hsotg);
 461
 462	/* Wait for AHB master IDLE state */
 463	if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000)) {
 464		dev_warn(hsotg->dev, "%s: HANG! AHB Idle timeout GRSTCTL GRSTCTL_AHBIDLE\n",
 465			 __func__);
 466		return -EBUSY;
 467	}
 468
 469	if (wait_for_host_mode && !skip_wait)
 470		dwc2_wait_for_mode(hsotg, true);
 471
 472	return 0;
 473}
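/*
 * Editor's note: an illustrative sketch (not part of the kernel file) of the
 * "program a sticky GUSBCFG bit, then soft reset" pattern that the PHY init
 * code later in this file uses; the reset makes the new selection take effect
 * everywhere. The function name is hypothetical.
 */
static int dwc2_example_select_fs_phy_and_reset(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg = dwc2_readl(hsotg, GUSBCFG);

	usbcfg |= GUSBCFG_PHYSEL;	/* select the FS transceiver */
	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	/* skip_wait=false keeps the post-reset wait for host mode */
	return dwc2_core_reset(hsotg, false);
}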
 474
 475/**
 476 * dwc2_force_mode() - Force the mode of the controller.
 477 *
 478 * Forcing the mode is needed for two cases:
 479 *
 480 * 1) If the dr_mode is set to either HOST or PERIPHERAL we force the
 481 * controller to stay in a particular mode regardless of ID pin
 482 * changes. We do this once during probe.
 483 *
 484 * 2) During probe we want to read reset values of the hw
 485 * configuration registers that are only available in either host or
 486 * device mode. We may need to force the mode if the current mode does
 487 * not allow us to access the register in the mode that we want.
 488 *
 489 * In either case it only makes sense to force the mode if the
 490 * controller hardware is OTG capable.
 491 *
 492 * Checks are done in this function to determine whether doing a force
 493 * would be valid or not.
 494 *
  495 * If a force is done, it requires an IDDIG debounce filter delay if
 496 * the filter is configured and enabled. We poll the current mode of
 497 * the controller to account for this delay.
 498 *
 499 * @hsotg: Programming view of DWC_otg controller
 500 * @host: Host mode flag
 501 */
 502void dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
 503{
 504	u32 gusbcfg;
 505	u32 set;
 506	u32 clear;
 507
 508	dev_dbg(hsotg->dev, "Forcing mode to %s\n", host ? "host" : "device");
 509
 510	/*
 511	 * Force mode has no effect if the hardware is not OTG.
 512	 */
 513	if (!dwc2_hw_is_otg(hsotg))
 514		return;
 515
 516	/*
 517	 * If dr_mode is either peripheral or host only, there is no
 518	 * need to ever force the mode to the opposite mode.
 519	 */
 520	if (WARN_ON(host && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
 521		return;
 522
 523	if (WARN_ON(!host && hsotg->dr_mode == USB_DR_MODE_HOST))
 524		return;
 525
 526	gusbcfg = dwc2_readl(hsotg, GUSBCFG);
 527
 528	set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
 529	clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
 530
 531	gusbcfg &= ~clear;
 532	gusbcfg |= set;
 533	dwc2_writel(hsotg, gusbcfg, GUSBCFG);
 534
 535	dwc2_wait_for_mode(hsotg, host);
 536	return;
 537}
 538
 539/**
 540 * dwc2_clear_force_mode() - Clears the force mode bits.
 541 *
 542 * After clearing the bits, wait up to 100 ms to account for any
 543 * potential IDDIG filter delay. We can't know if we expect this delay
 544 * or not because the value of the connector ID status is affected by
 545 * the force mode. We only need to call this once during probe if
 546 * dr_mode == OTG.
 547 *
 548 * @hsotg: Programming view of DWC_otg controller
 549 */
 550static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
 551{
 552	u32 gusbcfg;
 553
 554	if (!dwc2_hw_is_otg(hsotg))
 555		return;
 556
 557	dev_dbg(hsotg->dev, "Clearing force mode bits\n");
 558
 559	gusbcfg = dwc2_readl(hsotg, GUSBCFG);
 560	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
 561	gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
 562	dwc2_writel(hsotg, gusbcfg, GUSBCFG);
 563
 564	if (dwc2_iddig_filter_enabled(hsotg))
 565		msleep(100);
 566}
 567
 568/*
 569 * Sets or clears force mode based on the dr_mode parameter.
 570 */
 571void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
 572{
 573	switch (hsotg->dr_mode) {
 574	case USB_DR_MODE_HOST:
 575		/*
  576		 * NOTE: This is required for some Rockchip SoC based
  577		 * platforms with a host-only dwc2.
 578		 */
 579		if (!dwc2_hw_is_otg(hsotg))
 580			msleep(50);
 581
 582		break;
 583	case USB_DR_MODE_PERIPHERAL:
 584		dwc2_force_mode(hsotg, false);
 585		break;
 586	case USB_DR_MODE_OTG:
 587		dwc2_clear_force_mode(hsotg);
 588		break;
 589	default:
 590		dev_warn(hsotg->dev, "%s() Invalid dr_mode=%d\n",
 591			 __func__, hsotg->dr_mode);
 592		break;
 593	}
 594}
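/*
 * Editor's note: an illustrative sketch (not part of the kernel file) of how
 * a probe path might apply dr_mode once, after a soft reset, so a host-only
 * or peripheral-only configuration latches its force-mode bit and an OTG
 * configuration starts with both bits cleared. The function name is
 * hypothetical.
 */
static int dwc2_example_apply_dr_mode(struct dwc2_hsotg *hsotg)
{
	int ret;

	ret = dwc2_core_reset(hsotg, false);
	if (ret)
		return ret;

	dwc2_force_dr_mode(hsotg);	/* acts on hsotg->dr_mode */
	return 0;
}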
 595
 596/*
 597 * dwc2_enable_acg - enable active clock gating feature
 598 */
 599void dwc2_enable_acg(struct dwc2_hsotg *hsotg)
 600{
 601	if (hsotg->params.acg_enable) {
 602		u32 pcgcctl1 = dwc2_readl(hsotg, PCGCCTL1);
 603
 604		dev_dbg(hsotg->dev, "Enabling Active Clock Gating\n");
 605		pcgcctl1 |= PCGCCTL1_GATEEN;
 606		dwc2_writel(hsotg, pcgcctl1, PCGCCTL1);
 607	}
 608}
 609
 610/**
 611 * dwc2_dump_host_registers() - Prints the host registers
 612 *
 613 * @hsotg: Programming view of DWC_otg controller
 614 *
 615 * NOTE: This function will be removed once the peripheral controller code
 616 * is integrated and the driver is stable
 617 */
 618void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
 619{
 620#ifdef DEBUG
 621	u32 __iomem *addr;
 622	int i;
 623
 624	dev_dbg(hsotg->dev, "Host Global Registers\n");
 625	addr = hsotg->regs + HCFG;
 626	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
 627		(unsigned long)addr, dwc2_readl(hsotg, HCFG));
 628	addr = hsotg->regs + HFIR;
 629	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
 630		(unsigned long)addr, dwc2_readl(hsotg, HFIR));
 631	addr = hsotg->regs + HFNUM;
 632	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
 633		(unsigned long)addr, dwc2_readl(hsotg, HFNUM));
 634	addr = hsotg->regs + HPTXSTS;
 635	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
 636		(unsigned long)addr, dwc2_readl(hsotg, HPTXSTS));
 637	addr = hsotg->regs + HAINT;
 638	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
 639		(unsigned long)addr, dwc2_readl(hsotg, HAINT));
 640	addr = hsotg->regs + HAINTMSK;
 641	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
 642		(unsigned long)addr, dwc2_readl(hsotg, HAINTMSK));
 643	if (hsotg->params.dma_desc_enable) {
 644		addr = hsotg->regs + HFLBADDR;
 645		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
 646			(unsigned long)addr, dwc2_readl(hsotg, HFLBADDR));
 647	}
 648
 649	addr = hsotg->regs + HPRT0;
 650	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
 651		(unsigned long)addr, dwc2_readl(hsotg, HPRT0));
 652
 653	for (i = 0; i < hsotg->params.host_channels; i++) {
 654		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
 655		addr = hsotg->regs + HCCHAR(i);
 656		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
 657			(unsigned long)addr, dwc2_readl(hsotg, HCCHAR(i)));
 658		addr = hsotg->regs + HCSPLT(i);
 659		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
 660			(unsigned long)addr, dwc2_readl(hsotg, HCSPLT(i)));
 661		addr = hsotg->regs + HCINT(i);
 662		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
 663			(unsigned long)addr, dwc2_readl(hsotg, HCINT(i)));
 664		addr = hsotg->regs + HCINTMSK(i);
 665		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
 666			(unsigned long)addr, dwc2_readl(hsotg, HCINTMSK(i)));
 667		addr = hsotg->regs + HCTSIZ(i);
 668		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
 669			(unsigned long)addr, dwc2_readl(hsotg, HCTSIZ(i)));
 670		addr = hsotg->regs + HCDMA(i);
 671		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
 672			(unsigned long)addr, dwc2_readl(hsotg, HCDMA(i)));
 673		if (hsotg->params.dma_desc_enable) {
 674			addr = hsotg->regs + HCDMAB(i);
 675			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
 676				(unsigned long)addr, dwc2_readl(hsotg,
 677								HCDMAB(i)));
 678		}
 679	}
 680#endif
 681}
 682
 683/**
 684 * dwc2_dump_global_registers() - Prints the core global registers
 685 *
 686 * @hsotg: Programming view of DWC_otg controller
 687 *
 688 * NOTE: This function will be removed once the peripheral controller code
 689 * is integrated and the driver is stable
 690 */
 691void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
 692{
 693#ifdef DEBUG
 694	u32 __iomem *addr;
 695
 696	dev_dbg(hsotg->dev, "Core Global Registers\n");
 697	addr = hsotg->regs + GOTGCTL;
 698	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
 699		(unsigned long)addr, dwc2_readl(hsotg, GOTGCTL));
 700	addr = hsotg->regs + GOTGINT;
 701	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
 702		(unsigned long)addr, dwc2_readl(hsotg, GOTGINT));
 703	addr = hsotg->regs + GAHBCFG;
 704	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
 705		(unsigned long)addr, dwc2_readl(hsotg, GAHBCFG));
 706	addr = hsotg->regs + GUSBCFG;
 707	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
 708		(unsigned long)addr, dwc2_readl(hsotg, GUSBCFG));
 709	addr = hsotg->regs + GRSTCTL;
 710	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
 711		(unsigned long)addr, dwc2_readl(hsotg, GRSTCTL));
 712	addr = hsotg->regs + GINTSTS;
 713	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
 714		(unsigned long)addr, dwc2_readl(hsotg, GINTSTS));
 715	addr = hsotg->regs + GINTMSK;
 716	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
 717		(unsigned long)addr, dwc2_readl(hsotg, GINTMSK));
 718	addr = hsotg->regs + GRXSTSR;
 719	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
 720		(unsigned long)addr, dwc2_readl(hsotg, GRXSTSR));
 721	addr = hsotg->regs + GRXFSIZ;
 722	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
 723		(unsigned long)addr, dwc2_readl(hsotg, GRXFSIZ));
 724	addr = hsotg->regs + GNPTXFSIZ;
 725	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
 726		(unsigned long)addr, dwc2_readl(hsotg, GNPTXFSIZ));
 727	addr = hsotg->regs + GNPTXSTS;
 728	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
 729		(unsigned long)addr, dwc2_readl(hsotg, GNPTXSTS));
 730	addr = hsotg->regs + GI2CCTL;
 731	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
 732		(unsigned long)addr, dwc2_readl(hsotg, GI2CCTL));
 733	addr = hsotg->regs + GPVNDCTL;
 734	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
 735		(unsigned long)addr, dwc2_readl(hsotg, GPVNDCTL));
 736	addr = hsotg->regs + GGPIO;
 737	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
 738		(unsigned long)addr, dwc2_readl(hsotg, GGPIO));
 739	addr = hsotg->regs + GUID;
 740	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
 741		(unsigned long)addr, dwc2_readl(hsotg, GUID));
 742	addr = hsotg->regs + GSNPSID;
 743	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
 744		(unsigned long)addr, dwc2_readl(hsotg, GSNPSID));
 745	addr = hsotg->regs + GHWCFG1;
 746	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
 747		(unsigned long)addr, dwc2_readl(hsotg, GHWCFG1));
 748	addr = hsotg->regs + GHWCFG2;
 749	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
 750		(unsigned long)addr, dwc2_readl(hsotg, GHWCFG2));
 751	addr = hsotg->regs + GHWCFG3;
 752	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
 753		(unsigned long)addr, dwc2_readl(hsotg, GHWCFG3));
 754	addr = hsotg->regs + GHWCFG4;
 755	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
 756		(unsigned long)addr, dwc2_readl(hsotg, GHWCFG4));
 757	addr = hsotg->regs + GLPMCFG;
 758	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
 759		(unsigned long)addr, dwc2_readl(hsotg, GLPMCFG));
 760	addr = hsotg->regs + GPWRDN;
 761	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
 762		(unsigned long)addr, dwc2_readl(hsotg, GPWRDN));
 763	addr = hsotg->regs + GDFIFOCFG;
 764	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
 765		(unsigned long)addr, dwc2_readl(hsotg, GDFIFOCFG));
 766	addr = hsotg->regs + HPTXFSIZ;
 767	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
 768		(unsigned long)addr, dwc2_readl(hsotg, HPTXFSIZ));
 769
 770	addr = hsotg->regs + PCGCTL;
 771	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
 772		(unsigned long)addr, dwc2_readl(hsotg, PCGCTL));
 773#endif
 774}
 775
 776/**
 777 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
 778 *
 779 * @hsotg: Programming view of DWC_otg controller
 780 * @num:   Tx FIFO to flush
 781 */
 782void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
 783{
 784	u32 greset;
 785
 786	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
 787
 788	/* Wait for AHB master IDLE state */
 789	if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000))
  790		dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSTCTL\n",
 791			 __func__);
 792
 793	greset = GRSTCTL_TXFFLSH;
 794	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
 795	dwc2_writel(hsotg, greset, GRSTCTL);
 796
 797	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 10000))
 798		dev_warn(hsotg->dev, "%s:  HANG! timeout GRSTCTL GRSTCTL_TXFFLSH\n",
 799			 __func__);
 800
 801	/* Wait for at least 3 PHY Clocks */
 802	udelay(1);
 803}
 804
 805/**
 806 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
 807 *
 808 * @hsotg: Programming view of DWC_otg controller
 809 */
 810void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
 811{
 812	u32 greset;
 813
 814	dev_vdbg(hsotg->dev, "%s()\n", __func__);
 815
 816	/* Wait for AHB master IDLE state */
 817	if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000))
  818		dev_warn(hsotg->dev, "%s: HANG! AHB Idle GRSTCTL\n",
 819			 __func__);
 820
 821	greset = GRSTCTL_RXFFLSH;
 822	dwc2_writel(hsotg, greset, GRSTCTL);
 823
 824	/* Wait for RxFIFO flush done */
 825	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_RXFFLSH, 10000))
 826		dev_warn(hsotg->dev, "%s: HANG! timeout GRSTCTL GRSTCTL_RXFFLSH\n",
 827			 __func__);
 828
 829	/* Wait for at least 3 PHY Clocks */
 830	udelay(1);
 831}
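/*
 * Editor's note: an illustrative sketch (not part of the kernel file): a full
 * FIFO cleanup flushes each Tx FIFO and then the shared Rx FIFO using the two
 * helpers above. The function name and the FIFO count parameter are
 * hypothetical.
 */
static void dwc2_example_flush_all_fifos(struct dwc2_hsotg *hsotg,
					 int num_tx_fifos)
{
	int i;

	for (i = 0; i < num_tx_fifos; i++)
		dwc2_flush_tx_fifo(hsotg, i);

	dwc2_flush_rx_fifo(hsotg);
}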
 832
 833bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
 834{
 835	if (dwc2_readl(hsotg, GSNPSID) == 0xffffffff)
 836		return false;
 837	else
 838		return true;
 839}
 840
 841/**
 842 * dwc2_enable_global_interrupts() - Enables the controller's Global
 843 * Interrupt in the AHB Config register
 844 *
 845 * @hsotg: Programming view of DWC_otg controller
 846 */
 847void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
 848{
 849	u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
 850
 851	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
 852	dwc2_writel(hsotg, ahbcfg, GAHBCFG);
 853}
 854
 855/**
 856 * dwc2_disable_global_interrupts() - Disables the controller's Global
 857 * Interrupt in the AHB Config register
 858 *
 859 * @hsotg: Programming view of DWC_otg controller
 860 */
 861void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
 862{
 863	u32 ahbcfg = dwc2_readl(hsotg, GAHBCFG);
 864
 865	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
 866	dwc2_writel(hsotg, ahbcfg, GAHBCFG);
 867}
 868
 869/* Returns the controller's GHWCFG2.OTG_MODE. */
 870unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg)
 871{
 872	u32 ghwcfg2 = dwc2_readl(hsotg, GHWCFG2);
 873
 874	return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
 875		GHWCFG2_OP_MODE_SHIFT;
 876}
 877
 878/* Returns true if the controller is capable of DRD. */
 879bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
 880{
 881	unsigned int op_mode = dwc2_op_mode(hsotg);
 882
 883	return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
 884		(op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
 885		(op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
 886}
 887
 888/* Returns true if the controller is host-only. */
 889bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
 890{
 891	unsigned int op_mode = dwc2_op_mode(hsotg);
 892
 893	return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
 894		(op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
 895}
 896
 897/* Returns true if the controller is device-only. */
 898bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
 899{
 900	unsigned int op_mode = dwc2_op_mode(hsotg);
 901
 902	return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
 903		(op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
 904}
 905
 906/**
 907 * dwc2_hsotg_wait_bit_set - Waits for bit to be set.
 908 * @hsotg: Programming view of DWC_otg controller.
 909 * @offset: Register's offset where bit/bits must be set.
 910 * @mask: Mask of the bit/bits which must be set.
 911 * @timeout: Timeout to wait.
 912 *
 913 * Return: 0 if bit/bits are set or -ETIMEDOUT in case of timeout.
 914 */
 915int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
 916			    u32 timeout)
 917{
 918	u32 i;
 919
 920	for (i = 0; i < timeout; i++) {
 921		if (dwc2_readl(hsotg, offset) & mask)
 922			return 0;
 923		udelay(1);
 924	}
 925
 926	return -ETIMEDOUT;
 927}
 928
 929/**
 930 * dwc2_hsotg_wait_bit_clear - Waits for bit to be clear.
 931 * @hsotg: Programming view of DWC_otg controller.
  932 * @offset: Register's offset where bit/bits must be cleared.
  933 * @mask: Mask of the bit/bits which must be cleared.
  934 * @timeout: Timeout to wait.
  935 *
  936 * Return: 0 if bit/bits are cleared or -ETIMEDOUT in case of timeout.
 937 */
 938int dwc2_hsotg_wait_bit_clear(struct dwc2_hsotg *hsotg, u32 offset, u32 mask,
 939			      u32 timeout)
 940{
 941	u32 i;
 942
 943	for (i = 0; i < timeout; i++) {
 944		if (!(dwc2_readl(hsotg, offset) & mask))
 945			return 0;
 946		udelay(1);
 947	}
 948
 949	return -ETIMEDOUT;
 950}
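/*
 * Editor's note: an illustrative sketch (not part of the kernel file) of the
 * usual pattern for the two polling helpers above: kick off a self-clearing
 * operation, then wait for the hardware to finish within a timeout counted in
 * one-microsecond iterations. The function name is hypothetical.
 */
static int dwc2_example_kick_and_wait(struct dwc2_hsotg *hsotg)
{
	/* Trigger a Tx FIFO flush, which hardware clears when complete */
	dwc2_writel(hsotg, GRSTCTL_TXFFLSH, GRSTCTL);

	/* Poll for up to 10000 iterations of 1 us each */
	return dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH,
					 10000);
}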
 951
 952/*
 953 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 954 * PHY type
 955 */
 956void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
 957{
 958	u32 hcfg, val;
 959
 960	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
 961	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
 962	     hsotg->params.ulpi_fs_ls) ||
 963	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
 964		/* Full speed PHY */
 965		val = HCFG_FSLSPCLKSEL_48_MHZ;
 966	} else {
 967		/* High speed PHY running at full speed or high speed */
 968		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
 969	}
 970
 971	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
 972	hcfg = dwc2_readl(hsotg, HCFG);
 973	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
 974	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
 975	dwc2_writel(hsotg, hcfg, HCFG);
 976}
 977
 978static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 979{
 980	u32 usbcfg, ggpio, i2cctl;
 981	int retval = 0;
 982
 983	/*
 984	 * core_init() is now called on every switch so only call the
 985	 * following for the first time through
 986	 */
 987	if (select_phy) {
 988		dev_dbg(hsotg->dev, "FS PHY selected\n");
 989
 990		usbcfg = dwc2_readl(hsotg, GUSBCFG);
 991		if (!(usbcfg & GUSBCFG_PHYSEL)) {
 992			usbcfg |= GUSBCFG_PHYSEL;
 993			dwc2_writel(hsotg, usbcfg, GUSBCFG);
 994
 995			/* Reset after a PHY select */
 996			retval = dwc2_core_reset(hsotg, false);
 997
 998			if (retval) {
 999				dev_err(hsotg->dev,
1000					"%s: Reset failed, aborting", __func__);
1001				return retval;
1002			}
1003		}
1004
1005		if (hsotg->params.activate_stm_fs_transceiver) {
1006			ggpio = dwc2_readl(hsotg, GGPIO);
1007			if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
1008				dev_dbg(hsotg->dev, "Activating transceiver\n");
1009				/*
1010				 * STM32F4x9 uses the GGPIO register as general
1011				 * core configuration register.
1012				 */
1013				ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
1014				dwc2_writel(hsotg, ggpio, GGPIO);
1015			}
1016		}
1017	}
1018
1019	/*
1020	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
1021	 * do this on HNP Dev/Host mode switches (done in dev_init and
1022	 * host_init).
1023	 */
1024	if (dwc2_is_host_mode(hsotg))
1025		dwc2_init_fs_ls_pclk_sel(hsotg);
1026
1027	if (hsotg->params.i2c_enable) {
1028		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
1029
1030		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
1031		usbcfg = dwc2_readl(hsotg, GUSBCFG);
1032		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
1033		dwc2_writel(hsotg, usbcfg, GUSBCFG);
1034
1035		/* Program GI2CCTL.I2CEn */
1036		i2cctl = dwc2_readl(hsotg, GI2CCTL);
1037		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
1038		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
1039		i2cctl &= ~GI2CCTL_I2CEN;
1040		dwc2_writel(hsotg, i2cctl, GI2CCTL);
1041		i2cctl |= GI2CCTL_I2CEN;
1042		dwc2_writel(hsotg, i2cctl, GI2CCTL);
1043	}
1044
1045	return retval;
1046}
1047
1048static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
1049{
1050	u32 usbcfg, usbcfg_old;
1051	int retval = 0;
1052
1053	if (!select_phy)
1054		return 0;
1055
1056	usbcfg = dwc2_readl(hsotg, GUSBCFG);
1057	usbcfg_old = usbcfg;
1058
1059	/*
1060	 * HS PHY parameters. These parameters are preserved during soft reset
1061	 * so only program the first time. Do a soft reset immediately after
1062	 * setting phyif.
1063	 */
1064	switch (hsotg->params.phy_type) {
1065	case DWC2_PHY_TYPE_PARAM_ULPI:
1066		/* ULPI interface */
1067		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
1068		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
1069		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
1070		if (hsotg->params.phy_ulpi_ddr)
1071			usbcfg |= GUSBCFG_DDRSEL;
1072
1073		/* Set external VBUS indicator as needed. */
1074		if (hsotg->params.oc_disable)
1075			usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
1076				   GUSBCFG_INDICATORPASSTHROUGH);
1077		break;
1078	case DWC2_PHY_TYPE_PARAM_UTMI:
1079		/* UTMI+ interface */
1080		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
1081		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
1082		if (hsotg->params.phy_utmi_width == 16)
1083			usbcfg |= GUSBCFG_PHYIF16;
1084		break;
1085	default:
1086		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
1087		break;
1088	}
1089
1090	if (usbcfg != usbcfg_old) {
1091		dwc2_writel(hsotg, usbcfg, GUSBCFG);
1092
1093		/* Reset after setting the PHY parameters */
1094		retval = dwc2_core_reset(hsotg, false);
1095		if (retval) {
1096			dev_err(hsotg->dev,
1097				"%s: Reset failed, aborting", __func__);
1098			return retval;
1099		}
1100	}
1101
1102	return retval;
1103}
1104
1105static void dwc2_set_turnaround_time(struct dwc2_hsotg *hsotg)
1106{
1107	u32 usbcfg;
1108
1109	if (hsotg->params.phy_type != DWC2_PHY_TYPE_PARAM_UTMI)
1110		return;
1111
1112	usbcfg = dwc2_readl(hsotg, GUSBCFG);
1113
1114	usbcfg &= ~GUSBCFG_USBTRDTIM_MASK;
1115	if (hsotg->params.phy_utmi_width == 16)
1116		usbcfg |= 5 << GUSBCFG_USBTRDTIM_SHIFT;
1117	else
1118		usbcfg |= 9 << GUSBCFG_USBTRDTIM_SHIFT;
1119
1120	dwc2_writel(hsotg, usbcfg, GUSBCFG);
1121}
1122
1123int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
1124{
1125	u32 usbcfg;
1126	u32 otgctl;
1127	int retval = 0;
1128
1129	if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
1130	     hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
1131	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
1132		/* If FS/LS mode with FS/LS PHY */
1133		retval = dwc2_fs_phy_init(hsotg, select_phy);
1134		if (retval)
1135			return retval;
1136	} else {
1137		/* High speed PHY */
1138		retval = dwc2_hs_phy_init(hsotg, select_phy);
1139		if (retval)
1140			return retval;
1141
1142		if (dwc2_is_device_mode(hsotg))
1143			dwc2_set_turnaround_time(hsotg);
1144	}
1145
1146	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
1147	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
1148	    hsotg->params.ulpi_fs_ls) {
1149		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
1150		usbcfg = dwc2_readl(hsotg, GUSBCFG);
1151		usbcfg |= GUSBCFG_ULPI_FS_LS;
1152		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
1153		dwc2_writel(hsotg, usbcfg, GUSBCFG);
1154	} else {
1155		usbcfg = dwc2_readl(hsotg, GUSBCFG);
1156		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
1157		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
1158		dwc2_writel(hsotg, usbcfg, GUSBCFG);
1159	}
1160
1161	if (!hsotg->params.activate_ingenic_overcurrent_detection) {
1162		if (dwc2_is_host_mode(hsotg)) {
1163			otgctl = readl(hsotg->regs + GOTGCTL);
1164			otgctl |= GOTGCTL_VBVALOEN | GOTGCTL_VBVALOVAL;
1165			writel(otgctl, hsotg->regs + GOTGCTL);
1166		}
1167	}
1168
1169	return retval;
1170}
1171
1172MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
1173MODULE_AUTHOR("Synopsys, Inc.");
1174MODULE_LICENSE("Dual BSD/GPL");
v3.15
 
   1/*
   2 * core.c - DesignWare HS OTG Controller common routines
   3 *
   4 * Copyright (C) 2004-2013 Synopsys, Inc.
   5 *
   6 * Redistribution and use in source and binary forms, with or without
   7 * modification, are permitted provided that the following conditions
   8 * are met:
   9 * 1. Redistributions of source code must retain the above copyright
  10 *    notice, this list of conditions, and the following disclaimer,
  11 *    without modification.
  12 * 2. Redistributions in binary form must reproduce the above copyright
  13 *    notice, this list of conditions and the following disclaimer in the
  14 *    documentation and/or other materials provided with the distribution.
  15 * 3. The names of the above-listed copyright holders may not be used
  16 *    to endorse or promote products derived from this software without
  17 *    specific prior written permission.
  18 *
  19 * ALTERNATIVELY, this software may be distributed under the terms of the
  20 * GNU General Public License ("GPL") as published by the Free Software
  21 * Foundation; either version 2 of the License, or (at your option) any
  22 * later version.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
  25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35 */
  36
  37/*
  38 * The Core code provides basic services for accessing and managing the
  39 * DWC_otg hardware. These services are used by both the Host Controller
  40 * Driver and the Peripheral Controller Driver.
  41 */
  42#include <linux/kernel.h>
  43#include <linux/module.h>
  44#include <linux/moduleparam.h>
  45#include <linux/spinlock.h>
  46#include <linux/interrupt.h>
  47#include <linux/dma-mapping.h>
  48#include <linux/delay.h>
  49#include <linux/io.h>
  50#include <linux/slab.h>
  51#include <linux/usb.h>
  52
  53#include <linux/usb/hcd.h>
  54#include <linux/usb/ch11.h>
  55
  56#include "core.h"
  57#include "hcd.h"
  58
  59/**
  60 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
  61 * used in both device and host modes
 
  62 *
  63 * @hsotg: Programming view of the DWC_otg controller
  64 */
  65static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
  66{
  67	u32 intmsk;
  68
  69	/* Clear any pending OTG Interrupts */
  70	writel(0xffffffff, hsotg->regs + GOTGINT);
  71
  72	/* Clear any pending interrupts */
  73	writel(0xffffffff, hsotg->regs + GINTSTS);
  74
  75	/* Enable the interrupts in the GINTMSK */
  76	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
 
 
 
 
 
 
 
 
 
  77
  78	if (hsotg->core_params->dma_enable <= 0)
  79		intmsk |= GINTSTS_RXFLVL;
  80
  81	intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
  82		  GINTSTS_SESSREQINT;
  83
  84	writel(intmsk, hsotg->regs + GINTMSK);
  85}
  86
  87/*
  88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
  89 * PHY type
 
 
 
  90 */
  91static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
  92{
  93	u32 hcfg, val;
 
 
  94
  95	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
  96	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
  97	     hsotg->core_params->ulpi_fs_ls > 0) ||
  98	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
  99		/* Full speed PHY */
 100		val = HCFG_FSLSPCLKSEL_48_MHZ;
 101	} else {
 102		/* High speed PHY running at full speed or high speed */
 103		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
 104	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 105
 106	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
 107	hcfg = readl(hsotg->regs + HCFG);
 108	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
 109	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
 110	writel(hcfg, hsotg->regs + HCFG);
 111}
 112
 113/*
 114 * Do core a soft reset of the core.  Be careful with this because it
 115 * resets all the internal state machines of the core.
 
 
 
 116 */
 117static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
 
 118{
 119	u32 greset;
 120	int count = 0;
 121
 122	dev_vdbg(hsotg->dev, "%s()\n", __func__);
 123
 124	/* Wait for AHB master IDLE state */
 125	do {
 126		usleep_range(20000, 40000);
 127		greset = readl(hsotg->regs + GRSTCTL);
 128		if (++count > 50) {
 129			dev_warn(hsotg->dev,
 130				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
 131				 __func__, greset);
 132			return -EBUSY;
 133		}
 134	} while (!(greset & GRSTCTL_AHBIDLE));
 135
 136	/* Core Soft Reset */
 137	count = 0;
 138	greset |= GRSTCTL_CSFTRST;
 139	writel(greset, hsotg->regs + GRSTCTL);
 140	do {
 141		usleep_range(20000, 40000);
 142		greset = readl(hsotg->regs + GRSTCTL);
 143		if (++count > 50) {
 144			dev_warn(hsotg->dev,
 145				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
 146				 __func__, greset);
 147			return -EBUSY;
 148		}
 149	} while (greset & GRSTCTL_CSFTRST);
 150
 151	/*
 152	 * NOTE: This long sleep is _very_ important, otherwise the core will
 153	 * not stay in host mode after a connector ID change!
 
 154	 */
 155	usleep_range(150000, 200000);
 156
 157	return 0;
 
 
 158}
 159
 160static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 
 
 
 
 
 161{
 162	u32 usbcfg, i2cctl;
 163	int retval = 0;
 164
 165	/*
 166	 * core_init() is now called on every switch so only call the
 167	 * following for the first time through
 168	 */
 169	if (select_phy) {
 170		dev_dbg(hsotg->dev, "FS PHY selected\n");
 171		usbcfg = readl(hsotg->regs + GUSBCFG);
 172		usbcfg |= GUSBCFG_PHYSEL;
 173		writel(usbcfg, hsotg->regs + GUSBCFG);
 174
 175		/* Reset after a PHY select */
 176		retval = dwc2_core_reset(hsotg);
 177		if (retval) {
 178			dev_err(hsotg->dev, "%s() Reset failed, aborting",
 179					__func__);
 180			return retval;
 181		}
 182	}
 183
 184	/*
 185	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
 186	 * do this on HNP Dev/Host mode switches (done in dev_init and
 187	 * host_init).
 188	 */
 189	if (dwc2_is_host_mode(hsotg))
 190		dwc2_init_fs_ls_pclk_sel(hsotg);
 191
 192	if (hsotg->core_params->i2c_enable > 0) {
 193		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
 194
 195		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
 196		usbcfg = readl(hsotg->regs + GUSBCFG);
 197		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
 198		writel(usbcfg, hsotg->regs + GUSBCFG);
 199
 200		/* Program GI2CCTL.I2CEn */
 201		i2cctl = readl(hsotg->regs + GI2CCTL);
 202		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
 203		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
 204		i2cctl &= ~GI2CCTL_I2CEN;
 205		writel(i2cctl, hsotg->regs + GI2CCTL);
 206		i2cctl |= GI2CCTL_I2CEN;
 207		writel(i2cctl, hsotg->regs + GI2CCTL);
 208	}
 209
 210	return retval;
 211}
 212
 213static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 214{
 215	u32 usbcfg;
 216	int retval = 0;
 217
 218	if (!select_phy)
 219		return 0;
 220
 221	usbcfg = readl(hsotg->regs + GUSBCFG);
 222
 223	/*
 224	 * HS PHY parameters. These parameters are preserved during soft reset
 225	 * so only program the first time. Do a soft reset immediately after
 226	 * setting phyif.
 227	 */
 228	switch (hsotg->core_params->phy_type) {
 229	case DWC2_PHY_TYPE_PARAM_ULPI:
 230		/* ULPI interface */
 231		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
 232		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
 233		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
 234		if (hsotg->core_params->phy_ulpi_ddr > 0)
 235			usbcfg |= GUSBCFG_DDRSEL;
 236		break;
 237	case DWC2_PHY_TYPE_PARAM_UTMI:
 238		/* UTMI+ interface */
 239		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
 240		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
 241		if (hsotg->core_params->phy_utmi_width == 16)
 242			usbcfg |= GUSBCFG_PHYIF16;
 243		break;
 244	default:
 245		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
 246		break;
 247	}
 248
 249	writel(usbcfg, hsotg->regs + GUSBCFG);
 250
 251	/* Reset after setting the PHY parameters */
 252	retval = dwc2_core_reset(hsotg);
 253	if (retval) {
 254		dev_err(hsotg->dev, "%s() Reset failed, aborting",
 255				__func__);
 256		return retval;
 257	}
 258
 259	return retval;
 260}
 261
 262static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 263{
 264	u32 usbcfg;
 265	int retval = 0;
 266
 267	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
 268	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
 269		/* If FS mode with FS PHY */
 270		retval = dwc2_fs_phy_init(hsotg, select_phy);
 271		if (retval)
 272			return retval;
 
 
 
 
 
 
 
 
 273	} else {
 274		/* High speed PHY */
 275		retval = dwc2_hs_phy_init(hsotg, select_phy);
 276		if (retval)
 277			return retval;
 278	}
 
 279
 280	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
 281	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
 282	    hsotg->core_params->ulpi_fs_ls > 0) {
 283		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
 284		usbcfg = readl(hsotg->regs + GUSBCFG);
 285		usbcfg |= GUSBCFG_ULPI_FS_LS;
 286		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
 287		writel(usbcfg, hsotg->regs + GUSBCFG);
 288	} else {
 289		usbcfg = readl(hsotg->regs + GUSBCFG);
 290		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
 291		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
 292		writel(usbcfg, hsotg->regs + GUSBCFG);
 293	}
 294
 295	return retval;
 296}
 297
 298static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
 299{
 300	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
 301
 302	switch (hsotg->hw_params.arch) {
 303	case GHWCFG2_EXT_DMA_ARCH:
 304		dev_err(hsotg->dev, "External DMA Mode not supported\n");
 305		return -EINVAL;
 306
 307	case GHWCFG2_INT_DMA_ARCH:
 308		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
 309		if (hsotg->core_params->ahbcfg != -1) {
 310			ahbcfg &= GAHBCFG_CTRL_MASK;
 311			ahbcfg |= hsotg->core_params->ahbcfg &
 312				  ~GAHBCFG_CTRL_MASK;
 313		}
 314		break;
 315
 316	case GHWCFG2_SLAVE_ONLY_ARCH:
 317	default:
 318		dev_dbg(hsotg->dev, "Slave Only Mode\n");
 319		break;
 320	}
 
 321
 322	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
 323		hsotg->core_params->dma_enable,
 324		hsotg->core_params->dma_desc_enable);
 325
 326	if (hsotg->core_params->dma_enable > 0) {
 327		if (hsotg->core_params->dma_desc_enable > 0)
 328			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
 329		else
 330			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
 331	} else {
 332		dev_dbg(hsotg->dev, "Using Slave mode\n");
 333		hsotg->core_params->dma_desc_enable = 0;
 334	}
 335
 336	if (hsotg->core_params->dma_enable > 0)
 337		ahbcfg |= GAHBCFG_DMA_EN;
 338
 339	writel(ahbcfg, hsotg->regs + GAHBCFG);
 340
 341	return 0;
 342}
 343
 344static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
 345{
 346	u32 usbcfg;
 347
 348	usbcfg = readl(hsotg->regs + GUSBCFG);
 349	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
 350
 351	switch (hsotg->hw_params.op_mode) {
 352	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
 353		if (hsotg->core_params->otg_cap ==
 354				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
 355			usbcfg |= GUSBCFG_HNPCAP;
 356		if (hsotg->core_params->otg_cap !=
 357				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
 358			usbcfg |= GUSBCFG_SRPCAP;
 359		break;
 360
 361	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
 362	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
 363	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
 364		if (hsotg->core_params->otg_cap !=
 365				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
 366			usbcfg |= GUSBCFG_SRPCAP;
 367		break;
 368
 369	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
 370	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
 371	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
 372	default:
 373		break;
 374	}
 375
 376	writel(usbcfg, hsotg->regs + GUSBCFG);
 377}
 378
 379/**
 380 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 381 * prepares the core for device mode or host mode operation
 382 *
 383 * @hsotg:      Programming view of the DWC_otg controller
 384 * @select_phy: If true then also set the Phy type
 385 * @irq:        If >= 0, the irq to register
 386 */
 387int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
 
 388{
 389	u32 usbcfg, otgctl;
 390	int retval;
 391
 392	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
 393
 394	usbcfg = readl(hsotg->regs + GUSBCFG);
 395
 396	/* Set ULPI External VBUS bit if needed */
 397	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
 398	if (hsotg->core_params->phy_ulpi_ext_vbus ==
 399				DWC2_PHY_ULPI_EXTERNAL_VBUS)
 400		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 401
 402	/* Set external TS Dline pulsing bit if needed */
 403	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
 404	if (hsotg->core_params->ts_dline > 0)
 405		usbcfg |= GUSBCFG_TERMSELDLPULSE;
 406
 407	writel(usbcfg, hsotg->regs + GUSBCFG);
 408
 409	/* Reset the Controller */
 410	retval = dwc2_core_reset(hsotg);
 411	if (retval) {
 412		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
 413				__func__);
 414		return retval;
 415	}
 416
 417	/*
 418	 * This needs to happen in FS mode before any other programming occurs
 
 419	 */
 420	retval = dwc2_phy_init(hsotg, select_phy);
 421	if (retval)
 422		return retval;
 423
 424	/* Program the GAHBCFG Register */
 425	retval = dwc2_gahbcfg_init(hsotg);
 426	if (retval)
 427		return retval;
 428
 429	/* Program the GUSBCFG register */
 430	dwc2_gusbcfg_init(hsotg);
 431
 432	/* Program the GOTGCTL register */
 433	otgctl = readl(hsotg->regs + GOTGCTL);
 434	otgctl &= ~GOTGCTL_OTGVER;
 435	if (hsotg->core_params->otg_ver > 0)
 436		otgctl |= GOTGCTL_OTGVER;
 437	writel(otgctl, hsotg->regs + GOTGCTL);
 438	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
 439
 440	/* Clear the SRP success bit for FS-I2c */
 441	hsotg->srp_success = 0;
 442
 443	if (irq >= 0) {
 444		dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
 445			irq);
 446		retval = devm_request_irq(hsotg->dev, irq,
 447					  dwc2_handle_common_intr, IRQF_SHARED,
 448					  dev_name(hsotg->dev), hsotg);
 449		if (retval)
 450			return retval;
 451	}
 452
 453	/* Enable common interrupts */
 454	dwc2_enable_common_interrupts(hsotg);
 455
 456	/*
 457	 * Do device or host intialization based on mode during PCD and
 458	 * HCD initialization
 459	 */
 460	if (dwc2_is_host_mode(hsotg)) {
 461		dev_dbg(hsotg->dev, "Host Mode\n");
 462		hsotg->op_state = OTG_STATE_A_HOST;
 463	} else {
 464		dev_dbg(hsotg->dev, "Device Mode\n");
 465		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
 466	}
 467
 468	return 0;
 469}
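
/*
 * Typical usage (an illustrative sketch only; "irq" stands for whatever
 * interrupt line the platform or PCI glue code looked up):
 *
 *	retval = dwc2_core_init(hsotg, true, irq);
 *	if (retval) {
 *		dev_err(hsotg->dev, "core init failed: %d\n", retval);
 *		return retval;
 *	}
 *
 * Passing select_phy = true also programs the PHY type; passing a
 * negative irq skips registration of the common interrupt handler.
 */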
 470
 471/**
 472 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 473 *
 474 * @hsotg: Programming view of DWC_otg controller
 475 */
 476void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
 477{
 478	u32 intmsk;
 479
 480	dev_dbg(hsotg->dev, "%s()\n", __func__);
 481
 482	/* Disable all interrupts */
 483	writel(0, hsotg->regs + GINTMSK);
 484	writel(0, hsotg->regs + HAINTMSK);
 485
 486	/* Enable the common interrupts */
 487	dwc2_enable_common_interrupts(hsotg);
 488
 489	/* Enable host mode interrupts without disturbing common interrupts */
 490	intmsk = readl(hsotg->regs + GINTMSK);
 491	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
 492	writel(intmsk, hsotg->regs + GINTMSK);
 493}
 494
 495/**
 496 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 497 *
 498 * @hsotg: Programming view of DWC_otg controller
 499 */
 500void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
 501{
 502	u32 intmsk = readl(hsotg->regs + GINTMSK);
 503
 504	/* Disable host mode interrupts without disturbing common interrupts */
 505	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
 506		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
 507	writel(intmsk, hsotg->regs + GINTMSK);
 508}
 509
 510static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
 511{
 512	struct dwc2_core_params *params = hsotg->core_params;
 513	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
 514
 515	if (!params->enable_dynamic_fifo)
 516		return;
 517
 518	/* Rx FIFO */
 519	grxfsiz = readl(hsotg->regs + GRXFSIZ);
 520	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
 521	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
 522	grxfsiz |= params->host_rx_fifo_size <<
 523		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
 524	writel(grxfsiz, hsotg->regs + GRXFSIZ);
 525	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
 526
 527	/* Non-periodic Tx FIFO */
 528	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
 529		readl(hsotg->regs + GNPTXFSIZ));
 530	nptxfsiz = params->host_nperio_tx_fifo_size <<
 531		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
 532	nptxfsiz |= params->host_rx_fifo_size <<
 533		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
 534	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
 535	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
 536		readl(hsotg->regs + GNPTXFSIZ));
 537
 538	/* Periodic Tx FIFO */
 539	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
 540		readl(hsotg->regs + HPTXFSIZ));
 541	hptxfsiz = params->host_perio_tx_fifo_size <<
 542		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
 543	hptxfsiz |= (params->host_rx_fifo_size +
 544		     params->host_nperio_tx_fifo_size) <<
 545		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
 546	writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
 547	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
 548		readl(hsotg->regs + HPTXFSIZ));
 549
 550	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
 551	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
 552		/*
 553		 * Global DFIFOCFG calculation for Host mode -
 554		 * include RxFIFO, NPTXFIFO and HPTXFIFO
 555		 */
 556		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
 557		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
 558		dfifocfg |= (params->host_rx_fifo_size +
 559			     params->host_nperio_tx_fifo_size +
 560			     params->host_perio_tx_fifo_size) <<
 561			    GDFIFOCFG_EPINFOBASE_SHIFT &
 562			    GDFIFOCFG_EPINFOBASE_MASK;
 563		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
 564	}
 565}
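
/*
 * Worked example of the layout programmed above (sizes are hypothetical
 * and given in 32-bit words): with host_rx_fifo_size = 512,
 * host_nperio_tx_fifo_size = 256 and host_perio_tx_fifo_size = 256, the
 * FIFOs are packed back to back in the FIFO RAM:
 *
 *	GRXFSIZ:   depth 512                (implicitly starts at word 0)
 *	GNPTXFSIZ: depth 256, start 512
 *	HPTXFSIZ:  depth 256, start 512 + 256 = 768
 *
 * and, on cores that need it, GDFIFOCFG.EpInfoBase is pointed at the
 * first free word, 512 + 256 + 256 = 1024.
 */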
 566
 567/**
 568 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 569 * Host mode
 570 *
 571 * @hsotg: Programming view of DWC_otg controller
 572 *
 573 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 574 * request queues. Host channels are reset to ensure that they are ready for
 575 * performing transfers.
 576 */
 577void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 578{
 579	u32 hcfg, hfir, otgctl;
 580
 581	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
 582
 583	/* Restart the Phy Clock */
 584	writel(0, hsotg->regs + PCGCTL);
 585
 586	/* Initialize Host Configuration Register */
 587	dwc2_init_fs_ls_pclk_sel(hsotg);
 588	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
 589		hcfg = readl(hsotg->regs + HCFG);
 590		hcfg |= HCFG_FSLSSUPP;
 591		writel(hcfg, hsotg->regs + HCFG);
 592	}
 593
 594	/*
  595	 * The HFIR_RLDCTRL bit allows the HFIR register to be reloaded
  596	 * dynamically at runtime. The bit itself must be programmed during
  597	 * initial configuration and must not be changed afterwards.
 598	 */
 599	if (hsotg->core_params->reload_ctl > 0) {
 600		hfir = readl(hsotg->regs + HFIR);
 601		hfir |= HFIR_RLDCTRL;
 602		writel(hfir, hsotg->regs + HFIR);
 603	}
 604
 605	if (hsotg->core_params->dma_desc_enable > 0) {
 606		u32 op_mode = hsotg->hw_params.op_mode;
 607		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
 608		    !hsotg->hw_params.dma_desc_enable ||
 609		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
 610		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
 611		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
 612			dev_err(hsotg->dev,
 613				"Hardware does not support descriptor DMA mode -\n");
 614			dev_err(hsotg->dev,
 615				"falling back to buffer DMA mode.\n");
 616			hsotg->core_params->dma_desc_enable = 0;
 617		} else {
 618			hcfg = readl(hsotg->regs + HCFG);
 619			hcfg |= HCFG_DESCDMA;
 620			writel(hcfg, hsotg->regs + HCFG);
 621		}
 622	}
 623
 624	/* Configure data FIFO sizes */
 625	dwc2_config_fifos(hsotg);
 626
 627	/* TODO - check this */
 628	/* Clear Host Set HNP Enable in the OTG Control Register */
 629	otgctl = readl(hsotg->regs + GOTGCTL);
 630	otgctl &= ~GOTGCTL_HSTSETHNPEN;
 631	writel(otgctl, hsotg->regs + GOTGCTL);
 632
 633	/* Make sure the FIFOs are flushed */
 634	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
 635	dwc2_flush_rx_fifo(hsotg);
 636
 637	/* Clear Host Set HNP Enable in the OTG Control Register */
 638	otgctl = readl(hsotg->regs + GOTGCTL);
 639	otgctl &= ~GOTGCTL_HSTSETHNPEN;
 640	writel(otgctl, hsotg->regs + GOTGCTL);
 641
 642	if (hsotg->core_params->dma_desc_enable <= 0) {
 643		int num_channels, i;
 644		u32 hcchar;
 645
 646		/* Flush out any leftover queued requests */
 647		num_channels = hsotg->core_params->host_channels;
 648		for (i = 0; i < num_channels; i++) {
 649			hcchar = readl(hsotg->regs + HCCHAR(i));
 650			hcchar &= ~HCCHAR_CHENA;
 651			hcchar |= HCCHAR_CHDIS;
 652			hcchar &= ~HCCHAR_EPDIR;
 653			writel(hcchar, hsotg->regs + HCCHAR(i));
 654		}
 655
 656		/* Halt all channels to put them into a known state */
 657		for (i = 0; i < num_channels; i++) {
 658			int count = 0;
 659
 660			hcchar = readl(hsotg->regs + HCCHAR(i));
 661			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
 662			hcchar &= ~HCCHAR_EPDIR;
 663			writel(hcchar, hsotg->regs + HCCHAR(i));
 664			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
 665				__func__, i);
 666			do {
 667				hcchar = readl(hsotg->regs + HCCHAR(i));
 668				if (++count > 1000) {
 669					dev_err(hsotg->dev,
 670						"Unable to clear enable on channel %d\n",
 671						i);
 672					break;
 673				}
 674				udelay(1);
 675			} while (hcchar & HCCHAR_CHENA);
 676		}
 677	}
 678
 679	/* Turn on the vbus power */
 680	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
 681	if (hsotg->op_state == OTG_STATE_A_HOST) {
 682		u32 hprt0 = dwc2_read_hprt0(hsotg);
 683
 684		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
 685			!!(hprt0 & HPRT0_PWR));
 686		if (!(hprt0 & HPRT0_PWR)) {
 687			hprt0 |= HPRT0_PWR;
 688			writel(hprt0, hsotg->regs + HPRT0);
 689		}
 690	}
 691
 692	dwc2_enable_host_interrupts(hsotg);
 693}
 694
 695static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
 696				      struct dwc2_host_chan *chan)
 697{
 698	u32 hcintmsk = HCINTMSK_CHHLTD;
 699
 700	switch (chan->ep_type) {
 701	case USB_ENDPOINT_XFER_CONTROL:
 702	case USB_ENDPOINT_XFER_BULK:
 703		dev_vdbg(hsotg->dev, "control/bulk\n");
 704		hcintmsk |= HCINTMSK_XFERCOMPL;
 705		hcintmsk |= HCINTMSK_STALL;
 706		hcintmsk |= HCINTMSK_XACTERR;
 707		hcintmsk |= HCINTMSK_DATATGLERR;
 708		if (chan->ep_is_in) {
 709			hcintmsk |= HCINTMSK_BBLERR;
 710		} else {
 711			hcintmsk |= HCINTMSK_NAK;
 712			hcintmsk |= HCINTMSK_NYET;
 713			if (chan->do_ping)
 714				hcintmsk |= HCINTMSK_ACK;
 715		}
 716
 717		if (chan->do_split) {
 718			hcintmsk |= HCINTMSK_NAK;
 719			if (chan->complete_split)
 720				hcintmsk |= HCINTMSK_NYET;
 721			else
 722				hcintmsk |= HCINTMSK_ACK;
 723		}
 724
 725		if (chan->error_state)
 726			hcintmsk |= HCINTMSK_ACK;
 727		break;
 728
 729	case USB_ENDPOINT_XFER_INT:
 730		if (dbg_perio())
 731			dev_vdbg(hsotg->dev, "intr\n");
 732		hcintmsk |= HCINTMSK_XFERCOMPL;
 733		hcintmsk |= HCINTMSK_NAK;
 734		hcintmsk |= HCINTMSK_STALL;
 735		hcintmsk |= HCINTMSK_XACTERR;
 736		hcintmsk |= HCINTMSK_DATATGLERR;
 737		hcintmsk |= HCINTMSK_FRMOVRUN;
 738
 739		if (chan->ep_is_in)
 740			hcintmsk |= HCINTMSK_BBLERR;
 741		if (chan->error_state)
 742			hcintmsk |= HCINTMSK_ACK;
 743		if (chan->do_split) {
 744			if (chan->complete_split)
 745				hcintmsk |= HCINTMSK_NYET;
 746			else
 747				hcintmsk |= HCINTMSK_ACK;
 748		}
 749		break;
 750
 751	case USB_ENDPOINT_XFER_ISOC:
 752		if (dbg_perio())
 753			dev_vdbg(hsotg->dev, "isoc\n");
 754		hcintmsk |= HCINTMSK_XFERCOMPL;
 755		hcintmsk |= HCINTMSK_FRMOVRUN;
 756		hcintmsk |= HCINTMSK_ACK;
 757
 758		if (chan->ep_is_in) {
 759			hcintmsk |= HCINTMSK_XACTERR;
 760			hcintmsk |= HCINTMSK_BBLERR;
 761		}
 762		break;
 763	default:
 764		dev_err(hsotg->dev, "## Unknown EP type ##\n");
 765		break;
 766	}
 767
 768	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
 769	if (dbg_hc(chan))
 770		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
 771}
 772
 773static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
 774				    struct dwc2_host_chan *chan)
 775{
 776	u32 hcintmsk = HCINTMSK_CHHLTD;
 777
 778	/*
 779	 * For Descriptor DMA mode core halts the channel on AHB error.
 780	 * Interrupt is not required.
 781	 */
 782	if (hsotg->core_params->dma_desc_enable <= 0) {
 783		if (dbg_hc(chan))
 784			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
 785		hcintmsk |= HCINTMSK_AHBERR;
 786	} else {
 787		if (dbg_hc(chan))
 788			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
 789		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
 790			hcintmsk |= HCINTMSK_XFERCOMPL;
 791	}
 792
 793	if (chan->error_state && !chan->do_split &&
 794	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
 795		if (dbg_hc(chan))
 796			dev_vdbg(hsotg->dev, "setting ACK\n");
 797		hcintmsk |= HCINTMSK_ACK;
 798		if (chan->ep_is_in) {
 799			hcintmsk |= HCINTMSK_DATATGLERR;
 800			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
 801				hcintmsk |= HCINTMSK_NAK;
 802		}
 803	}
 804
 805	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
 806	if (dbg_hc(chan))
 807		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
 808}
 809
 810static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
 811				struct dwc2_host_chan *chan)
 812{
 813	u32 intmsk;
 814
 815	if (hsotg->core_params->dma_enable > 0) {
 816		if (dbg_hc(chan))
 817			dev_vdbg(hsotg->dev, "DMA enabled\n");
 818		dwc2_hc_enable_dma_ints(hsotg, chan);
 819	} else {
 820		if (dbg_hc(chan))
 821			dev_vdbg(hsotg->dev, "DMA disabled\n");
 822		dwc2_hc_enable_slave_ints(hsotg, chan);
 823	}
 824
 825	/* Enable the top level host channel interrupt */
 826	intmsk = readl(hsotg->regs + HAINTMSK);
 827	intmsk |= 1 << chan->hc_num;
 828	writel(intmsk, hsotg->regs + HAINTMSK);
 829	if (dbg_hc(chan))
 830		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
 831
 832	/* Make sure host channel interrupts are enabled */
 833	intmsk = readl(hsotg->regs + GINTMSK);
 834	intmsk |= GINTSTS_HCHINT;
 835	writel(intmsk, hsotg->regs + GINTMSK);
 836	if (dbg_hc(chan))
 837		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
 838}
 839
 840/**
 841 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 842 * a specific endpoint
 843 *
 844 * @hsotg: Programming view of DWC_otg controller
 845 * @chan:  Information needed to initialize the host channel
 846 *
 847 * The HCCHARn register is set up with the characteristics specified in chan.
 848 * Host channel interrupts that may need to be serviced while this transfer is
 849 * in progress are enabled.
 850 */
 851void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
 852{
 853	u8 hc_num = chan->hc_num;
 854	u32 hcintmsk;
 855	u32 hcchar;
 856	u32 hcsplt = 0;
 857
 858	if (dbg_hc(chan))
 859		dev_vdbg(hsotg->dev, "%s()\n", __func__);
 860
 861	/* Clear old interrupt conditions for this host channel */
 862	hcintmsk = 0xffffffff;
 863	hcintmsk &= ~HCINTMSK_RESERVED14_31;
 864	writel(hcintmsk, hsotg->regs + HCINT(hc_num));
 865
 866	/* Enable channel interrupts required for this transfer */
 867	dwc2_hc_enable_ints(hsotg, chan);
 868
 869	/*
 870	 * Program the HCCHARn register with the endpoint characteristics for
 871	 * the current transfer
 872	 */
 873	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
 874	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
 875	if (chan->ep_is_in)
 876		hcchar |= HCCHAR_EPDIR;
 877	if (chan->speed == USB_SPEED_LOW)
 878		hcchar |= HCCHAR_LSPDDEV;
 879	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
 880	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
 881	writel(hcchar, hsotg->regs + HCCHAR(hc_num));
 882	if (dbg_hc(chan)) {
 883		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
 884			 hc_num, hcchar);
 885
 886		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
 887			 __func__, hc_num);
 888		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
 889			 chan->dev_addr);
 890		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
 891			 chan->ep_num);
 892		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
 893			 chan->ep_is_in);
 894		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
 895			 chan->speed == USB_SPEED_LOW);
 896		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
 897			 chan->ep_type);
 898		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
 899			 chan->max_packet);
 900	}
 901
 902	/* Program the HCSPLT register for SPLITs */
 903	if (chan->do_split) {
 904		if (dbg_hc(chan))
 905			dev_vdbg(hsotg->dev,
 906				 "Programming HC %d with split --> %s\n",
 907				 hc_num,
 908				 chan->complete_split ? "CSPLIT" : "SSPLIT");
 909		if (chan->complete_split)
 910			hcsplt |= HCSPLT_COMPSPLT;
 911		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
 912			  HCSPLT_XACTPOS_MASK;
 913		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
 914			  HCSPLT_HUBADDR_MASK;
 915		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
 916			  HCSPLT_PRTADDR_MASK;
 917		if (dbg_hc(chan)) {
 918			dev_vdbg(hsotg->dev, "	  comp split %d\n",
 919				 chan->complete_split);
 920			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
 921				 chan->xact_pos);
 922			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
 923				 chan->hub_addr);
 924			dev_vdbg(hsotg->dev, "	  hub port %d\n",
 925				 chan->hub_port);
 926			dev_vdbg(hsotg->dev, "	  is_in %d\n",
 927				 chan->ep_is_in);
 928			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
 929				 chan->max_packet);
 930			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
 931				 chan->xfer_len);
 932		}
 933	}
 934
 935	writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
 936}
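
/*
 * Illustrative sketch of the channel fields a caller fills in before
 * handing the channel to dwc2_hc_init() (values are made up; the real
 * HCD derives them from the URB and QH):
 *
 *	chan->hc_num     = 0;
 *	chan->dev_addr   = 3;
 *	chan->ep_num     = 1;
 *	chan->ep_is_in   = 1;
 *	chan->ep_type    = USB_ENDPOINT_XFER_BULK;
 *	chan->max_packet = 512;
 *	chan->speed      = USB_SPEED_HIGH;
 *	chan->do_split   = 0;
 *	dwc2_hc_init(hsotg, chan);
 */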
 937
 938/**
 939 * dwc2_hc_halt() - Attempts to halt a host channel
 940 *
 941 * @hsotg:       Controller register interface
 942 * @chan:        Host channel to halt
 943 * @halt_status: Reason for halting the channel
 944 *
 945 * This function should only be called in Slave mode or to abort a transfer in
 946 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 947 * controller halts the channel when the transfer is complete or a condition
 948 * occurs that requires application intervention.
 949 *
 950 * In slave mode, checks for a free request queue entry, then sets the Channel
 951 * Enable and Channel Disable bits of the Host Channel Characteristics
  952 * register of the specified channel to initiate the halt. If there is no free
 953 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 954 * register to flush requests for this channel. In the latter case, sets a
 955 * flag to indicate that the host channel needs to be halted when a request
 956 * queue slot is open.
 957 *
 958 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 959 * HCCHARn register. The controller ensures there is space in the request
 960 * queue before submitting the halt request.
 961 *
 962 * Some time may elapse before the core flushes any posted requests for this
 963 * host channel and halts. The Channel Halted interrupt handler completes the
 964 * deactivation of the host channel.
 965 */
 966void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
 967		  enum dwc2_halt_status halt_status)
 968{
 969	u32 nptxsts, hptxsts, hcchar;
 970
 971	if (dbg_hc(chan))
 972		dev_vdbg(hsotg->dev, "%s()\n", __func__);
 973	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
 974		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
 975
 976	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
 977	    halt_status == DWC2_HC_XFER_AHB_ERR) {
 978		/*
 979		 * Disable all channel interrupts except Ch Halted. The QTD
 980		 * and QH state associated with this transfer has been cleared
 981		 * (in the case of URB_DEQUEUE), so the channel needs to be
 982		 * shut down carefully to prevent crashes.
 983		 */
 984		u32 hcintmsk = HCINTMSK_CHHLTD;
 985
 986		dev_vdbg(hsotg->dev, "dequeue/error\n");
 987		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
 988
 989		/*
 990		 * Make sure no other interrupts besides halt are currently
 991		 * pending. Handling another interrupt could cause a crash due
 992		 * to the QTD and QH state.
 993		 */
 994		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
 995
 996		/*
 997		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
 998		 * even if the channel was already halted for some other
 999		 * reason
1000		 */
1001		chan->halt_status = halt_status;
1002
1003		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1004		if (!(hcchar & HCCHAR_CHENA)) {
1005			/*
1006			 * The channel is either already halted or it hasn't
1007			 * started yet. In DMA mode, the transfer may halt if
1008			 * it finishes normally or a condition occurs that
1009			 * requires driver intervention. Don't want to halt
1010			 * the channel again. In either Slave or DMA mode,
1011			 * it's possible that the transfer has been assigned
1012			 * to a channel, but not started yet when an URB is
1013			 * dequeued. Don't want to halt a channel that hasn't
1014			 * started yet.
1015			 */
1016			return;
1017		}
1018	}
1019	if (chan->halt_pending) {
1020		/*
1021		 * A halt has already been issued for this channel. This might
1022		 * happen when a transfer is aborted by a higher level in
1023		 * the stack.
1024		 */
1025		dev_vdbg(hsotg->dev,
1026			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1027			 __func__, chan->hc_num);
1028		return;
1029	}
1030
1031	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1032
1033	/* No need to set the bit in DDMA for disabling the channel */
1034	/* TODO check it everywhere channel is disabled */
1035	if (hsotg->core_params->dma_desc_enable <= 0) {
1036		if (dbg_hc(chan))
1037			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1038		hcchar |= HCCHAR_CHENA;
1039	} else {
1040		if (dbg_hc(chan))
1041			dev_dbg(hsotg->dev, "desc DMA enabled\n");
1042	}
1043	hcchar |= HCCHAR_CHDIS;
1044
1045	if (hsotg->core_params->dma_enable <= 0) {
1046		if (dbg_hc(chan))
1047			dev_vdbg(hsotg->dev, "DMA not enabled\n");
1048		hcchar |= HCCHAR_CHENA;
1049
1050		/* Check for space in the request queue to issue the halt */
1051		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1052		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1053			dev_vdbg(hsotg->dev, "control/bulk\n");
1054			nptxsts = readl(hsotg->regs + GNPTXSTS);
1055			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1056				dev_vdbg(hsotg->dev, "Disabling channel\n");
1057				hcchar &= ~HCCHAR_CHENA;
1058			}
1059		} else {
1060			if (dbg_perio())
1061				dev_vdbg(hsotg->dev, "isoc/intr\n");
1062			hptxsts = readl(hsotg->regs + HPTXSTS);
1063			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1064			    hsotg->queuing_high_bandwidth) {
1065				if (dbg_perio())
1066					dev_vdbg(hsotg->dev, "Disabling channel\n");
1067				hcchar &= ~HCCHAR_CHENA;
1068			}
1069		}
1070	} else {
1071		if (dbg_hc(chan))
1072			dev_vdbg(hsotg->dev, "DMA enabled\n");
1073	}
1074
1075	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1076	chan->halt_status = halt_status;
1077
1078	if (hcchar & HCCHAR_CHENA) {
1079		if (dbg_hc(chan))
1080			dev_vdbg(hsotg->dev, "Channel enabled\n");
1081		chan->halt_pending = 1;
1082		chan->halt_on_queue = 0;
1083	} else {
1084		if (dbg_hc(chan))
1085			dev_vdbg(hsotg->dev, "Channel disabled\n");
1086		chan->halt_on_queue = 1;
1087	}
1088
1089	if (dbg_hc(chan)) {
1090		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1091			 chan->hc_num);
1092		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
1093			 hcchar);
1094		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
1095			 chan->halt_pending);
1096		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
1097			 chan->halt_on_queue);
1098		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
1099			 chan->halt_status);
1100	}
1101}
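
/*
 * Illustrative sketch of an URB dequeue path (hypothetical caller; the
 * real HCD code holds the hsotg lock around this):
 *
 *	if (chan->xfer_started)
 *		dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_URB_DEQUEUE);
 *
 * The channel is then fully released by the Channel Halted interrupt
 * handler once the core reports the halt.
 */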
1102
1103/**
1104 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1105 *
1106 * @hsotg: Programming view of DWC_otg controller
1107 * @chan:  Identifies the host channel to clean up
1108 *
1109 * This function is normally called after a transfer is done and the host
1110 * channel is being released
1111 */
1112void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1113{
1114	u32 hcintmsk;
1115
1116	chan->xfer_started = 0;
1117
1118	/*
1119	 * Clear channel interrupt enables and any unhandled channel interrupt
1120	 * conditions
1121	 */
1122	writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1123	hcintmsk = 0xffffffff;
1124	hcintmsk &= ~HCINTMSK_RESERVED14_31;
1125	writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1126}
1127
1128/**
1129 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1130 * which frame a periodic transfer should occur
1131 *
1132 * @hsotg:  Programming view of DWC_otg controller
1133 * @chan:   Identifies the host channel to set up and its properties
1134 * @hcchar: Current value of the HCCHAR register for the specified host channel
1135 *
1136 * This function has no effect on non-periodic transfers
1137 */
1138static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1139				       struct dwc2_host_chan *chan, u32 *hcchar)
1140{
1141	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1142	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1143		/* 1 if _next_ frame is odd, 0 if it's even */
1144		if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1145			*hcchar |= HCCHAR_ODDFRM;
1146	}
1147}
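
/*
 * For example, if dwc2_hcd_get_frame_number() returns 0x2a (an even
 * (micro)frame), HCCHAR_ODDFRM is set so the periodic transfer goes out
 * in the next, odd, (micro)frame 0x2b. If the current frame number is
 * odd, the bit is left clear and the transfer goes out in the following
 * even frame.
 */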
1148
1149static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1150{
1151	/* Set up the initial PID for the transfer */
1152	if (chan->speed == USB_SPEED_HIGH) {
1153		if (chan->ep_is_in) {
1154			if (chan->multi_count == 1)
1155				chan->data_pid_start = DWC2_HC_PID_DATA0;
1156			else if (chan->multi_count == 2)
1157				chan->data_pid_start = DWC2_HC_PID_DATA1;
1158			else
1159				chan->data_pid_start = DWC2_HC_PID_DATA2;
1160		} else {
1161			if (chan->multi_count == 1)
1162				chan->data_pid_start = DWC2_HC_PID_DATA0;
1163			else
1164				chan->data_pid_start = DWC2_HC_PID_MDATA;
1165		}
1166	} else {
1167		chan->data_pid_start = DWC2_HC_PID_DATA0;
1168	}
1169}
1170
1171/**
1172 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1173 * the Host Channel
1174 *
1175 * @hsotg: Programming view of DWC_otg controller
1176 * @chan:  Information needed to initialize the host channel
1177 *
1178 * This function should only be called in Slave mode. For a channel associated
1179 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1180 * associated with a periodic EP, the periodic Tx FIFO is written.
1181 *
1182 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1183 * the number of bytes written to the Tx FIFO.
1184 */
1185static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1186				 struct dwc2_host_chan *chan)
1187{
1188	u32 i;
1189	u32 remaining_count;
1190	u32 byte_count;
1191	u32 dword_count;
1192	u32 __iomem *data_fifo;
1193	u32 *data_buf = (u32 *)chan->xfer_buf;
1194
1195	if (dbg_hc(chan))
1196		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1197
1198	data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1199
1200	remaining_count = chan->xfer_len - chan->xfer_count;
1201	if (remaining_count > chan->max_packet)
1202		byte_count = chan->max_packet;
1203	else
1204		byte_count = remaining_count;
1205
1206	dword_count = (byte_count + 3) / 4;
1207
1208	if (((unsigned long)data_buf & 0x3) == 0) {
1209		/* xfer_buf is DWORD aligned */
1210		for (i = 0; i < dword_count; i++, data_buf++)
1211			writel(*data_buf, data_fifo);
1212	} else {
1213		/* xfer_buf is not DWORD aligned */
1214		for (i = 0; i < dword_count; i++, data_buf++) {
1215			u32 data = data_buf[0] | data_buf[1] << 8 |
1216				   data_buf[2] << 16 | data_buf[3] << 24;
1217			writel(data, data_fifo);
1218		}
1219	}
1220
1221	chan->xfer_count += byte_count;
1222	chan->xfer_buf += byte_count;
1223}
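
/*
 * Small worked example of the sizing above: with max_packet = 64,
 * xfer_len = 200 and xfer_count = 192, remaining_count is 8, so
 * byte_count = 8 and dword_count = (8 + 3) / 4 = 2. Two 32-bit words
 * are pushed into the FIFO and xfer_count/xfer_buf advance by 8 bytes.
 */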
1224
1225/**
1226 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1227 * channel and starts the transfer
1228 *
1229 * @hsotg: Programming view of DWC_otg controller
1230 * @chan:  Information needed to initialize the host channel. The xfer_len value
1231 *         may be reduced to accommodate the max widths of the XferSize and
1232 *         PktCnt fields in the HCTSIZn register. The multi_count value may be
1233 *         changed to reflect the final xfer_len value.
1234 *
1235 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1236 * the caller must ensure that there is sufficient space in the request queue
1237 * and Tx Data FIFO.
1238 *
1239 * For an OUT transfer in Slave mode, it loads a data packet into the
1240 * appropriate FIFO. If necessary, additional data packets are loaded in the
1241 * Host ISR.
1242 *
1243 * For an IN transfer in Slave mode, a data packet is requested. The data
1244 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1245 * additional data packets are requested in the Host ISR.
1246 *
1247 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1248 * register along with a packet count of 1 and the channel is enabled. This
1249 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1250 * simply set to 0 since no data transfer occurs in this case.
1251 *
1252 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1253 * all the information required to perform the subsequent data transfer. In
1254 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1255 * controller performs the entire PING protocol, then starts the data
1256 * transfer.
1257 */
1258void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1259			    struct dwc2_host_chan *chan)
1260{
1261	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1262	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1263	u32 hcchar;
1264	u32 hctsiz = 0;
1265	u16 num_packets;
1266
1267	if (dbg_hc(chan))
1268		dev_vdbg(hsotg->dev, "%s()\n", __func__);
1269
1270	if (chan->do_ping) {
1271		if (hsotg->core_params->dma_enable <= 0) {
1272			if (dbg_hc(chan))
1273				dev_vdbg(hsotg->dev, "ping, no DMA\n");
1274			dwc2_hc_do_ping(hsotg, chan);
1275			chan->xfer_started = 1;
1276			return;
1277		} else {
1278			if (dbg_hc(chan))
1279				dev_vdbg(hsotg->dev, "ping, DMA\n");
1280			hctsiz |= TSIZ_DOPNG;
1281		}
1282	}
1283
1284	if (chan->do_split) {
1285		if (dbg_hc(chan))
1286			dev_vdbg(hsotg->dev, "split\n");
1287		num_packets = 1;
1288
1289		if (chan->complete_split && !chan->ep_is_in)
1290			/*
1291			 * For CSPLIT OUT Transfer, set the size to 0 so the
1292			 * core doesn't expect any data written to the FIFO
1293			 */
1294			chan->xfer_len = 0;
1295		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1296			chan->xfer_len = chan->max_packet;
1297		else if (!chan->ep_is_in && chan->xfer_len > 188)
1298			chan->xfer_len = 188;
1299
1300		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1301			  TSIZ_XFERSIZE_MASK;
1302	} else {
1303		if (dbg_hc(chan))
1304			dev_vdbg(hsotg->dev, "no split\n");
1305		/*
1306		 * Ensure that the transfer length and packet count will fit
1307		 * in the widths allocated for them in the HCTSIZn register
1308		 */
1309		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1310		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1311			/*
1312			 * Make sure the transfer size is no larger than one
1313			 * (micro)frame's worth of data. (A check was done
1314			 * when the periodic transfer was accepted to ensure
1315			 * that a (micro)frame's worth of data can be
1316			 * programmed into a channel.)
1317			 */
1318			u32 max_periodic_len =
1319				chan->multi_count * chan->max_packet;
1320
1321			if (chan->xfer_len > max_periodic_len)
1322				chan->xfer_len = max_periodic_len;
1323		} else if (chan->xfer_len > max_hc_xfer_size) {
1324			/*
1325			 * Make sure that xfer_len is a multiple of max packet
1326			 * size
1327			 */
1328			chan->xfer_len =
1329				max_hc_xfer_size - chan->max_packet + 1;
1330		}
1331
1332		if (chan->xfer_len > 0) {
1333			num_packets = (chan->xfer_len + chan->max_packet - 1) /
1334					chan->max_packet;
1335			if (num_packets > max_hc_pkt_count) {
1336				num_packets = max_hc_pkt_count;
1337				chan->xfer_len = num_packets * chan->max_packet;
1338			}
1339		} else {
1340			/* Need 1 packet for transfer length of 0 */
1341			num_packets = 1;
1342		}
1343
1344		if (chan->ep_is_in)
1345			/*
1346			 * Always program an integral # of max packets for IN
1347			 * transfers
1348			 */
1349			chan->xfer_len = num_packets * chan->max_packet;
1350
1351		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1352		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1353			/*
1354			 * Make sure that the multi_count field matches the
1355			 * actual transfer length
1356			 */
1357			chan->multi_count = num_packets;
1358
1359		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1360			dwc2_set_pid_isoc(chan);
1361
1362		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1363			  TSIZ_XFERSIZE_MASK;
1364	}
1365
1366	chan->start_pkt_count = num_packets;
1367	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1368	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1369		  TSIZ_SC_MC_PID_MASK;
1370	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1371	if (dbg_hc(chan)) {
1372		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1373			 hctsiz, chan->hc_num);
1374
1375		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1376			 chan->hc_num);
1377		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
1378			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1379			 TSIZ_XFERSIZE_SHIFT);
1380		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
1381			 (hctsiz & TSIZ_PKTCNT_MASK) >>
1382			 TSIZ_PKTCNT_SHIFT);
1383		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1384			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1385			 TSIZ_SC_MC_PID_SHIFT);
1386	}
1387
1388	if (hsotg->core_params->dma_enable > 0) {
1389		dma_addr_t dma_addr;
1390
1391		if (chan->align_buf) {
1392			if (dbg_hc(chan))
1393				dev_vdbg(hsotg->dev, "align_buf\n");
1394			dma_addr = chan->align_buf;
1395		} else {
1396			dma_addr = chan->xfer_dma;
1397		}
1398		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1399		if (dbg_hc(chan))
1400			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1401				 (unsigned long)dma_addr, chan->hc_num);
1402	}
1403
1404	/* Start the split */
1405	if (chan->do_split) {
1406		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1407
1408		hcsplt |= HCSPLT_SPLTENA;
1409		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1410	}
1411
1412	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1413	hcchar &= ~HCCHAR_MULTICNT_MASK;
1414	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1415		  HCCHAR_MULTICNT_MASK;
1416	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1417
1418	if (hcchar & HCCHAR_CHDIS)
1419		dev_warn(hsotg->dev,
1420			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1421			 __func__, chan->hc_num, hcchar);
1422
1423	/* Set host channel enable after all other setup is complete */
1424	hcchar |= HCCHAR_CHENA;
1425	hcchar &= ~HCCHAR_CHDIS;
1426
1427	if (dbg_hc(chan))
1428		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1429			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1430			 HCCHAR_MULTICNT_SHIFT);
1431
1432	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1433	if (dbg_hc(chan))
1434		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1435			 chan->hc_num);
1436
1437	chan->xfer_started = 1;
1438	chan->requests++;
1439
1440	if (hsotg->core_params->dma_enable <= 0 &&
1441	    !chan->ep_is_in && chan->xfer_len > 0)
1442		/* Load OUT packet into the appropriate Tx FIFO */
1443		dwc2_hc_write_packet(hsotg, chan);
1444}
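
/*
 * Example of the IN rounding performed above (hypothetical numbers):
 * for a bulk IN with max_packet = 512 and a requested xfer_len of 1000
 * bytes, num_packets = (1000 + 511) / 512 = 2 and xfer_len is rounded
 * up to 2 * 512 = 1024 bytes, since the core always requests whole
 * max-packet-sized IN transactions.
 */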
1445
1446/**
1447 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1448 * host channel and starts the transfer in Descriptor DMA mode
1449 *
1450 * @hsotg: Programming view of DWC_otg controller
1451 * @chan:  Information needed to initialize the host channel
1452 *
1453 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1454 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1455 * with micro-frame bitmap.
1456 *
1457 * Initializes HCDMA register with descriptor list address and CTD value then
1458 * starts the transfer via enabling the channel.
1459 */
1460void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1461				 struct dwc2_host_chan *chan)
1462{
1463	u32 hcchar;
1464	u32 hc_dma;
1465	u32 hctsiz = 0;
1466
1467	if (chan->do_ping)
1468		hctsiz |= TSIZ_DOPNG;
1469
1470	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1471		dwc2_set_pid_isoc(chan);
1472
1473	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1474	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1475		  TSIZ_SC_MC_PID_MASK;
1476
1477	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1478	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1479
1480	/* Non-zero only for high-speed interrupt endpoints */
1481	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1482
1483	if (dbg_hc(chan)) {
1484		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1485			 chan->hc_num);
1486		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
1487			 chan->data_pid_start);
1488		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
1489	}
1490
1491	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1492
1493	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1494
1495	/* Always start from first descriptor */
1496	hc_dma &= ~HCDMA_CTD_MASK;
1497	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1498	if (dbg_hc(chan))
1499		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1500			 hc_dma, chan->hc_num);
1501
1502	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1503	hcchar &= ~HCCHAR_MULTICNT_MASK;
1504	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1505		  HCCHAR_MULTICNT_MASK;
1506
1507	if (hcchar & HCCHAR_CHDIS)
1508		dev_warn(hsotg->dev,
1509			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1510			 __func__, chan->hc_num, hcchar);
1511
1512	/* Set host channel enable after all other setup is complete */
1513	hcchar |= HCCHAR_CHENA;
1514	hcchar &= ~HCCHAR_CHDIS;
1515
1516	if (dbg_hc(chan))
1517		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
1518			 (hcchar & HCCHAR_MULTICNT_MASK) >>
1519			 HCCHAR_MULTICNT_SHIFT);
1520
1521	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1522	if (dbg_hc(chan))
1523		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1524			 chan->hc_num);
1525
1526	chan->xfer_started = 1;
1527	chan->requests++;
1528}
1529
1530/**
1531 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1532 * a previous call to dwc2_hc_start_transfer()
1533 *
1534 * @hsotg: Programming view of DWC_otg controller
1535 * @chan:  Information needed to initialize the host channel
1536 *
1537 * The caller must ensure there is sufficient space in the request queue and Tx
1538 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1539 * the controller acts autonomously to complete transfers programmed to a host
1540 * channel.
1541 *
1542 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1543 * if there is any data remaining to be queued. For an IN transfer, another
1544 * data packet is always requested. For the SETUP phase of a control transfer,
1545 * this function does nothing.
1546 *
1547 * Return: 1 if a new request is queued, 0 if no more requests are required
1548 * for this transfer
1549 */
1550int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1551			      struct dwc2_host_chan *chan)
1552{
1553	if (dbg_hc(chan))
1554		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1555			 chan->hc_num);
1556
1557	if (chan->do_split)
1558		/* SPLITs always queue just once per channel */
1559		return 0;
1560
1561	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1562		/* SETUPs are queued only once since they can't be NAK'd */
1563		return 0;
1564
1565	if (chan->ep_is_in) {
1566		/*
1567		 * Always queue another request for other IN transfers. If
1568		 * back-to-back INs are issued and NAKs are received for both,
1569		 * the driver may still be processing the first NAK when the
1570		 * second NAK is received. When the interrupt handler clears
1571		 * the NAK interrupt for the first NAK, the second NAK will
1572		 * not be seen. So we can't depend on the NAK interrupt
1573		 * handler to requeue a NAK'd request. Instead, IN requests
1574		 * are issued each time this function is called. When the
1575		 * transfer completes, the extra requests for the channel will
1576		 * be flushed.
1577		 */
1578		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1579
1580		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1581		hcchar |= HCCHAR_CHENA;
1582		hcchar &= ~HCCHAR_CHDIS;
1583		if (dbg_hc(chan))
1584			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
1585				 hcchar);
1586		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1587		chan->requests++;
1588		return 1;
1589	}
1590
1591	/* OUT transfers */
1592
1593	if (chan->xfer_count < chan->xfer_len) {
1594		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1595		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1596			u32 hcchar = readl(hsotg->regs +
1597					   HCCHAR(chan->hc_num));
1598
1599			dwc2_hc_set_even_odd_frame(hsotg, chan,
1600						   &hcchar);
1601		}
1602
1603		/* Load OUT packet into the appropriate Tx FIFO */
1604		dwc2_hc_write_packet(hsotg, chan);
1605		chan->requests++;
1606		return 1;
1607	}
1608
1609	return 0;
1610}
1611
1612/**
1613 * dwc2_hc_do_ping() - Starts a PING transfer
1614 *
1615 * @hsotg: Programming view of DWC_otg controller
1616 * @chan:  Information needed to initialize the host channel
1617 *
1618 * This function should only be called in Slave mode. The Do Ping bit is set in
1619 * the HCTSIZ register, then the channel is enabled.
1620 */
1621void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1622{
1623	u32 hcchar;
1624	u32 hctsiz;
1625
1626	if (dbg_hc(chan))
1627		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1628			 chan->hc_num);
1629
1630
1631	hctsiz = TSIZ_DOPNG;
1632	hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1633	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1634
1635	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1636	hcchar |= HCCHAR_CHENA;
1637	hcchar &= ~HCCHAR_CHDIS;
1638	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1639}
1640
1641/**
1642 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1643 * the HFIR register according to PHY type and speed
1644 *
1645 * @hsotg: Programming view of DWC_otg controller
1646 *
1647 * NOTE: The caller can modify the value of the HFIR register only after the
1648 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1649 * has been set
1650 */
1651u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1652{
1653	u32 usbcfg;
1654	u32 hprt0;
1655	int clock = 60;	/* default value */
1656
1657	usbcfg = readl(hsotg->regs + GUSBCFG);
1658	hprt0 = readl(hsotg->regs + HPRT0);
1659
1660	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1661	    !(usbcfg & GUSBCFG_PHYIF16))
1662		clock = 60;
1663	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
1664	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1665		clock = 48;
1666	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1667	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1668		clock = 30;
1669	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1670	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1671		clock = 60;
1672	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1673	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1674		clock = 48;
1675	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1676	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1677		clock = 48;
1678	if ((usbcfg & GUSBCFG_PHYSEL) &&
1679	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
1680		clock = 48;
1681
1682	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
1683		/* High speed case */
1684		return 125 * clock;
1685	else
1686		/* FS/LS case */
1687		return 1000 * clock;
1688}
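
/*
 * Worked example: a UTMI+ PHY with a 16-bit data interface and the PHY
 * low power clock select bit clear runs the PHY clock at 30 MHz, so
 * this returns 125 * 30 = 3750 PHY clocks for a high-speed 125 us
 * (micro)frame, or 1000 * 30 = 30000 clocks for a 1 ms FS/LS frame.
 */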
1689
1690/**
1691 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1692 * buffer
1693 *
 1694 * @hsotg:   Programming view of DWC_otg controller
1695 * @dest:    Destination buffer for the packet
1696 * @bytes:   Number of bytes to copy to the destination
1697 */
1698void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1699{
1700	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1701	u32 *data_buf = (u32 *)dest;
1702	int word_count = (bytes + 3) / 4;
1703	int i;
1704
1705	/*
1706	 * Todo: Account for the case where dest is not dword aligned. This
1707	 * requires reading data from the FIFO into a u32 temp buffer, then
1708	 * moving it into the data buffer.
1709	 */
1710
1711	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1712
1713	for (i = 0; i < word_count; i++, data_buf++)
1714		*data_buf = readl(fifo);
1715}
1716
1717/**
1718 * dwc2_dump_host_registers() - Prints the host registers
1719 *
1720 * @hsotg: Programming view of DWC_otg controller
1721 *
1722 * NOTE: This function will be removed once the peripheral controller code
1723 * is integrated and the driver is stable
1724 */
1725void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1726{
1727#ifdef DEBUG
1728	u32 __iomem *addr;
1729	int i;
1730
1731	dev_dbg(hsotg->dev, "Host Global Registers\n");
1732	addr = hsotg->regs + HCFG;
1733	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
1734		(unsigned long)addr, readl(addr));
1735	addr = hsotg->regs + HFIR;
1736	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
1737		(unsigned long)addr, readl(addr));
1738	addr = hsotg->regs + HFNUM;
1739	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
1740		(unsigned long)addr, readl(addr));
1741	addr = hsotg->regs + HPTXSTS;
1742	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
1743		(unsigned long)addr, readl(addr));
1744	addr = hsotg->regs + HAINT;
1745	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
1746		(unsigned long)addr, readl(addr));
1747	addr = hsotg->regs + HAINTMSK;
1748	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
1749		(unsigned long)addr, readl(addr));
1750	if (hsotg->core_params->dma_desc_enable > 0) {
1751		addr = hsotg->regs + HFLBADDR;
1752		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1753			(unsigned long)addr, readl(addr));
1754	}
1755
1756	addr = hsotg->regs + HPRT0;
1757	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
1758		(unsigned long)addr, readl(addr));
1759
1760	for (i = 0; i < hsotg->core_params->host_channels; i++) {
1761		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1762		addr = hsotg->regs + HCCHAR(i);
1763		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
1764			(unsigned long)addr, readl(addr));
1765		addr = hsotg->regs + HCSPLT(i);
1766		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
1767			(unsigned long)addr, readl(addr));
1768		addr = hsotg->regs + HCINT(i);
1769		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
1770			(unsigned long)addr, readl(addr));
1771		addr = hsotg->regs + HCINTMSK(i);
1772		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
1773			(unsigned long)addr, readl(addr));
1774		addr = hsotg->regs + HCTSIZ(i);
1775		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
1776			(unsigned long)addr, readl(addr));
1777		addr = hsotg->regs + HCDMA(i);
1778		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
1779			(unsigned long)addr, readl(addr));
1780		if (hsotg->core_params->dma_desc_enable > 0) {
1781			addr = hsotg->regs + HCDMAB(i);
1782			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
1783				(unsigned long)addr, readl(addr));
1784		}
1785	}
1786#endif
1787}
1788
1789/**
1790 * dwc2_dump_global_registers() - Prints the core global registers
1791 *
1792 * @hsotg: Programming view of DWC_otg controller
1793 *
1794 * NOTE: This function will be removed once the peripheral controller code
1795 * is integrated and the driver is stable
1796 */
1797void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1798{
1799#ifdef DEBUG
1800	u32 __iomem *addr;
1801
1802	dev_dbg(hsotg->dev, "Core Global Registers\n");
1803	addr = hsotg->regs + GOTGCTL;
1804	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
1805		(unsigned long)addr, readl(addr));
1806	addr = hsotg->regs + GOTGINT;
1807	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
1808		(unsigned long)addr, readl(addr));
1809	addr = hsotg->regs + GAHBCFG;
1810	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
1811		(unsigned long)addr, readl(addr));
1812	addr = hsotg->regs + GUSBCFG;
1813	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
1814		(unsigned long)addr, readl(addr));
1815	addr = hsotg->regs + GRSTCTL;
1816	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
1817		(unsigned long)addr, readl(addr));
1818	addr = hsotg->regs + GINTSTS;
1819	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
1820		(unsigned long)addr, readl(addr));
1821	addr = hsotg->regs + GINTMSK;
1822	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
1823		(unsigned long)addr, readl(addr));
1824	addr = hsotg->regs + GRXSTSR;
1825	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
1826		(unsigned long)addr, readl(addr));
1827	addr = hsotg->regs + GRXFSIZ;
1828	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
1829		(unsigned long)addr, readl(addr));
1830	addr = hsotg->regs + GNPTXFSIZ;
1831	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
1832		(unsigned long)addr, readl(addr));
1833	addr = hsotg->regs + GNPTXSTS;
1834	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
1835		(unsigned long)addr, readl(addr));
1836	addr = hsotg->regs + GI2CCTL;
1837	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
1838		(unsigned long)addr, readl(addr));
1839	addr = hsotg->regs + GPVNDCTL;
1840	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
1841		(unsigned long)addr, readl(addr));
1842	addr = hsotg->regs + GGPIO;
1843	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
1844		(unsigned long)addr, readl(addr));
1845	addr = hsotg->regs + GUID;
1846	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
1847		(unsigned long)addr, readl(addr));
1848	addr = hsotg->regs + GSNPSID;
1849	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
1850		(unsigned long)addr, readl(addr));
1851	addr = hsotg->regs + GHWCFG1;
1852	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
1853		(unsigned long)addr, readl(addr));
1854	addr = hsotg->regs + GHWCFG2;
1855	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
1856		(unsigned long)addr, readl(addr));
1857	addr = hsotg->regs + GHWCFG3;
1858	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
1859		(unsigned long)addr, readl(addr));
1860	addr = hsotg->regs + GHWCFG4;
1861	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
1862		(unsigned long)addr, readl(addr));
1863	addr = hsotg->regs + GLPMCFG;
1864	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
1865		(unsigned long)addr, readl(addr));
1866	addr = hsotg->regs + GPWRDN;
1867	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
1868		(unsigned long)addr, readl(addr));
1869	addr = hsotg->regs + GDFIFOCFG;
1870	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
1871		(unsigned long)addr, readl(addr));
1872	addr = hsotg->regs + HPTXFSIZ;
1873	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
1874		(unsigned long)addr, readl(addr));
1875
1876	addr = hsotg->regs + PCGCTL;
1877	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
1878		(unsigned long)addr, readl(addr));
1879#endif
1880}
1881
1882/**
1883 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1884 *
1885 * @hsotg: Programming view of DWC_otg controller
1886 * @num:   Tx FIFO to flush
1887 */
1888void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1889{
1890	u32 greset;
1891	int count = 0;
1892
1893	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1894
1895	greset = GRSTCTL_TXFFLSH;
1896	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1897	writel(greset, hsotg->regs + GRSTCTL);
1898
1899	do {
1900		greset = readl(hsotg->regs + GRSTCTL);
1901		if (++count > 10000) {
1902			dev_warn(hsotg->dev,
1903				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1904				 __func__, greset,
1905				 readl(hsotg->regs + GNPTXSTS));
1906			break;
1907		}
1908		udelay(1);
1909	} while (greset & GRSTCTL_TXFFLSH);
1910
1911	/* Wait for at least 3 PHY Clocks */
1912	udelay(1);
1913}
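
/*
 * Callers pass either a single Tx FIFO number (0 selects the
 * non-periodic Tx FIFO) or 0x10 to flush all Tx FIFOs at once, as
 * dwc2_core_host_init() does above with
 *
 *	dwc2_flush_tx_fifo(hsotg, 0x10);
 */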
1914
1915/**
1916 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1917 *
1918 * @hsotg: Programming view of DWC_otg controller
1919 */
1920void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1921{
1922	u32 greset;
1923	int count = 0;
1924
1925	dev_vdbg(hsotg->dev, "%s()\n", __func__);
1926
1927	greset = GRSTCTL_RXFFLSH;
1928	writel(greset, hsotg->regs + GRSTCTL);
1929
1930	do {
1931		greset = readl(hsotg->regs + GRSTCTL);
1932		if (++count > 10000) {
1933			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
1934				 __func__, greset);
1935			break;
1936		}
1937		udelay(1);
1938	} while (greset & GRSTCTL_RXFFLSH);
1939
1940	/* Wait for at least 3 PHY Clocks */
1941	udelay(1);
1942}
1943
1944#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
1945
1946/* Parameter access functions */
1947void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1948{
1949	int valid = 1;
1950
1951	switch (val) {
1952	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
1953		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
1954			valid = 0;
1955		break;
1956	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
1957		switch (hsotg->hw_params.op_mode) {
1958		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1959		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1960		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1961		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1962			break;
1963		default:
1964			valid = 0;
1965			break;
1966		}
1967		break;
1968	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
1969		/* always valid */
1970		break;
1971	default:
1972		valid = 0;
1973		break;
1974	}
1975
1976	if (!valid) {
1977		if (val >= 0)
1978			dev_err(hsotg->dev,
1979				"%d invalid for otg_cap parameter. Check HW configuration.\n",
1980				val);
1981		switch (hsotg->hw_params.op_mode) {
1982		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1983			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
1984			break;
1985		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1986		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1987		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1988			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
1989			break;
1990		default:
1991			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
1992			break;
1993		}
1994		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
1995	}
1996
1997	hsotg->core_params->otg_cap = val;
1998}
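
/*
 * Example of the fallback behaviour: requesting
 * DWC2_CAP_PARAM_HNP_SRP_CAPABLE on a core whose GHWCFG2 reports
 * GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE is rejected; an error is logged and
 * otg_cap falls back to DWC2_CAP_PARAM_SRP_ONLY_CAPABLE. Passing a
 * negative value skips the error message and simply picks the most
 * capable setting the hardware supports.
 */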
1999
2000void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2001{
2002	int valid = 1;
2003
2004	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
2005		valid = 0;
2006	if (val < 0)
2007		valid = 0;
2008
2009	if (!valid) {
2010		if (val >= 0)
2011			dev_err(hsotg->dev,
2012				"%d invalid for dma_enable parameter. Check HW configuration.\n",
2013				val);
2014		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
2015		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2016	}
2017
2018	hsotg->core_params->dma_enable = val;
2019}
2020
2021void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2022{
2023	int valid = 1;
2024
2025	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2026			!hsotg->hw_params.dma_desc_enable))
2027		valid = 0;
2028	if (val < 0)
2029		valid = 0;
2030
2031	if (!valid) {
2032		if (val >= 0)
2033			dev_err(hsotg->dev,
2034				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2035				val);
2036		val = (hsotg->core_params->dma_enable > 0 &&
2037			hsotg->hw_params.dma_desc_enable);
2038		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2039	}
2040
2041	hsotg->core_params->dma_desc_enable = val;
2042}
2043
2044void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2045						 int val)
2046{
2047	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2048		if (val >= 0) {
2049			dev_err(hsotg->dev,
2050				"Wrong value for host_support_fs_low_power\n");
2051			dev_err(hsotg->dev,
2052				"host_support_fs_low_power must be 0 or 1\n");
2053		}
2054		val = 0;
2055		dev_dbg(hsotg->dev,
2056			"Setting host_support_fs_low_power to %d\n", val);
2057	}
2058
2059	hsotg->core_params->host_support_fs_ls_low_power = val;
2060}
2061
2062void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2063{
2064	int valid = 1;
2065
2066	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
2067		valid = 0;
2068	if (val < 0)
2069		valid = 0;
2070
2071	if (!valid) {
2072		if (val >= 0)
2073			dev_err(hsotg->dev,
2074				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2075				val);
2076		val = hsotg->hw_params.enable_dynamic_fifo;
2077		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2078	}
2079
2080	hsotg->core_params->enable_dynamic_fifo = val;
2081}
2082
2083void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2084{
2085	int valid = 1;
2086
2087	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
2088		valid = 0;
2089
2090	if (!valid) {
2091		if (val >= 0)
2092			dev_err(hsotg->dev,
2093				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2094				val);
2095		val = hsotg->hw_params.host_rx_fifo_size;
2096		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2097	}
2098
2099	hsotg->core_params->host_rx_fifo_size = val;
2100}
2101
2102void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2103{
2104	int valid = 1;
2105
2106	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
2107		valid = 0;
2108
2109	if (!valid) {
2110		if (val >= 0)
2111			dev_err(hsotg->dev,
2112				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2113				val);
2114		val = hsotg->hw_params.host_nperio_tx_fifo_size;
2115		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2116			val);
2117	}
2118
2119	hsotg->core_params->host_nperio_tx_fifo_size = val;
2120}
2121
2122void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2123{
2124	int valid = 1;
2125
2126	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
2127		valid = 0;
2128
2129	if (!valid) {
2130		if (val >= 0)
2131			dev_err(hsotg->dev,
2132				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2133				val);
2134		val = hsotg->hw_params.host_perio_tx_fifo_size;
2135		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2136			val);
2137	}
2138
2139	hsotg->core_params->host_perio_tx_fifo_size = val;
2140}
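
/*
 * Illustrative sketch (helper name and sizes are made up): the three host
 * FIFO depths can be tuned individually as long as each stays within the
 * power-on maximum reported in hw_params; passing -1 keeps the hardware
 * value.
 */
static void __maybe_unused
dwc2_example_tune_host_fifos(struct dwc2_hsotg *hsotg)
{
	dwc2_set_param_host_rx_fifo_size(hsotg, 525);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg, 128);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg, 256);
}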
2141
2142void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2143{
2144	int valid = 1;
2145
2146	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
2147		valid = 0;
2148
2149	if (!valid) {
2150		if (val >= 0)
2151			dev_err(hsotg->dev,
2152				"%d invalid for max_transfer_size. Check HW configuration.\n",
2153				val);
2154		val = hsotg->hw_params.max_transfer_size;
2155		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2156	}
2157
2158	hsotg->core_params->max_transfer_size = val;
2159}
2160
2161void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2162{
2163	int valid = 1;
2164
2165	if (val < 15 || val > hsotg->hw_params.max_packet_count)
2166		valid = 0;
2167
2168	if (!valid) {
2169		if (val >= 0)
2170			dev_err(hsotg->dev,
2171				"%d invalid for max_packet_count. Check HW configuration.\n",
2172				val);
2173		val = hsotg->hw_params.max_packet_count;
2174		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2175	}
2176
2177	hsotg->core_params->max_packet_count = val;
2178}
2179
2180void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2181{
2182	int valid = 1;
2183
2184	if (val < 1 || val > hsotg->hw_params.host_channels)
2185		valid = 0;
2186
2187	if (!valid) {
2188		if (val >= 0)
2189			dev_err(hsotg->dev,
2190				"%d invalid for host_channels. Check HW configuration.\n",
2191				val);
2192		val = hsotg->hw_params.host_channels;
2193		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2194	}
2195
2196	hsotg->core_params->host_channels = val;
2197}
2198
2199void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2200{
2201	int valid = 0;
2202	u32 hs_phy_type, fs_phy_type;
2203
2204	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2205			       DWC2_PHY_TYPE_PARAM_ULPI)) {
2206		if (val >= 0) {
2207			dev_err(hsotg->dev, "Wrong value for phy_type\n");
2208			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2209		}
2210
2211		valid = 0;
2212	}
2213
2214	hs_phy_type = hsotg->hw_params.hs_phy_type;
2215	fs_phy_type = hsotg->hw_params.fs_phy_type;
2216	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2217	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2218	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2219		valid = 1;
2220	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2221		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2222		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2223		valid = 1;
2224	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2225		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2226		valid = 1;
2227
2228	if (!valid) {
2229		if (val >= 0)
2230			dev_err(hsotg->dev,
2231				"%d invalid for phy_type. Check HW configuration.\n",
2232				val);
2233		val = DWC2_PHY_TYPE_PARAM_FS;
2234		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2235			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2236			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2237				val = DWC2_PHY_TYPE_PARAM_UTMI;
2238			else
2239				val = DWC2_PHY_TYPE_PARAM_ULPI;
2240		}
2241		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2242	}
2243
2244	hsotg->core_params->phy_type = val;
2245}
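
/*
 * Illustrative sketch (hypothetical helper): when the core reports both a
 * UTMI+ and a ULPI high-speed interface, the fallback above prefers UTMI+,
 * so a board that is wired for ULPI has to request it explicitly.
 */
static void __maybe_unused
dwc2_example_request_ulpi(struct dwc2_hsotg *hsotg)
{
	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
	    hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
		dwc2_set_param_phy_type(hsotg, DWC2_PHY_TYPE_PARAM_ULPI);
	else
		dwc2_set_param_phy_type(hsotg, -1);	/* auto-detect */
}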
2246
2247static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2248{
2249	return hsotg->core_params->phy_type;
2250}
2251
2252void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2253{
2254	int valid = 1;
2255
2256	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2257		if (val >= 0) {
2258			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2259			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2260		}
2261		valid = 0;
2262	}
2263
2264	if (val == DWC2_SPEED_PARAM_HIGH &&
2265	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2266		valid = 0;
2267
2268	if (!valid) {
2269		if (val >= 0)
2270			dev_err(hsotg->dev,
2271				"%d invalid for speed parameter. Check HW configuration.\n",
2272				val);
2273		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2274				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2275		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2276	}
2277
2278	hsotg->core_params->speed = val;
2279}
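
/*
 * Illustrative sketch (hypothetical helper): the speed check above uses the
 * already-configured phy_type, so the PHY must be chosen first; forcing
 * full speed is always accepted, while high speed is rejected on a
 * full-speed-only PHY.
 */
static void __maybe_unused
dwc2_example_force_full_speed(struct dwc2_hsotg *hsotg)
{
	dwc2_set_param_phy_type(hsotg, -1);	/* auto-detect the PHY */
	dwc2_set_param_speed(hsotg, DWC2_SPEED_PARAM_FULL);
}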
2280
2281void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2282{
2283	int valid = 1;
2284
2285	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2286			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2287		if (val >= 0) {
2288			dev_err(hsotg->dev,
2289				"Wrong value for host_ls_low_power_phy_clk parameter\n");
2290			dev_err(hsotg->dev,
2291				"host_ls_low_power_phy_clk must be 0 or 1\n");
2292		}
2293		valid = 0;
2294	}
2295
2296	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2297	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2298		valid = 0;
2299
2300	if (!valid) {
2301		if (val >= 0)
2302			dev_err(hsotg->dev,
2303				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2304				val);
2305		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2306			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2307			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2308		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2309			val);
2310	}
2311
2312	hsotg->core_params->host_ls_low_power_phy_clk = val;
2313}
2314
2315void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2316{
2317	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2318		if (val >= 0) {
2319			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2320			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2321		}
2322		val = 0;
2323		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2324	}
2325
2326	hsotg->core_params->phy_ulpi_ddr = val;
2327}
2328
2329void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2330{
2331	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2332		if (val >= 0) {
2333			dev_err(hsotg->dev,
2334				"Wrong value for phy_ulpi_ext_vbus\n");
2335			dev_err(hsotg->dev,
2336				"phy_ulpi_ext_vbus must be 0 or 1\n");
2337		}
2338		val = 0;
2339		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2340	}
2341
2342	hsotg->core_params->phy_ulpi_ext_vbus = val;
2343}
2344
2345void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2346{
2347	int valid = 0;
2348
2349	switch (hsotg->hw_params.utmi_phy_data_width) {
2350	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2351		valid = (val == 8);
2352		break;
2353	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2354		valid = (val == 16);
2355		break;
2356	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2357		valid = (val == 8 || val == 16);
2358		break;
2359	}
2360
2361	if (!valid) {
2362		if (val >= 0) {
2363			dev_err(hsotg->dev,
2364				"%d invalid for phy_utmi_width. Check HW configuration.\n",
2365				val);
2366		}
2367		val = (hsotg->hw_params.utmi_phy_data_width ==
2368		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
2369		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2370	}
2371
2372	hsotg->core_params->phy_utmi_width = val;
2373}
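
/*
 * Illustrative sketch (hypothetical helper): narrow the UTMI+ data bus to
 * 8 bits only when GHWCFG4 reports that the interface supports both widths;
 * otherwise keep whatever width the hardware reports.
 */
static void __maybe_unused
dwc2_example_narrow_utmi(struct dwc2_hsotg *hsotg)
{
	if (hsotg->hw_params.utmi_phy_data_width ==
	    GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16)
		dwc2_set_param_phy_utmi_width(hsotg, 8);
	else
		dwc2_set_param_phy_utmi_width(hsotg, -1);	/* keep default */
}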
2374
2375void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2376{
2377	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2378		if (val >= 0) {
2379			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2380			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2381		}
2382		val = 0;
2383		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2384	}
2385
2386	hsotg->core_params->ulpi_fs_ls = val;
2387}
2388
2389void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2390{
2391	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2392		if (val >= 0) {
2393			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2394			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2395		}
2396		val = 0;
2397		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2398	}
2399
2400	hsotg->core_params->ts_dline = val;
2401}
2402
2403void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2404{
2405	int valid = 1;
2406
2407	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2408		if (val >= 0) {
2409			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2410			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2411		}
2412
2413		valid = 0;
2414	}
2415
2416	if (val == 1 && !(hsotg->hw_params.i2c_enable))
2417		valid = 0;
2418
2419	if (!valid) {
2420		if (val >= 0)
2421			dev_err(hsotg->dev,
2422				"%d invalid for i2c_enable. Check HW configuration.\n",
2423				val);
2424		val = hsotg->hw_params.i2c_enable;
2425		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2426	}
2427
2428	hsotg->core_params->i2c_enable = val;
2429}
2430
2431void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2432{
2433	int valid = 1;
2434
2435	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2436		if (val >= 0) {
2437			dev_err(hsotg->dev,
2438				"Wrong value for en_multiple_tx_fifo\n");
2439			dev_err(hsotg->dev,
2440				"en_multiple_tx_fifo must be 0 or 1\n");
2441		}
2442		valid = 0;
2443	}
2444
2445	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
2446		valid = 0;
2447
2448	if (!valid) {
2449		if (val >= 0)
2450			dev_err(hsotg->dev,
2451				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2452				val);
2453		val = hsotg->hw_params.en_multiple_tx_fifo;
2454		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2455	}
2456
2457	hsotg->core_params->en_multiple_tx_fifo = val;
2458}
2459
2460void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2461{
2462	int valid = 1;
2463
2464	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2465		if (val >= 0) {
2466			dev_err(hsotg->dev,
2467				"'%d' invalid for parameter reload_ctl\n", val);
2468			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2469		}
2470		valid = 0;
2471	}
2472
2473	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
2474		valid = 0;
2475
2476	if (!valid) {
2477		if (val >= 0)
2478			dev_err(hsotg->dev,
2479				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
2480				val);
2481		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
2482		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2483	}
2484
2485	hsotg->core_params->reload_ctl = val;
2486}
2487
2488void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
2489{
2490	if (val != -1)
2491		hsotg->core_params->ahbcfg = val;
2492	else
2493		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2494						GAHBCFG_HBSTLEN_SHIFT;
2495}
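
/*
 * Illustrative sketch (hypothetical helper; assumes GAHBCFG_HBSTLEN_INCR8 is
 * defined alongside GAHBCFG_HBSTLEN_INCR4 in hw.h): ahbcfg takes a raw
 * GAHBCFG burst-length field, so longer AHB bursts can be requested
 * explicitly, while -1 keeps the INCR4 default above.
 */
static void __maybe_unused
dwc2_example_incr8_bursts(struct dwc2_hsotg *hsotg)
{
	dwc2_set_param_ahbcfg(hsotg,
			      GAHBCFG_HBSTLEN_INCR8 << GAHBCFG_HBSTLEN_SHIFT);
}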
2496
2497void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2498{
2499	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2500		if (val >= 0) {
2501			dev_err(hsotg->dev,
2502				"'%d' invalid for parameter otg_ver\n", val);
2503			dev_err(hsotg->dev,
2504				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2505		}
2506		val = 0;
2507		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2508	}
2509
2510	hsotg->core_params->otg_ver = val;
2511}
2512
2513static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2514{
2515	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2516		if (val >= 0) {
2517			dev_err(hsotg->dev,
2518				"'%d' invalid for parameter uframe_sched\n",
2519				val);
2520			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2521		}
2522		val = 1;
2523		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2524	}
2525
2526	hsotg->core_params->uframe_sched = val;
2527}
2528
2529	/*
2530	 * This function is called during module initialization to pass module
2531	 * parameters for the DWC_otg core.
2532	 */
2533void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2534			 const struct dwc2_core_params *params)
2535{
2536	dev_dbg(hsotg->dev, "%s()\n", __func__);
2537
2538	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2539	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2540	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
2541	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2542			params->host_support_fs_ls_low_power);
2543	dwc2_set_param_enable_dynamic_fifo(hsotg,
2544			params->enable_dynamic_fifo);
2545	dwc2_set_param_host_rx_fifo_size(hsotg,
2546			params->host_rx_fifo_size);
2547	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2548			params->host_nperio_tx_fifo_size);
2549	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2550			params->host_perio_tx_fifo_size);
2551	dwc2_set_param_max_transfer_size(hsotg,
2552			params->max_transfer_size);
2553	dwc2_set_param_max_packet_count(hsotg,
2554			params->max_packet_count);
2555	dwc2_set_param_host_channels(hsotg, params->host_channels);
2556	dwc2_set_param_phy_type(hsotg, params->phy_type);
2557	dwc2_set_param_speed(hsotg, params->speed);
2558	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2559			params->host_ls_low_power_phy_clk);
2560	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2561	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2562			params->phy_ulpi_ext_vbus);
2563	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2564	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2565	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2566	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2567	dwc2_set_param_en_multiple_tx_fifo(hsotg,
2568			params->en_multiple_tx_fifo);
2569	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2570	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
2571	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2572	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
2573}
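
/*
 * Illustrative sketch (hypothetical caller, not part of the driver): a
 * platform glue driver with no board-specific constraints can pass -1 for
 * every parameter handled above, so each one falls back to the value
 * auto-detected from hw_params or to the built-in default.
 */
static void __maybe_unused
dwc2_example_default_params(struct dwc2_hsotg *hsotg)
{
	const struct dwc2_core_params defaults = {
		.otg_cap			= -1,
		.otg_ver			= -1,
		.dma_enable			= -1,
		.dma_desc_enable		= -1,
		.speed				= -1,
		.enable_dynamic_fifo		= -1,
		.en_multiple_tx_fifo		= -1,
		.host_rx_fifo_size		= -1,
		.host_nperio_tx_fifo_size	= -1,
		.host_perio_tx_fifo_size	= -1,
		.max_transfer_size		= -1,
		.max_packet_count		= -1,
		.host_channels			= -1,
		.phy_type			= -1,
		.phy_utmi_width			= -1,
		.phy_ulpi_ddr			= -1,
		.phy_ulpi_ext_vbus		= -1,
		.i2c_enable			= -1,
		.ulpi_fs_ls			= -1,
		.host_support_fs_ls_low_power	= -1,
		.host_ls_low_power_phy_clk	= -1,
		.ts_dline			= -1,
		.reload_ctl			= -1,
		.ahbcfg				= -1,
		.uframe_sched			= -1,
	};

	dwc2_set_parameters(hsotg, &defaults);
}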
2574
2575	/**
2576	 * dwc2_get_hwparams() - During device initialization, read various hardware
2577	 * configuration registers and interpret the contents.
2578	 */
2579int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2580{
2581	struct dwc2_hw_params *hw = &hsotg->hw_params;
2582	unsigned int width;
2583	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
2584	u32 hptxfsiz, grxfsiz, gnptxfsiz;
2585	u32 gusbcfg;
2586
2587	/*
2588	 * Attempt to ensure this device is really a DWC_otg Controller.
2589	 * Read and verify the GSNPSID register contents. The value should be
2590	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
2591	 * as in "OTG version 2.xx" or "OTG version 3.xx".
2592	 */
2593	hw->snpsid = readl(hsotg->regs + GSNPSID);
2594	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2595	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
2596		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2597			hw->snpsid);
2598		return -ENODEV;
2599	}
2600
2601	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2602		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2603		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2604
2605	hwcfg1 = readl(hsotg->regs + GHWCFG1);
2606	hwcfg2 = readl(hsotg->regs + GHWCFG2);
2607	hwcfg3 = readl(hsotg->regs + GHWCFG3);
2608	hwcfg4 = readl(hsotg->regs + GHWCFG4);
2609	gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
2610	grxfsiz = readl(hsotg->regs + GRXFSIZ);
2611
2612	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
2613	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2614	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2615	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2616	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2617	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2618
2619	/* Force host mode to get HPTXFSIZ exact power on value */
2620	gusbcfg = readl(hsotg->regs + GUSBCFG);
2621	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2622	writel(gusbcfg, hsotg->regs + GUSBCFG);
2623	usleep_range(100000, 150000);
2624
2625	hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
2626	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2627	gusbcfg = readl(hsotg->regs + GUSBCFG);
2628	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2629	writel(gusbcfg, hsotg->regs + GUSBCFG);
2630	usleep_range(100000, 150000);
2631
2632	/* hwcfg2 */
2633	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2634		      GHWCFG2_OP_MODE_SHIFT;
2635	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2636		   GHWCFG2_ARCHITECTURE_SHIFT;
2637	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2638	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2639				GHWCFG2_NUM_HOST_CHAN_SHIFT);
2640	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2641			  GHWCFG2_HS_PHY_TYPE_SHIFT;
2642	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2643			  GHWCFG2_FS_PHY_TYPE_SHIFT;
2644	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2645			 GHWCFG2_NUM_DEV_EP_SHIFT;
2646	hw->nperio_tx_q_depth =
2647		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2648		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2649	hw->host_perio_tx_q_depth =
2650		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2651		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2652	hw->dev_token_q_depth =
2653		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2654		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2655
2656	/* hwcfg3 */
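	/*
	 * The counter-width fields below encode how many bits the hardware
	 * implements beyond a fixed baseline, so a transfer-size width of n
	 * gives a maximum transfer of 2^(n + 11) - 1 bytes and a packet-count
	 * width of m gives a maximum of 2^(m + 4) - 1 packets.
	 */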
2657	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2658		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2659	hw->max_transfer_size = (1 << (width + 11)) - 1;
2660	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2661		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2662	hw->max_packet_count = (1 << (width + 4)) - 1;
2663	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2664	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2665			      GHWCFG3_DFIFO_DEPTH_SHIFT;
2666
2667	/* hwcfg4 */
2668	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2669	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2670				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2671	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2672	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
2673	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2674				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
2675
2676	/* fifo sizes */
2677	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2678				GRXFSIZ_DEPTH_SHIFT;
2679	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2680				       FIFOSIZE_DEPTH_SHIFT;
2681	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2682				      FIFOSIZE_DEPTH_SHIFT;
2683
2684	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2685	dev_dbg(hsotg->dev, "  op_mode=%d\n",
2686		hw->op_mode);
2687	dev_dbg(hsotg->dev, "  arch=%d\n",
2688		hw->arch);
2689	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
2690		hw->dma_desc_enable);
2691	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
2692		hw->power_optimized);
2693	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
2694		hw->i2c_enable);
2695	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
2696		hw->hs_phy_type);
2697	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
2698		hw->fs_phy_type);
2699	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
2700		hw->utmi_phy_data_width);
2701	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
2702		hw->num_dev_ep);
2703	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
2704		hw->num_dev_perio_in_ep);
2705	dev_dbg(hsotg->dev, "  host_channels=%d\n",
2706		hw->host_channels);
2707	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
2708		hw->max_transfer_size);
2709	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
2710		hw->max_packet_count);
2711	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
2712		hw->nperio_tx_q_depth);
2713	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
2714		hw->host_perio_tx_q_depth);
2715	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
2716		hw->dev_token_q_depth);
2717	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
2718		hw->enable_dynamic_fifo);
2719	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
2720		hw->en_multiple_tx_fifo);
2721	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
2722		hw->total_fifo_size);
2723	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
2724		hw->host_rx_fifo_size);
2725	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
2726		hw->host_nperio_tx_fifo_size);
2727	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
2728		hw->host_perio_tx_fifo_size);
2729	dev_dbg(hsotg->dev, "\n");
2730
2731	return 0;
2732}
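
/*
 * Illustrative sketch (hypothetical probe-time helper, assuming the
 * prototypes exported in core.h): the dwc2_set_param_*() validation relies
 * on hw_params, so dwc2_get_hwparams() has to run first, and checking
 * dwc2_is_controller_alive() beforehand avoids reading a powered-down core.
 */
static int __maybe_unused
dwc2_example_setup(struct dwc2_hsotg *hsotg,
		   const struct dwc2_core_params *params)
{
	int retval;

	if (!dwc2_is_controller_alive(hsotg))
		return -ENODEV;

	retval = dwc2_get_hwparams(hsotg);
	if (retval)
		return retval;

	dwc2_set_parameters(hsotg, params);
	return 0;
}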
2733
2734u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2735{
2736	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
2737}
2738
2739bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
2740{
2741	if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2742		return false;
2743	else
2744		return true;
2745}
2746
2747/**
2748 * dwc2_enable_global_interrupts() - Enables the controller's Global
2749 * Interrupt in the AHB Config register
2750 *
2751 * @hsotg: Programming view of DWC_otg controller
2752 */
2753void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2754{
2755	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2756
2757	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2758	writel(ahbcfg, hsotg->regs + GAHBCFG);
2759}
2760
2761/**
2762 * dwc2_disable_global_interrupts() - Disables the controller's Global
2763 * Interrupt in the AHB Config register
2764 *
2765 * @hsotg: Programming view of DWC_otg controller
2766 */
2767void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2768{
2769	u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2770
2771	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2772	writel(ahbcfg, hsotg->regs + GAHBCFG);
2773}
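
/*
 * Illustrative sketch (hypothetical teardown helper): besides dropping the
 * single global enable bit in GAHBCFG, a caller shutting the core down will
 * typically also mask the individual sources in GINTMSK and clear any
 * pending status in GINTSTS so nothing fires once the handler is gone.
 */
static void __maybe_unused
dwc2_example_quiesce(struct dwc2_hsotg *hsotg)
{
	dwc2_disable_global_interrupts(hsotg);
	writel(0, hsotg->regs + GINTMSK);
	writel(0xffffffff, hsotg->regs + GINTSTS);
}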
2774
2775MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2776MODULE_AUTHOR("Synopsys, Inc.");
2777MODULE_LICENSE("Dual BSD/GPL");