v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  FUJITSU Extended Socket Network Device driver
   4 *  Copyright (c) 2015 FUJITSU LIMITED
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/types.h>
   9#include <linux/nls.h>
  10#include <linux/platform_device.h>
  11#include <linux/netdevice.h>
  12#include <linux/interrupt.h>
  13
  14#include "fjes.h"
  15#include "fjes_trace.h"
  16
  17#define MAJ 1
  18#define MIN 2
  19#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
  20#define DRV_NAME	"fjes"
  21char fjes_driver_name[] = DRV_NAME;
  22char fjes_driver_version[] = DRV_VERSION;
  23static const char fjes_driver_string[] =
  24		"FUJITSU Extended Socket Network Device Driver";
  25static const char fjes_copyright[] =
  26		"Copyright (c) 2015 FUJITSU LIMITED";
  27
  28MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
  29MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
  30MODULE_LICENSE("GPL");
  31MODULE_VERSION(DRV_VERSION);
  32
  33#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
  34
  35static const struct acpi_device_id fjes_acpi_ids[] = {
  36	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
  37	{"", 0},
  38};
  39MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
  40
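/* is_extended_socket_device - check the ACPI _STR object for the Extended Socket symbol */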
  41static bool is_extended_socket_device(struct acpi_device *device)
  42{
  43	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
  44	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
  45	union acpi_object *str;
  46	acpi_status status;
  47	int result;
  48
  49	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
  50	if (ACPI_FAILURE(status))
  51		return false;
  52
  53	str = buffer.pointer;
  54	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
  55				 str->string.length, UTF16_LITTLE_ENDIAN,
  56				 str_buf, sizeof(str_buf) - 1);
  57	str_buf[result] = 0;
  58
  59	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
  60		kfree(buffer.pointer);
  61		return false;
  62	}
  63	kfree(buffer.pointer);
  64
  65	return true;
  66}
  67
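/* acpi_check_extended_socket_status - confirm via _STA that the device is present, enabled and functioning */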
  68static int acpi_check_extended_socket_status(struct acpi_device *device)
  69{
  70	unsigned long long sta;
  71	acpi_status status;
  72
  73	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
  74	if (ACPI_FAILURE(status))
  75		return -ENODEV;
  76
  77	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
  78	      (sta & ACPI_STA_DEVICE_ENABLED) &&
  79	      (sta & ACPI_STA_DEVICE_UI) &&
  80	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
  81		return -ENODEV;
  82
  83	return 0;
  84}
  85
  86static acpi_status
  87fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
  88{
  89	struct acpi_resource_address32 *addr;
  90	struct acpi_resource_irq *irq;
  91	struct resource *res = data;
  92
  93	switch (acpi_res->type) {
  94	case ACPI_RESOURCE_TYPE_ADDRESS32:
  95		addr = &acpi_res->data.address32;
  96		res[0].start = addr->address.minimum;
  97		res[0].end = addr->address.minimum +
  98			addr->address.address_length - 1;
  99		break;
 100
 101	case ACPI_RESOURCE_TYPE_IRQ:
 102		irq = &acpi_res->data.irq;
 103		if (irq->interrupt_count != 1)
 104			return AE_ERROR;
 105		res[1].start = irq->interrupts[0];
 106		res[1].end = irq->interrupts[0];
 107		break;
 108
 109	default:
 110		break;
 111	}
 112
 113	return AE_OK;
 114}
 115
 116static struct resource fjes_resource[] = {
 117	DEFINE_RES_MEM(0, 1),
 118	DEFINE_RES_IRQ(0)
 119};
 120
 121static int fjes_acpi_add(struct acpi_device *device)
 122{
 123	struct platform_device *plat_dev;
 124	acpi_status status;
 125
 126	if (!is_extended_socket_device(device))
 127		return -ENODEV;
 128
 129	if (acpi_check_extended_socket_status(device))
 130		return -ENODEV;
 131
 132	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
 133				     fjes_get_acpi_resource, fjes_resource);
 134	if (ACPI_FAILURE(status))
 135		return -ENODEV;
 136
 137	/* create platform_device */
 138	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
 139						   ARRAY_SIZE(fjes_resource));
 140	if (IS_ERR(plat_dev))
 141		return PTR_ERR(plat_dev);
 142
 143	device->driver_data = plat_dev;
 144
 145	return 0;
 146}
 147
 148static void fjes_acpi_remove(struct acpi_device *device)
 149{
 150	struct platform_device *plat_dev;
 151
 152	plat_dev = (struct platform_device *)acpi_driver_data(device);
 153	platform_device_unregister(plat_dev);
 154}
 155
 156static struct acpi_driver fjes_acpi_driver = {
 157	.name = DRV_NAME,
 158	.class = DRV_NAME,
 159	.owner = THIS_MODULE,
 160	.ids = fjes_acpi_ids,
 161	.ops = {
 162		.add = fjes_acpi_add,
 163		.remove = fjes_acpi_remove,
 164	},
 165};
 166
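/* fjes_setup_resources - request zoning info and register shared buffers with partner EPs */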
 167static int fjes_setup_resources(struct fjes_adapter *adapter)
 168{
 169	struct net_device *netdev = adapter->netdev;
 170	struct ep_share_mem_info *buf_pair;
 171	struct fjes_hw *hw = &adapter->hw;
 172	unsigned long flags;
 173	int result;
 174	int epidx;
 175
 176	mutex_lock(&hw->hw_info.lock);
 177	result = fjes_hw_request_info(hw);
 178	switch (result) {
 179	case 0:
 180		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 181			hw->ep_shm_info[epidx].es_status =
 182			    hw->hw_info.res_buf->info.info[epidx].es_status;
 183			hw->ep_shm_info[epidx].zone =
 184			    hw->hw_info.res_buf->info.info[epidx].zone;
 185		}
 186		break;
 187	default:
 188	case -ENOMSG:
 189	case -EBUSY:
 190		adapter->force_reset = true;
 191
 192		mutex_unlock(&hw->hw_info.lock);
 193		return result;
 194	}
 195	mutex_unlock(&hw->hw_info.lock);
 196
 197	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 198		if ((epidx != hw->my_epid) &&
 199		    (hw->ep_shm_info[epidx].es_status ==
 200		     FJES_ZONING_STATUS_ENABLE)) {
 201			fjes_hw_raise_interrupt(hw, epidx,
 202						REG_ICTL_MASK_INFO_UPDATE);
 203			hw->ep_shm_info[epidx].ep_stats
 204				.send_intr_zoneupdate += 1;
 205		}
 206	}
 207
 208	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
 209
 210	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 211		if (epidx == hw->my_epid)
 212			continue;
 213
 214		buf_pair = &hw->ep_shm_info[epidx];
 215
 216		spin_lock_irqsave(&hw->rx_status_lock, flags);
 217		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
 218				    netdev->mtu);
 219		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 220
 221		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
 222			mutex_lock(&hw->hw_info.lock);
 223			result =
 224			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
 225			mutex_unlock(&hw->hw_info.lock);
 226
 227			switch (result) {
 228			case 0:
 229				break;
 230			case -ENOMSG:
 231			case -EBUSY:
 232			default:
 233				adapter->force_reset = true;
 234				return result;
 235			}
 236
 237			hw->ep_shm_info[epidx].ep_stats
 238				.com_regist_buf_exec += 1;
 239		}
 240	}
 241
 242	return 0;
 243}
 244
 245static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
 246{
 247	struct fjes_hw *hw = &adapter->hw;
 248
 249	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
 250
 251	adapter->unset_rx_last = true;
 252	napi_schedule(&adapter->napi);
 253}
 254
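/* fjes_stop_req_irq - handle a device stop request from a partner EP */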
 255static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
 256{
 257	struct fjes_hw *hw = &adapter->hw;
 258	enum ep_partner_status status;
 259	unsigned long flags;
 260
 261	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
 262
 263	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 264	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
 265	switch (status) {
 266	case EP_PARTNER_WAITING:
 267		spin_lock_irqsave(&hw->rx_status_lock, flags);
 268		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 269				FJES_RX_STOP_REQ_DONE;
 270		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 271		clear_bit(src_epid, &hw->txrx_stop_req_bit);
 272		fallthrough;
 273	case EP_PARTNER_UNSHARE:
 274	case EP_PARTNER_COMPLETE:
 275	default:
 276		set_bit(src_epid, &adapter->unshare_watch_bitmask);
 277		if (!work_pending(&adapter->unshare_watch_task))
 278			queue_work(adapter->control_wq,
 279				   &adapter->unshare_watch_task);
 280		break;
 281	case EP_PARTNER_SHARED:
 282		set_bit(src_epid, &hw->epstop_req_bit);
 283
 284		if (!work_pending(&hw->epstop_task))
 285			queue_work(adapter->control_wq, &hw->epstop_task);
 286		break;
 287	}
 288	trace_fjes_stop_req_irq_post(hw, src_epid);
 289}
 290
 291static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
 292				   int src_epid)
 293{
 294	struct fjes_hw *hw = &adapter->hw;
 295	enum ep_partner_status status;
 296	unsigned long flags;
 297
 298	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 299	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
 300	switch (status) {
 301	case EP_PARTNER_UNSHARE:
 302	case EP_PARTNER_COMPLETE:
 303	default:
 304		break;
 305	case EP_PARTNER_WAITING:
 306		if (src_epid < hw->my_epid) {
 307			spin_lock_irqsave(&hw->rx_status_lock, flags);
 308			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 309				FJES_RX_STOP_REQ_DONE;
 310			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 311
 312			clear_bit(src_epid, &hw->txrx_stop_req_bit);
 313			set_bit(src_epid, &adapter->unshare_watch_bitmask);
 314
 315			if (!work_pending(&adapter->unshare_watch_task))
 316				queue_work(adapter->control_wq,
 317					   &adapter->unshare_watch_task);
 318		}
 319		break;
 320	case EP_PARTNER_SHARED:
 321		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
 322		    FJES_RX_STOP_REQ_REQUEST) {
 323			set_bit(src_epid, &hw->epstop_req_bit);
 324			if (!work_pending(&hw->epstop_task))
 325				queue_work(adapter->control_wq,
 326					   &hw->epstop_task);
 327		}
 328		break;
 329	}
 330	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
 331}
 332
 333static void fjes_update_zone_irq(struct fjes_adapter *adapter,
 334				 int src_epid)
 335{
 336	struct fjes_hw *hw = &adapter->hw;
 337
 338	if (!work_pending(&hw->update_zone_task))
 339		queue_work(adapter->control_wq, &hw->update_zone_task);
 340}
 341
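/* fjes_intr - interrupt handler; dispatches RX, stop request and zone update events */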
 342static irqreturn_t fjes_intr(int irq, void *data)
 343{
 344	struct fjes_adapter *adapter = data;
 345	struct fjes_hw *hw = &adapter->hw;
 346	irqreturn_t ret;
 347	u32 icr;
 348
 349	icr = fjes_hw_capture_interrupt_status(hw);
 350
 351	if (icr & REG_IS_MASK_IS_ASSERT) {
 352		if (icr & REG_ICTL_MASK_RX_DATA) {
 353			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
 354			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 355				.recv_intr_rx += 1;
 356		}
 357
 358		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
 359			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
 360			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 361				.recv_intr_stop += 1;
 362		}
 363
 364		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
 365			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
 366			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 367				.recv_intr_unshare += 1;
 368		}
 369
 370		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
 371			fjes_hw_set_irqmask(hw,
 372					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
 373
 374		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
 375			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
 376			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 377				.recv_intr_zoneupdate += 1;
 378		}
 379
 380		ret = IRQ_HANDLED;
 381	} else {
 382		ret = IRQ_NONE;
 383	}
 384
 385	return ret;
 386}
 387
 388static int fjes_request_irq(struct fjes_adapter *adapter)
 389{
 390	struct net_device *netdev = adapter->netdev;
 391	int result = -1;
 392
 393	adapter->interrupt_watch_enable = true;
 394	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
 395		queue_delayed_work(adapter->control_wq,
 396				   &adapter->interrupt_watch_task,
 397				   FJES_IRQ_WATCH_DELAY);
 398	}
 399
 400	if (!adapter->irq_registered) {
 401		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
 402				     IRQF_SHARED, netdev->name, adapter);
 403		if (result)
 404			adapter->irq_registered = false;
 405		else
 406			adapter->irq_registered = true;
 407	}
 408
 409	return result;
 410}
 411
 412static void fjes_free_irq(struct fjes_adapter *adapter)
 413{
 414	struct fjes_hw *hw = &adapter->hw;
 415
 416	adapter->interrupt_watch_enable = false;
 417	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 418
 419	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
 420
 421	if (adapter->irq_registered) {
 422		free_irq(adapter->hw.hw_res.irq, adapter);
 423		adapter->irq_registered = false;
 424	}
 425}
 426
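/* fjes_free_resources - unregister shared buffers and reset the device if required */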
 427static void fjes_free_resources(struct fjes_adapter *adapter)
 428{
 429	struct net_device *netdev = adapter->netdev;
 430	struct fjes_device_command_param param;
 431	struct ep_share_mem_info *buf_pair;
 432	struct fjes_hw *hw = &adapter->hw;
 433	bool reset_flag = false;
 434	unsigned long flags;
 435	int result;
 436	int epidx;
 437
 438	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 439		if (epidx == hw->my_epid)
 440			continue;
 441
 442		mutex_lock(&hw->hw_info.lock);
 443		result = fjes_hw_unregister_buff_addr(hw, epidx);
 444		mutex_unlock(&hw->hw_info.lock);
 445
 446		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
 447
 448		if (result)
 449			reset_flag = true;
 450
 451		buf_pair = &hw->ep_shm_info[epidx];
 452
 453		spin_lock_irqsave(&hw->rx_status_lock, flags);
 454		fjes_hw_setup_epbuf(&buf_pair->tx,
 455				    netdev->dev_addr, netdev->mtu);
 456		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 457
 458		clear_bit(epidx, &hw->txrx_stop_req_bit);
 459	}
 460
 461	if (reset_flag || adapter->force_reset) {
 462		result = fjes_hw_reset(hw);
 463
 464		adapter->force_reset = false;
 465
 466		if (result)
 467			adapter->open_guard = true;
 468
 469		hw->hw_info.buffer_share_bit = 0;
 470
 471		memset((void *)&param, 0, sizeof(param));
 472
 473		param.req_len = hw->hw_info.req_buf_size;
 474		param.req_start = __pa(hw->hw_info.req_buf);
 475		param.res_len = hw->hw_info.res_buf_size;
 476		param.res_start = __pa(hw->hw_info.res_buf);
 477		param.share_start = __pa(hw->hw_info.share->ep_status);
 478
 479		fjes_hw_init_command_registers(hw, &param);
 480	}
 481}
 482
 483/* fjes_open - Called when a network interface is made active */
 484static int fjes_open(struct net_device *netdev)
 485{
 486	struct fjes_adapter *adapter = netdev_priv(netdev);
 487	struct fjes_hw *hw = &adapter->hw;
 488	int result;
 489
 490	if (adapter->open_guard)
 491		return -ENXIO;
 492
 493	result = fjes_setup_resources(adapter);
 494	if (result)
 495		goto err_setup_res;
 496
 497	hw->txrx_stop_req_bit = 0;
 498	hw->epstop_req_bit = 0;
 499
 500	napi_enable(&adapter->napi);
 501
 502	fjes_hw_capture_interrupt_status(hw);
 503
 504	result = fjes_request_irq(adapter);
 505	if (result)
 506		goto err_req_irq;
 507
 508	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
 509
 510	netif_tx_start_all_queues(netdev);
 511	netif_carrier_on(netdev);
 512
 513	return 0;
 514
 515err_req_irq:
 516	fjes_free_irq(adapter);
 517	napi_disable(&adapter->napi);
 518
 519err_setup_res:
 520	fjes_free_resources(adapter);
 521	return result;
 522}
 523
 524/* fjes_close - Disables a network interface */
 525static int fjes_close(struct net_device *netdev)
 526{
 527	struct fjes_adapter *adapter = netdev_priv(netdev);
 528	struct fjes_hw *hw = &adapter->hw;
 529	unsigned long flags;
 530	int epidx;
 531
 532	netif_tx_stop_all_queues(netdev);
 533	netif_carrier_off(netdev);
 534
 535	fjes_hw_raise_epstop(hw);
 536
 537	napi_disable(&adapter->napi);
 538
 539	spin_lock_irqsave(&hw->rx_status_lock, flags);
 540	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 541		if (epidx == hw->my_epid)
 542			continue;
 543
 544		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
 545		    EP_PARTNER_SHARED)
 546			adapter->hw.ep_shm_info[epidx]
 547				   .tx.info->v1i.rx_status &=
 548				~FJES_RX_POLL_WORK;
 549	}
 550	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 551
 552	fjes_free_irq(adapter);
 553
 554	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 555	cancel_work_sync(&adapter->unshare_watch_task);
 556	adapter->unshare_watch_bitmask = 0;
 557	cancel_work_sync(&adapter->raise_intr_rxdata_task);
 558	cancel_work_sync(&adapter->tx_stall_task);
 559
 560	cancel_work_sync(&hw->update_zone_task);
 561	cancel_work_sync(&hw->epstop_task);
 562
 563	fjes_hw_wait_epstop(hw);
 564
 565	fjes_free_resources(adapter);
 566
 567	return 0;
 568}
 569
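/* fjes_tx_send - copy a frame into the destination EP's shared TX buffer */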
 570static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
 571			void *data, size_t len)
 572{
 573	int retval;
 574
 575	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
 576					   data, len);
 577	if (retval)
 578		return retval;
 579
 580	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
 581		FJES_TX_DELAY_SEND_PENDING;
 582	if (!work_pending(&adapter->raise_intr_rxdata_task))
 583		queue_work(adapter->txrx_wq,
 584			   &adapter->raise_intr_rxdata_task);
 585
 586	retval = 0;
 587	return retval;
 588}
 589
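/* fjes_xmit_frame - transmit entry point; delivers the frame to each destination EP's shared buffer */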
 590static netdev_tx_t
 591fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 592{
 593	struct fjes_adapter *adapter = netdev_priv(netdev);
 594	struct fjes_hw *hw = &adapter->hw;
 595
 596	int max_epid, my_epid, dest_epid;
 597	enum ep_partner_status pstatus;
 598	struct netdev_queue *cur_queue;
 599	char shortpkt[VLAN_ETH_HLEN];
 600	bool is_multi, vlan;
 601	struct ethhdr *eth;
 602	u16 queue_no = 0;
 603	u16 vlan_id = 0;
 604	netdev_tx_t ret;
 605	char *data;
 606	int len;
 607
 608	ret = NETDEV_TX_OK;
 609	is_multi = false;
 610	cur_queue = netdev_get_tx_queue(netdev, queue_no);
 611
 612	eth = (struct ethhdr *)skb->data;
 613	my_epid = hw->my_epid;
 614
 615	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
 616
 617	data = skb->data;
 618	len = skb->len;
 619
 620	if (is_multicast_ether_addr(eth->h_dest)) {
 621		dest_epid = 0;
 622		max_epid = hw->max_epid;
 623		is_multi = true;
 624	} else if (is_local_ether_addr(eth->h_dest)) {
 625		dest_epid = eth->h_dest[ETH_ALEN - 1];
 626		max_epid = dest_epid + 1;
 627
 628		if ((eth->h_dest[0] == 0x02) &&
 629		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
 630			      eth->h_dest[3] | eth->h_dest[4])) &&
 631		    (dest_epid < hw->max_epid)) {
 632			;
 633		} else {
 634			dest_epid = 0;
 635			max_epid = 0;
 636			ret = NETDEV_TX_OK;
 637
 638			adapter->stats64.tx_packets += 1;
 639			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 640			adapter->stats64.tx_bytes += len;
 641			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 642		}
 643	} else {
 644		dest_epid = 0;
 645		max_epid = 0;
 646		ret = NETDEV_TX_OK;
 647
 648		adapter->stats64.tx_packets += 1;
 649		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 650		adapter->stats64.tx_bytes += len;
 651		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 652	}
 653
 654	for (; dest_epid < max_epid; dest_epid++) {
 655		if (my_epid == dest_epid)
 656			continue;
 657
 658		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
 659		if (pstatus != EP_PARTNER_SHARED) {
 660			if (!is_multi)
 661				hw->ep_shm_info[dest_epid].ep_stats
 662					.tx_dropped_not_shared += 1;
 663			ret = NETDEV_TX_OK;
 664		} else if (!fjes_hw_check_epbuf_version(
 665				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
 666			/* version is NOT 0 */
 667			adapter->stats64.tx_carrier_errors += 1;
 668			hw->ep_shm_info[dest_epid].net_stats
 669						.tx_carrier_errors += 1;
 670			hw->ep_shm_info[dest_epid].ep_stats
 671					.tx_dropped_ver_mismatch += 1;
 672
 673			ret = NETDEV_TX_OK;
 674		} else if (!fjes_hw_check_mtu(
 675				&adapter->hw.ep_shm_info[dest_epid].rx,
 676				netdev->mtu)) {
 677			adapter->stats64.tx_dropped += 1;
 678			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
 679			adapter->stats64.tx_errors += 1;
 680			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
 681			hw->ep_shm_info[dest_epid].ep_stats
 682					.tx_dropped_buf_size_mismatch += 1;
 683
 684			ret = NETDEV_TX_OK;
 685		} else if (vlan &&
 686			   !fjes_hw_check_vlan_id(
 687				&adapter->hw.ep_shm_info[dest_epid].rx,
 688				vlan_id)) {
 689			hw->ep_shm_info[dest_epid].ep_stats
 690				.tx_dropped_vlanid_mismatch += 1;
 691			ret = NETDEV_TX_OK;
 692		} else {
 693			if (len < VLAN_ETH_HLEN) {
 694				memset(shortpkt, 0, VLAN_ETH_HLEN);
 695				memcpy(shortpkt, skb->data, skb->len);
 696				len = VLAN_ETH_HLEN;
 697				data = shortpkt;
 698			}
 699
 700			if (adapter->tx_retry_count == 0) {
 701				adapter->tx_start_jiffies = jiffies;
 702				adapter->tx_retry_count = 1;
 703			} else {
 704				adapter->tx_retry_count++;
 705			}
 706
 707			if (fjes_tx_send(adapter, dest_epid, data, len)) {
 708				if (is_multi) {
 709					ret = NETDEV_TX_OK;
 710				} else if (
 711					   ((long)jiffies -
 712					    (long)adapter->tx_start_jiffies) >=
 713					    FJES_TX_RETRY_TIMEOUT) {
 714					adapter->stats64.tx_fifo_errors += 1;
 715					hw->ep_shm_info[dest_epid].net_stats
 716								.tx_fifo_errors += 1;
 717					adapter->stats64.tx_errors += 1;
 718					hw->ep_shm_info[dest_epid].net_stats
 719								.tx_errors += 1;
 720
 721					ret = NETDEV_TX_OK;
 722				} else {
 723					netif_trans_update(netdev);
 724					hw->ep_shm_info[dest_epid].ep_stats
 725						.tx_buffer_full += 1;
 726					netif_tx_stop_queue(cur_queue);
 727
 728					if (!work_pending(&adapter->tx_stall_task))
 729						queue_work(adapter->txrx_wq,
 730							   &adapter->tx_stall_task);
 731
 732					ret = NETDEV_TX_BUSY;
 733				}
 734			} else {
 735				if (!is_multi) {
 736					adapter->stats64.tx_packets += 1;
 737					hw->ep_shm_info[dest_epid].net_stats
 738								.tx_packets += 1;
 739					adapter->stats64.tx_bytes += len;
 740					hw->ep_shm_info[dest_epid].net_stats
 741								.tx_bytes += len;
 742				}
 743
 744				adapter->tx_retry_count = 0;
 745				ret = NETDEV_TX_OK;
 746			}
 747		}
 748	}
 749
 750	if (ret == NETDEV_TX_OK) {
 751		dev_kfree_skb(skb);
 752		if (is_multi) {
 753			adapter->stats64.tx_packets += 1;
 754			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 755			adapter->stats64.tx_bytes += len;
 756			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 757		}
 758	}
 759
 760	return ret;
 761}
 762
 763static void
 764fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 765{
 766	struct fjes_adapter *adapter = netdev_priv(netdev);
 767
 768	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
 769}
 770
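/* fjes_change_mtu - switch to a supported MTU and rebuild the shared buffers */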
 771static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
 772{
 773	struct fjes_adapter *adapter = netdev_priv(netdev);
 774	bool running = netif_running(netdev);
 775	struct fjes_hw *hw = &adapter->hw;
 776	unsigned long flags;
 777	int ret = -EINVAL;
 778	int idx, epidx;
 779
 780	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
 781		if (new_mtu <= fjes_support_mtu[idx]) {
 782			new_mtu = fjes_support_mtu[idx];
 783			if (new_mtu == netdev->mtu)
 784				return 0;
 785
 786			ret = 0;
 787			break;
 788		}
 789	}
 790
 791	if (ret)
 792		return ret;
 793
 794	if (running) {
 795		spin_lock_irqsave(&hw->rx_status_lock, flags);
 796		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 797			if (epidx == hw->my_epid)
 798				continue;
 799			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
 800				~FJES_RX_MTU_CHANGING_DONE;
 801		}
 802		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 803
 804		netif_tx_stop_all_queues(netdev);
 805		netif_carrier_off(netdev);
 806		cancel_work_sync(&adapter->tx_stall_task);
 807		napi_disable(&adapter->napi);
 808
 809		msleep(1000);
 810
 811		netif_tx_stop_all_queues(netdev);
 812	}
 813
 814	netdev->mtu = new_mtu;
 815
 816	if (running) {
 817		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 818			if (epidx == hw->my_epid)
 819				continue;
 820
 821			spin_lock_irqsave(&hw->rx_status_lock, flags);
 822			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
 823					    netdev->dev_addr,
 824					    netdev->mtu);
 825
 826			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
 827				FJES_RX_MTU_CHANGING_DONE;
 828			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 829		}
 830
 831		netif_tx_wake_all_queues(netdev);
 832		netif_carrier_on(netdev);
 833		napi_enable(&adapter->napi);
 834		napi_schedule(&adapter->napi);
 835	}
 836
 837	return ret;
 838}
 839
 840static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
 841{
 842	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
 843
 844	netif_tx_wake_queue(queue);
 845}
 846
 847static int fjes_vlan_rx_add_vid(struct net_device *netdev,
 848				__be16 proto, u16 vid)
 849{
 850	struct fjes_adapter *adapter = netdev_priv(netdev);
 851	bool ret = true;
 852	int epid;
 853
 854	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 855		if (epid == adapter->hw.my_epid)
 856			continue;
 857
 858		if (!fjes_hw_check_vlan_id(
 859			&adapter->hw.ep_shm_info[epid].tx, vid))
 860			ret = fjes_hw_set_vlan_id(
 861				&adapter->hw.ep_shm_info[epid].tx, vid);
 862	}
 863
 864	return ret ? 0 : -ENOSPC;
 865}
 866
 867static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
 868				 __be16 proto, u16 vid)
 869{
 870	struct fjes_adapter *adapter = netdev_priv(netdev);
 871	int epid;
 872
 873	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 874		if (epid == adapter->hw.my_epid)
 875			continue;
 876
 877		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
 878	}
 879
 880	return 0;
 881}
 882
 883static const struct net_device_ops fjes_netdev_ops = {
 884	.ndo_open		= fjes_open,
 885	.ndo_stop		= fjes_close,
 886	.ndo_start_xmit		= fjes_xmit_frame,
 887	.ndo_get_stats64	= fjes_get_stats64,
 888	.ndo_change_mtu		= fjes_change_mtu,
 889	.ndo_tx_timeout		= fjes_tx_retry,
 890	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
 891	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
 892};
 893
 894/* fjes_netdev_setup - netdevice initialization routine */
 895static void fjes_netdev_setup(struct net_device *netdev)
 896{
 897	ether_setup(netdev);
 898
 899	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
 900	netdev->netdev_ops = &fjes_netdev_ops;
 901	fjes_set_ethtool_ops(netdev);
 902	netdev->mtu = fjes_support_mtu[3];
 903	netdev->min_mtu = fjes_support_mtu[0];
 904	netdev->max_mtu = fjes_support_mtu[3];
 905	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 906}
 907
 908static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
 909				     int start_epid)
 910{
 911	struct fjes_hw *hw = &adapter->hw;
 912	enum ep_partner_status pstatus;
 913	int max_epid, cur_epid;
 914	int i;
 915
 916	max_epid = hw->max_epid;
 917	start_epid = (start_epid + 1 + max_epid) % max_epid;
 918
 919	for (i = 0; i < max_epid; i++) {
 920		cur_epid = (start_epid + i) % max_epid;
 921		if (cur_epid == hw->my_epid)
 922			continue;
 923
 924		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
 925		if (pstatus == EP_PARTNER_SHARED) {
 926			if (!fjes_hw_epbuf_rx_is_empty(
 927				&hw->ep_shm_info[cur_epid].rx))
 928				return cur_epid;
 929		}
 930	}
 931	return -1;
 932}
 933
 934static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
 935			      int *cur_epid)
 936{
 937	void *frame;
 938
 939	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
 940	if (*cur_epid < 0)
 941		return NULL;
 942
 943	frame =
 944	fjes_hw_epbuf_rx_curpkt_get_addr(
 945		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
 946
 947	return frame;
 948}
 949
 950static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
 951{
 952	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
 953}
 954
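/* fjes_poll - NAPI poll routine; receives frames from partner EPs' shared buffers */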
 955static int fjes_poll(struct napi_struct *napi, int budget)
 956{
 957	struct fjes_adapter *adapter =
 958			container_of(napi, struct fjes_adapter, napi);
 959	struct net_device *netdev = napi->dev;
 960	struct fjes_hw *hw = &adapter->hw;
 961	struct sk_buff *skb;
 962	int work_done = 0;
 963	int cur_epid = 0;
 964	int epidx;
 965	size_t frame_len;
 966	void *frame;
 967
 968	spin_lock(&hw->rx_status_lock);
 969	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 970		if (epidx == hw->my_epid)
 971			continue;
 972
 973		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
 974		    EP_PARTNER_SHARED)
 975			adapter->hw.ep_shm_info[epidx]
 976				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
 977	}
 978	spin_unlock(&hw->rx_status_lock);
 979
 980	while (work_done < budget) {
 981		prefetch(&adapter->hw);
 982		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
 983
 984		if (frame) {
 985			skb = napi_alloc_skb(napi, frame_len);
 986			if (!skb) {
 987				adapter->stats64.rx_dropped += 1;
 988				hw->ep_shm_info[cur_epid].net_stats
 989							 .rx_dropped += 1;
 990				adapter->stats64.rx_errors += 1;
 991				hw->ep_shm_info[cur_epid].net_stats
 992							 .rx_errors += 1;
 993			} else {
 994				skb_put_data(skb, frame, frame_len);
 995				skb->protocol = eth_type_trans(skb, netdev);
 996				skb->ip_summed = CHECKSUM_UNNECESSARY;
 997
 998				netif_receive_skb(skb);
 999
1000				work_done++;
1001
1002				adapter->stats64.rx_packets += 1;
1003				hw->ep_shm_info[cur_epid].net_stats
1004							 .rx_packets += 1;
1005				adapter->stats64.rx_bytes += frame_len;
1006				hw->ep_shm_info[cur_epid].net_stats
1007							 .rx_bytes += frame_len;
1008
1009				if (is_multicast_ether_addr(
1010					((struct ethhdr *)frame)->h_dest)) {
1011					adapter->stats64.multicast += 1;
1012					hw->ep_shm_info[cur_epid].net_stats
1013								 .multicast += 1;
1014				}
1015			}
1016
1017			fjes_rxframe_release(adapter, cur_epid);
1018			adapter->unset_rx_last = true;
1019		} else {
1020			break;
1021		}
1022	}
1023
1024	if (work_done < budget) {
1025		napi_complete_done(napi, work_done);
1026
1027		if (adapter->unset_rx_last) {
1028			adapter->rx_last_jiffies = jiffies;
1029			adapter->unset_rx_last = false;
1030		}
1031
1032		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
1033			napi_schedule(napi);
1034		} else {
1035			spin_lock(&hw->rx_status_lock);
1036			for (epidx = 0; epidx < hw->max_epid; epidx++) {
1037				if (epidx == hw->my_epid)
1038					continue;
1039				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1040				    EP_PARTNER_SHARED)
1041					adapter->hw.ep_shm_info[epidx].tx
1042						   .info->v1i.rx_status &=
1043						~FJES_RX_POLL_WORK;
1044			}
1045			spin_unlock(&hw->rx_status_lock);
1046
1047			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
1048		}
1049	}
1050
1051	return work_done;
1052}
1053
1054static int fjes_sw_init(struct fjes_adapter *adapter)
1055{
1056	struct net_device *netdev = adapter->netdev;
1057
1058	netif_napi_add(netdev, &adapter->napi, fjes_poll);
1059
1060	return 0;
1061}
1062
1063static void fjes_force_close_task(struct work_struct *work)
1064{
1065	struct fjes_adapter *adapter = container_of(work,
1066			struct fjes_adapter, force_close_task);
1067	struct net_device *netdev = adapter->netdev;
1068
1069	rtnl_lock();
1070	dev_close(netdev);
1071	rtnl_unlock();
1072}
1073
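/* fjes_tx_stall_task - wake the TX queue once partner buffers have room again */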
1074static void fjes_tx_stall_task(struct work_struct *work)
1075{
1076	struct fjes_adapter *adapter = container_of(work,
1077			struct fjes_adapter, tx_stall_task);
1078	struct net_device *netdev = adapter->netdev;
1079	struct fjes_hw *hw = &adapter->hw;
1080	int all_queue_available, sendable;
1081	enum ep_partner_status pstatus;
1082	int max_epid, my_epid, epid;
1083	union ep_buffer_info *info;
1084	int i;
1085
1086	if (((long)jiffies -
1087		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
1088		netif_wake_queue(netdev);
1089		return;
1090	}
1091
1092	my_epid = hw->my_epid;
1093	max_epid = hw->max_epid;
1094
1095	for (i = 0; i < 5; i++) {
1096		all_queue_available = 1;
1097
1098		for (epid = 0; epid < max_epid; epid++) {
1099			if (my_epid == epid)
1100				continue;
1101
1102			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
1103			sendable = (pstatus == EP_PARTNER_SHARED);
1104			if (!sendable)
1105				continue;
1106
1107			info = adapter->hw.ep_shm_info[epid].tx.info;
1108
1109			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
1110				return;
1111
1112			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
1113					 info->v1i.count_max)) {
1114				all_queue_available = 0;
1115				break;
1116			}
1117		}
1118
1119		if (all_queue_available) {
1120			netif_wake_queue(netdev);
1121			return;
1122		}
1123	}
1124
1125	usleep_range(50, 100);
1126
1127	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
1128}
1129
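/* fjes_raise_intr_rxdata_task - raise RX data interrupts toward partners with pending frames */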
1130static void fjes_raise_intr_rxdata_task(struct work_struct *work)
1131{
1132	struct fjes_adapter *adapter = container_of(work,
1133			struct fjes_adapter, raise_intr_rxdata_task);
1134	struct fjes_hw *hw = &adapter->hw;
1135	enum ep_partner_status pstatus;
1136	int max_epid, my_epid, epid;
1137
1138	my_epid = hw->my_epid;
1139	max_epid = hw->max_epid;
1140
1141	for (epid = 0; epid < max_epid; epid++)
1142		hw->ep_shm_info[epid].tx_status_work = 0;
1143
1144	for (epid = 0; epid < max_epid; epid++) {
1145		if (epid == my_epid)
1146			continue;
1147
1148		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
1149		if (pstatus == EP_PARTNER_SHARED) {
1150			hw->ep_shm_info[epid].tx_status_work =
1151				hw->ep_shm_info[epid].tx.info->v1i.tx_status;
1152
1153			if (hw->ep_shm_info[epid].tx_status_work ==
1154				FJES_TX_DELAY_SEND_PENDING) {
1155				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
1156					FJES_TX_DELAY_SEND_NONE;
1157			}
1158		}
1159	}
1160
1161	for (epid = 0; epid < max_epid; epid++) {
1162		if (epid == my_epid)
1163			continue;
1164
1165		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
1166		if ((hw->ep_shm_info[epid].tx_status_work ==
1167		     FJES_TX_DELAY_SEND_PENDING) &&
1168		    (pstatus == EP_PARTNER_SHARED) &&
1169		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
1170		      FJES_RX_POLL_WORK)) {
1171			fjes_hw_raise_interrupt(hw, epid,
1172						REG_ICTL_MASK_RX_DATA);
1173			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
1174		}
1175	}
1176
1177	usleep_range(500, 1000);
1178}
1179
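/* fjes_watch_unshare_task - unregister buffers of partners that have stopped sharing */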
1180static void fjes_watch_unshare_task(struct work_struct *work)
1181{
1182	struct fjes_adapter *adapter =
1183	container_of(work, struct fjes_adapter, unshare_watch_task);
1184
1185	struct net_device *netdev = adapter->netdev;
1186	struct fjes_hw *hw = &adapter->hw;
1187
1188	int unshare_watch, unshare_reserve;
1189	int max_epid, my_epid, epidx;
1190	int stop_req, stop_req_done;
1191	ulong unshare_watch_bitmask;
1192	unsigned long flags;
1193	int wait_time = 0;
1194	int is_shared;
1195	int ret;
1196
1197	my_epid = hw->my_epid;
1198	max_epid = hw->max_epid;
1199
1200	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1201	adapter->unshare_watch_bitmask = 0;
1202
1203	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1204	       (wait_time < 3000)) {
1205		for (epidx = 0; epidx < max_epid; epidx++) {
1206			if (epidx == my_epid)
1207				continue;
1208
1209			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1210							   epidx);
1211
1212			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1213
1214			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1215					FJES_RX_STOP_REQ_DONE;
1216
1217			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1218
1219			unshare_reserve = test_bit(epidx,
1220						   &hw->hw_info.buffer_unshare_reserve_bit);
1221
1222			if ((!stop_req ||
1223			     (is_shared && (!is_shared || !stop_req_done))) &&
1224			    (is_shared || !unshare_watch || !unshare_reserve))
1225				continue;
1226
1227			mutex_lock(&hw->hw_info.lock);
1228			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1229			switch (ret) {
1230			case 0:
1231				break;
1232			case -ENOMSG:
1233			case -EBUSY:
1234			default:
1235				if (!work_pending(
1236					&adapter->force_close_task)) {
1237					adapter->force_reset = true;
1238					schedule_work(
1239						&adapter->force_close_task);
1240				}
1241				break;
1242			}
1243			mutex_unlock(&hw->hw_info.lock);
1244			hw->ep_shm_info[epidx].ep_stats
1245					.com_unregist_buf_exec += 1;
1246
1247			spin_lock_irqsave(&hw->rx_status_lock, flags);
1248			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1249					    netdev->dev_addr, netdev->mtu);
1250			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1251
1252			clear_bit(epidx, &hw->txrx_stop_req_bit);
1253			clear_bit(epidx, &unshare_watch_bitmask);
1254			clear_bit(epidx,
1255				  &hw->hw_info.buffer_unshare_reserve_bit);
1256		}
1257
1258		msleep(100);
1259		wait_time += 100;
1260	}
1261
1262	if (hw->hw_info.buffer_unshare_reserve_bit) {
1263		for (epidx = 0; epidx < max_epid; epidx++) {
1264			if (epidx == my_epid)
1265				continue;
1266
1267			if (test_bit(epidx,
1268				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1269				mutex_lock(&hw->hw_info.lock);
1270
1271				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1272				switch (ret) {
1273				case 0:
1274					break;
1275				case -ENOMSG:
1276				case -EBUSY:
1277				default:
1278					if (!work_pending(
1279						&adapter->force_close_task)) {
1280						adapter->force_reset = true;
1281						schedule_work(
1282							&adapter->force_close_task);
1283					}
1284					break;
1285				}
1286				mutex_unlock(&hw->hw_info.lock);
1287
1288				hw->ep_shm_info[epidx].ep_stats
1289					.com_unregist_buf_exec += 1;
1290
1291				spin_lock_irqsave(&hw->rx_status_lock, flags);
1292				fjes_hw_setup_epbuf(
1293					&hw->ep_shm_info[epidx].tx,
1294					netdev->dev_addr, netdev->mtu);
1295				spin_unlock_irqrestore(&hw->rx_status_lock,
1296						       flags);
1297
1298				clear_bit(epidx, &hw->txrx_stop_req_bit);
1299				clear_bit(epidx, &unshare_watch_bitmask);
1300				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1301			}
1302
1303			if (test_bit(epidx, &unshare_watch_bitmask)) {
1304				spin_lock_irqsave(&hw->rx_status_lock, flags);
1305				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1306						~FJES_RX_STOP_REQ_DONE;
1307				spin_unlock_irqrestore(&hw->rx_status_lock,
1308						       flags);
1309			}
1310		}
1311	}
1312}
1313
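/* fjes_irq_watch_task - periodically re-run the interrupt handler as a missed-IRQ fallback */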
1314static void fjes_irq_watch_task(struct work_struct *work)
1315{
1316	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1317			struct fjes_adapter, interrupt_watch_task);
1318
1319	local_irq_disable();
1320	fjes_intr(adapter->hw.hw_res.irq, adapter);
1321	local_irq_enable();
1322
1323	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1324		napi_schedule(&adapter->napi);
1325
1326	if (adapter->interrupt_watch_enable) {
1327		if (!delayed_work_pending(&adapter->interrupt_watch_task))
1328			queue_delayed_work(adapter->control_wq,
1329					   &adapter->interrupt_watch_task,
1330					   FJES_IRQ_WATCH_DELAY);
1331	}
1332}
1333
1334/* fjes_probe - Device Initialization Routine */
1335static int fjes_probe(struct platform_device *plat_dev)
1336{
1337	struct fjes_adapter *adapter;
1338	struct net_device *netdev;
1339	struct resource *res;
1340	struct fjes_hw *hw;
1341	u8 addr[ETH_ALEN];
1342	int err;
1343
1344	err = -ENOMEM;
1345	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1346				 NET_NAME_UNKNOWN, fjes_netdev_setup,
1347				 FJES_MAX_QUEUES);
1348
1349	if (!netdev)
1350		goto err_out;
1351
1352	SET_NETDEV_DEV(netdev, &plat_dev->dev);
1353
1354	dev_set_drvdata(&plat_dev->dev, netdev);
1355	adapter = netdev_priv(netdev);
1356	adapter->netdev = netdev;
1357	adapter->plat_dev = plat_dev;
1358	hw = &adapter->hw;
1359	hw->back = adapter;
1360
1361	/* setup the private structure */
1362	err = fjes_sw_init(adapter);
1363	if (err)
1364		goto err_free_netdev;
1365
1366	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1367	adapter->force_reset = false;
1368	adapter->open_guard = false;
1369
1370	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1371	if (unlikely(!adapter->txrx_wq)) {
1372		err = -ENOMEM;
1373		goto err_free_netdev;
1374	}
1375
1376	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1377					      WQ_MEM_RECLAIM, 0);
1378	if (unlikely(!adapter->control_wq)) {
1379		err = -ENOMEM;
1380		goto err_free_txrx_wq;
1381	}
1382
1383	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1384	INIT_WORK(&adapter->raise_intr_rxdata_task,
1385		  fjes_raise_intr_rxdata_task);
1386	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1387	adapter->unshare_watch_bitmask = 0;
1388
1389	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1390	adapter->interrupt_watch_enable = false;
1391
1392	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1393	if (!res) {
1394		err = -EINVAL;
1395		goto err_free_control_wq;
1396	}
1397	hw->hw_res.start = res->start;
1398	hw->hw_res.size = resource_size(res);
1399	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1400	if (hw->hw_res.irq < 0) {
1401		err = hw->hw_res.irq;
1402		goto err_free_control_wq;
1403	}
1404
1405	err = fjes_hw_init(&adapter->hw);
1406	if (err)
1407		goto err_free_control_wq;
1408
1409	/* setup MAC address (02:00:00:00:00:[epid])*/
1410	addr[0] = 2;
1411	addr[1] = 0;
1412	addr[2] = 0;
1413	addr[3] = 0;
1414	addr[4] = 0;
1415	addr[5] = hw->my_epid; /* EPID */
1416	eth_hw_addr_set(netdev, addr);
1417
1418	err = register_netdev(netdev);
1419	if (err)
1420		goto err_hw_exit;
1421
1422	netif_carrier_off(netdev);
1423
1424	fjes_dbg_adapter_init(adapter);
1425
1426	return 0;
1427
1428err_hw_exit:
1429	fjes_hw_exit(&adapter->hw);
1430err_free_control_wq:
1431	destroy_workqueue(adapter->control_wq);
1432err_free_txrx_wq:
1433	destroy_workqueue(adapter->txrx_wq);
1434err_free_netdev:
1435	free_netdev(netdev);
1436err_out:
1437	return err;
1438}
1439
1440/* fjes_remove - Device Removal Routine */
1441static void fjes_remove(struct platform_device *plat_dev)
1442{
1443	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
1444	struct fjes_adapter *adapter = netdev_priv(netdev);
1445	struct fjes_hw *hw = &adapter->hw;
1446
1447	fjes_dbg_adapter_exit(adapter);
1448
1449	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
1450	cancel_work_sync(&adapter->unshare_watch_task);
1451	cancel_work_sync(&adapter->raise_intr_rxdata_task);
1452	cancel_work_sync(&adapter->tx_stall_task);
1453	if (adapter->control_wq)
1454		destroy_workqueue(adapter->control_wq);
1455	if (adapter->txrx_wq)
1456		destroy_workqueue(adapter->txrx_wq);
1457
1458	unregister_netdev(netdev);
1459
1460	fjes_hw_exit(hw);
1461
1462	netif_napi_del(&adapter->napi);
1463
1464	free_netdev(netdev);
1465}
1466
1467static struct platform_driver fjes_driver = {
1468	.driver = {
1469		.name = DRV_NAME,
1470	},
1471	.probe = fjes_probe,
1472	.remove_new = fjes_remove,
1473};
1474
1475static acpi_status
1476acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1477				 void *context, void **return_value)
1478{
1479	struct acpi_device *device;
1480	bool *found = context;
1481
1482	device = acpi_fetch_acpi_dev(obj_handle);
1483	if (!device)
1484		return AE_OK;
1485
1486	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1487		return AE_OK;
1488
1489	if (!is_extended_socket_device(device))
1490		return AE_OK;
1491
1492	if (acpi_check_extended_socket_status(device))
1493		return AE_OK;
1494
1495	*found = true;
1496	return AE_CTRL_TERMINATE;
1497}
1498
1499/* fjes_init_module - Driver Registration Routine */
1500static int __init fjes_init_module(void)
1501{
1502	bool found = false;
1503	int result;
1504
1505	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1506			    acpi_find_extended_socket_device, NULL, &found,
1507			    NULL);
1508
1509	if (!found)
1510		return -ENODEV;
1511
1512	pr_info("%s - version %s - %s\n",
1513		fjes_driver_string, fjes_driver_version, fjes_copyright);
1514
1515	fjes_dbg_init();
1516
1517	result = platform_driver_register(&fjes_driver);
1518	if (result < 0) {
1519		fjes_dbg_exit();
1520		return result;
1521	}
1522
1523	result = acpi_bus_register_driver(&fjes_acpi_driver);
1524	if (result < 0)
1525		goto fail_acpi_driver;
1526
1527	return 0;
1528
1529fail_acpi_driver:
1530	platform_driver_unregister(&fjes_driver);
1531	fjes_dbg_exit();
1532	return result;
1533}
1534
1535module_init(fjes_init_module);
1536
1537/* fjes_exit_module - Driver Exit Cleanup Routine */
1538static void __exit fjes_exit_module(void)
1539{
1540	acpi_bus_unregister_driver(&fjes_acpi_driver);
1541	platform_driver_unregister(&fjes_driver);
1542	fjes_dbg_exit();
1543}
1544
1545module_exit(fjes_exit_module);
v4.10.11
 
   1/*
   2 *  FUJITSU Extended Socket Network Device driver
   3 *  Copyright (c) 2015 FUJITSU LIMITED
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, see <http://www.gnu.org/licenses/>.
  16 *
  17 * The full GNU General Public License is included in this distribution in
  18 * the file called "COPYING".
  19 *
  20 */
  21
  22#include <linux/module.h>
  23#include <linux/types.h>
  24#include <linux/nls.h>
  25#include <linux/platform_device.h>
  26#include <linux/netdevice.h>
  27#include <linux/interrupt.h>
  28
  29#include "fjes.h"
  30#include "fjes_trace.h"
  31
  32#define MAJ 1
  33#define MIN 2
  34#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
  35#define DRV_NAME	"fjes"
  36char fjes_driver_name[] = DRV_NAME;
  37char fjes_driver_version[] = DRV_VERSION;
  38static const char fjes_driver_string[] =
  39		"FUJITSU Extended Socket Network Device Driver";
  40static const char fjes_copyright[] =
  41		"Copyright (c) 2015 FUJITSU LIMITED";
  42
  43MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
  44MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
  45MODULE_LICENSE("GPL");
  46MODULE_VERSION(DRV_VERSION);
  47
  48static int fjes_request_irq(struct fjes_adapter *);
  49static void fjes_free_irq(struct fjes_adapter *);
  50
  51static int fjes_open(struct net_device *);
  52static int fjes_close(struct net_device *);
  53static int fjes_setup_resources(struct fjes_adapter *);
  54static void fjes_free_resources(struct fjes_adapter *);
  55static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
  56static void fjes_raise_intr_rxdata_task(struct work_struct *);
  57static void fjes_tx_stall_task(struct work_struct *);
  58static void fjes_force_close_task(struct work_struct *);
  59static irqreturn_t fjes_intr(int, void*);
  60static struct rtnl_link_stats64 *
  61fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
  62static int fjes_change_mtu(struct net_device *, int);
  63static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
  64static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
  65static void fjes_tx_retry(struct net_device *);
  66
  67static int fjes_acpi_add(struct acpi_device *);
  68static int fjes_acpi_remove(struct acpi_device *);
  69static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
  70
  71static int fjes_probe(struct platform_device *);
  72static int fjes_remove(struct platform_device *);
  73
  74static int fjes_sw_init(struct fjes_adapter *);
  75static void fjes_netdev_setup(struct net_device *);
  76static void fjes_irq_watch_task(struct work_struct *);
  77static void fjes_watch_unshare_task(struct work_struct *);
  78static void fjes_rx_irq(struct fjes_adapter *, int);
  79static int fjes_poll(struct napi_struct *, int);
  80
  81static const struct acpi_device_id fjes_acpi_ids[] = {
  82	{"PNP0C02", 0},
  83	{"", 0},
  84};
  85MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
  86
  87static struct acpi_driver fjes_acpi_driver = {
  88	.name = DRV_NAME,
  89	.class = DRV_NAME,
  90	.owner = THIS_MODULE,
  91	.ids = fjes_acpi_ids,
  92	.ops = {
  93		.add = fjes_acpi_add,
  94		.remove = fjes_acpi_remove,
  95	},
  96};
  97
  98static struct platform_driver fjes_driver = {
  99	.driver = {
 100		.name = DRV_NAME,
 101	},
 102	.probe = fjes_probe,
 103	.remove = fjes_remove,
 104};
 105
 106static struct resource fjes_resource[] = {
 107	{
 108		.flags = IORESOURCE_MEM,
 109		.start = 0,
 110		.end = 0,
 111	},
 112	{
 113		.flags = IORESOURCE_IRQ,
 114		.start = 0,
 115		.end = 0,
 116	},
 117};
 118
 119static int fjes_acpi_add(struct acpi_device *device)
 120{
 121	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
 122	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
 123	struct platform_device *plat_dev;
 124	union acpi_object *str;
 125	acpi_status status;
 126	int result;
 127
 128	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
 129	if (ACPI_FAILURE(status))
 130		return -ENODEV;
 131
 132	str = buffer.pointer;
 133	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
 134				 str->string.length, UTF16_LITTLE_ENDIAN,
 135				 str_buf, sizeof(str_buf) - 1);
 136	str_buf[result] = 0;
 137
 138	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
 139		kfree(buffer.pointer);
 140		return -ENODEV;
 141	}
 142	kfree(buffer.pointer);
 143
 144	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
 145				     fjes_get_acpi_resource, fjes_resource);
 146	if (ACPI_FAILURE(status))
 147		return -ENODEV;
 148
 149	/* create platform_device */
 150	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
 151						   ARRAY_SIZE(fjes_resource));
 152	device->driver_data = plat_dev;
 153
 154	return 0;
 155}
 156
 157static int fjes_acpi_remove(struct acpi_device *device)
 158{
 159	struct platform_device *plat_dev;
 
 
 
 
 
 160
 161	plat_dev = (struct platform_device *)acpi_driver_data(device);
 162	platform_device_unregister(plat_dev);
 
 
 
 163
 164	return 0;
 165}
 166
 167static acpi_status
 168fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
 169{
 170	struct acpi_resource_address32 *addr;
 171	struct acpi_resource_irq *irq;
 172	struct resource *res = data;
 173
 174	switch (acpi_res->type) {
 175	case ACPI_RESOURCE_TYPE_ADDRESS32:
 176		addr = &acpi_res->data.address32;
 177		res[0].start = addr->address.minimum;
 178		res[0].end = addr->address.minimum +
 179			addr->address.address_length - 1;
 180		break;
 181
 182	case ACPI_RESOURCE_TYPE_IRQ:
 183		irq = &acpi_res->data.irq;
 184		if (irq->interrupt_count != 1)
 185			return AE_ERROR;
 186		res[1].start = irq->interrupts[0];
 187		res[1].end = irq->interrupts[0];
 188		break;
 189
 190	default:
 191		break;
 192	}
 193
 194	return AE_OK;
 195}
 196
 197static int fjes_request_irq(struct fjes_adapter *adapter)
 198{
 199	struct net_device *netdev = adapter->netdev;
 200	int result = -1;
 201
 202	adapter->interrupt_watch_enable = true;
 203	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
 204		queue_delayed_work(adapter->control_wq,
 205				   &adapter->interrupt_watch_task,
 206				   FJES_IRQ_WATCH_DELAY);
 207	}
 208
 209	if (!adapter->irq_registered) {
 210		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
 211				     IRQF_SHARED, netdev->name, adapter);
 212		if (result)
 213			adapter->irq_registered = false;
 214		else
 215			adapter->irq_registered = true;
 216	}
 217
 218	return result;
 219}
 220
 221static void fjes_free_irq(struct fjes_adapter *adapter)
 222{
 223	struct fjes_hw *hw = &adapter->hw;
 224
 225	adapter->interrupt_watch_enable = false;
 226	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 227
 228	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
 229
 230	if (adapter->irq_registered) {
 231		free_irq(adapter->hw.hw_res.irq, adapter);
 232		adapter->irq_registered = false;
 233	}
 234}
 235
 236static const struct net_device_ops fjes_netdev_ops = {
 237	.ndo_open		= fjes_open,
 238	.ndo_stop		= fjes_close,
 239	.ndo_start_xmit		= fjes_xmit_frame,
 240	.ndo_get_stats64	= fjes_get_stats64,
 241	.ndo_change_mtu		= fjes_change_mtu,
 242	.ndo_tx_timeout		= fjes_tx_retry,
 243	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
 244	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
 245};
 246
 247/* fjes_open - Called when a network interface is made active */
 248static int fjes_open(struct net_device *netdev)
 249{
 250	struct fjes_adapter *adapter = netdev_priv(netdev);
 251	struct fjes_hw *hw = &adapter->hw;
 252	int result;
 253
 254	if (adapter->open_guard)
 255		return -ENXIO;
 256
 257	result = fjes_setup_resources(adapter);
 258	if (result)
 259		goto err_setup_res;
 260
 261	hw->txrx_stop_req_bit = 0;
 262	hw->epstop_req_bit = 0;
 
 
 263
 264	napi_enable(&adapter->napi);
 
 
 
 
 265
 266	fjes_hw_capture_interrupt_status(hw);
 267
 268	result = fjes_request_irq(adapter);
 269	if (result)
 270		goto err_req_irq;
 271
 272	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
 273
 274	netif_tx_start_all_queues(netdev);
 275	netif_carrier_on(netdev);
 276
 277	return 0;
 278
 279err_req_irq:
 280	fjes_free_irq(adapter);
 281	napi_disable(&adapter->napi);
 282
 283err_setup_res:
 284	fjes_free_resources(adapter);
 285	return result;
 286}
 287
 288/* fjes_close - Disables a network interface */
 289static int fjes_close(struct net_device *netdev)
 290{
 291	struct fjes_adapter *adapter = netdev_priv(netdev);
 292	struct fjes_hw *hw = &adapter->hw;
 293	unsigned long flags;
 294	int epidx;
 295
 296	netif_tx_stop_all_queues(netdev);
 297	netif_carrier_off(netdev);
 
 298
 299	fjes_hw_raise_epstop(hw);
 300
 301	napi_disable(&adapter->napi);
 302
 303	spin_lock_irqsave(&hw->rx_status_lock, flags);
 304	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 305		if (epidx == hw->my_epid)
 306			continue;
 307
 308		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
 309		    EP_PARTNER_SHARED)
 310			adapter->hw.ep_shm_info[epidx]
 311				   .tx.info->v1i.rx_status &=
 312				~FJES_RX_POLL_WORK;
 313	}
 314	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 315
 316	fjes_free_irq(adapter);
 317
 318	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 319	cancel_work_sync(&adapter->unshare_watch_task);
 320	adapter->unshare_watch_bitmask = 0;
 321	cancel_work_sync(&adapter->raise_intr_rxdata_task);
 322	cancel_work_sync(&adapter->tx_stall_task);
 323
 324	cancel_work_sync(&hw->update_zone_task);
 325	cancel_work_sync(&hw->epstop_task);
 326
 327	fjes_hw_wait_epstop(hw);
 328
 329	fjes_free_resources(adapter);
 330
 331	return 0;
 332}
 333
 334static int fjes_setup_resources(struct fjes_adapter *adapter)
 335{
 336	struct net_device *netdev = adapter->netdev;
 337	struct ep_share_mem_info *buf_pair;
 338	struct fjes_hw *hw = &adapter->hw;
 339	unsigned long flags;
 340	int result;
 341	int epidx;
 342
 343	mutex_lock(&hw->hw_info.lock);
 344	result = fjes_hw_request_info(hw);
 345	switch (result) {
 346	case 0:
 347		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 348			hw->ep_shm_info[epidx].es_status =
 349			    hw->hw_info.res_buf->info.info[epidx].es_status;
 350			hw->ep_shm_info[epidx].zone =
 351			    hw->hw_info.res_buf->info.info[epidx].zone;
 352		}
 353		break;
 354	default:
 355	case -ENOMSG:
 356	case -EBUSY:
 357		adapter->force_reset = true;
 358
 359		mutex_unlock(&hw->hw_info.lock);
 360		return result;
 361	}
 362	mutex_unlock(&hw->hw_info.lock);
 363
 364	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 365		if ((epidx != hw->my_epid) &&
 366		    (hw->ep_shm_info[epidx].es_status ==
 367		     FJES_ZONING_STATUS_ENABLE)) {
 368			fjes_hw_raise_interrupt(hw, epidx,
 369						REG_ICTL_MASK_INFO_UPDATE);
 370			hw->ep_shm_info[epidx].ep_stats
 371				.send_intr_zoneupdate += 1;
 372		}
 373	}
 374
 375	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
 376
 377	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 378		if (epidx == hw->my_epid)
 379			continue;
 380
 381		buf_pair = &hw->ep_shm_info[epidx];
 382
 383		spin_lock_irqsave(&hw->rx_status_lock, flags);
 384		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
 385				    netdev->mtu);
 386		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 387
 388		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
 389			mutex_lock(&hw->hw_info.lock);
 390			result =
 391			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
 392			mutex_unlock(&hw->hw_info.lock);
 393
 394			switch (result) {
 395			case 0:
 396				break;
 397			case -ENOMSG:
 398			case -EBUSY:
 399			default:
 400				adapter->force_reset = true;
 401				return result;
 402			}
 403
 404			hw->ep_shm_info[epidx].ep_stats
 405				.com_regist_buf_exec += 1;
 406		}
 407	}
 408
 409	return 0;
 410}
 411
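/* fjes_free_resources - Unregister shared buffers and reset the hardware if needed */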
 412static void fjes_free_resources(struct fjes_adapter *adapter)
 413{
 414	struct net_device *netdev = adapter->netdev;
 415	struct fjes_device_command_param param;
 416	struct ep_share_mem_info *buf_pair;
 417	struct fjes_hw *hw = &adapter->hw;
 418	bool reset_flag = false;
 419	unsigned long flags;
 420	int result;
 421	int epidx;
 422
 423	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 424		if (epidx == hw->my_epid)
 425			continue;
 426
 427		mutex_lock(&hw->hw_info.lock);
 428		result = fjes_hw_unregister_buff_addr(hw, epidx);
 429		mutex_unlock(&hw->hw_info.lock);
 430
 431		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
 432
 433		if (result)
 434			reset_flag = true;
 435
 436		buf_pair = &hw->ep_shm_info[epidx];
 437
 438		spin_lock_irqsave(&hw->rx_status_lock, flags);
 439		fjes_hw_setup_epbuf(&buf_pair->tx,
 440				    netdev->dev_addr, netdev->mtu);
 441		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 442
 443		clear_bit(epidx, &hw->txrx_stop_req_bit);
 444	}
 445
 446	if (reset_flag || adapter->force_reset) {
 447		result = fjes_hw_reset(hw);
 448
 449		adapter->force_reset = false;
 450
 451		if (result)
 452			adapter->open_guard = true;
 453
 454		hw->hw_info.buffer_share_bit = 0;
 455
 456		memset((void *)&param, 0, sizeof(param));
 457
 458		param.req_len = hw->hw_info.req_buf_size;
 459		param.req_start = __pa(hw->hw_info.req_buf);
 460		param.res_len = hw->hw_info.res_buf_size;
 461		param.res_start = __pa(hw->hw_info.res_buf);
 462		param.share_start = __pa(hw->hw_info.share->ep_status);
 463
 464		fjes_hw_init_command_registers(hw, &param);
 465	}
 466}
 467
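/* fjes_tx_stall_task - Wake the stalled TX queue once partner receive rings have room */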
 468static void fjes_tx_stall_task(struct work_struct *work)
 469{
 470	struct fjes_adapter *adapter = container_of(work,
 471			struct fjes_adapter, tx_stall_task);
 472	struct net_device *netdev = adapter->netdev;
 473	struct fjes_hw *hw = &adapter->hw;
 474	int all_queue_available, sendable;
 475	enum ep_partner_status pstatus;
 476	int max_epid, my_epid, epid;
 477	union ep_buffer_info *info;
 478	int i;
 479
 480	if (((long)jiffies -
 481		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
 482		netif_wake_queue(netdev);
 483		return;
 484	}
 485
 486	my_epid = hw->my_epid;
 487	max_epid = hw->max_epid;
 488
 489	for (i = 0; i < 5; i++) {
 490		all_queue_available = 1;
 491
 492		for (epid = 0; epid < max_epid; epid++) {
 493			if (my_epid == epid)
 494				continue;
 495
 496			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
 497			sendable = (pstatus == EP_PARTNER_SHARED);
 498			if (!sendable)
 499				continue;
 500
 501			info = adapter->hw.ep_shm_info[epid].tx.info;
 502
 503			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
 504				return;
 505
 506			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
 507					 info->v1i.count_max)) {
 508				all_queue_available = 0;
 509				break;
 510			}
 511		}
 512
 513		if (all_queue_available) {
 514			netif_wake_queue(netdev);
 515			return;
 516		}
 517	}
 518
 519	usleep_range(50, 100);
 520
 521	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
 522}
 523
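/* fjes_force_close_task - Close the network interface under rtnl_lock */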
 524static void fjes_force_close_task(struct work_struct *work)
 525{
 526	struct fjes_adapter *adapter = container_of(work,
 527			struct fjes_adapter, force_close_task);
 528	struct net_device *netdev = adapter->netdev;
 529
 530	rtnl_lock();
 531	dev_close(netdev);
 532	rtnl_unlock();
 533}
 534
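/* fjes_raise_intr_rxdata_task - Raise RX data interrupts toward EPs with pending delayed TX */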
 535static void fjes_raise_intr_rxdata_task(struct work_struct *work)
 536{
 537	struct fjes_adapter *adapter = container_of(work,
 538			struct fjes_adapter, raise_intr_rxdata_task);
 539	struct fjes_hw *hw = &adapter->hw;
 540	enum ep_partner_status pstatus;
 541	int max_epid, my_epid, epid;
 542
 543	my_epid = hw->my_epid;
 544	max_epid = hw->max_epid;
 545
 546	for (epid = 0; epid < max_epid; epid++)
 547		hw->ep_shm_info[epid].tx_status_work = 0;
 548
 549	for (epid = 0; epid < max_epid; epid++) {
 550		if (epid == my_epid)
 551			continue;
 552
 553		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
 554		if (pstatus == EP_PARTNER_SHARED) {
 555			hw->ep_shm_info[epid].tx_status_work =
 556				hw->ep_shm_info[epid].tx.info->v1i.tx_status;
 557
 558			if (hw->ep_shm_info[epid].tx_status_work ==
 559				FJES_TX_DELAY_SEND_PENDING) {
 560				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
 561					FJES_TX_DELAY_SEND_NONE;
 562			}
 563		}
 564	}
 565
 566	for (epid = 0; epid < max_epid; epid++) {
 567		if (epid == my_epid)
 568			continue;
 569
 570		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
 571		if ((hw->ep_shm_info[epid].tx_status_work ==
 572		     FJES_TX_DELAY_SEND_PENDING) &&
 573		    (pstatus == EP_PARTNER_SHARED) &&
 574		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
 575		      FJES_RX_POLL_WORK)) {
 576			fjes_hw_raise_interrupt(hw, epid,
 577						REG_ICTL_MASK_RX_DATA);
 578			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
 579		}
 580	}
 581
 582	usleep_range(500, 1000);
 583}
 584
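/* fjes_tx_send - Copy a frame into the destination EP's TX buffer and schedule the notify work */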
 585static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
 586			void *data, size_t len)
 587{
 588	int retval;
 589
 590	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
 591					   data, len);
 592	if (retval)
 593		return retval;
 594
 595	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
 596		FJES_TX_DELAY_SEND_PENDING;
 597	if (!work_pending(&adapter->raise_intr_rxdata_task))
 598		queue_work(adapter->txrx_wq,
 599			   &adapter->raise_intr_rxdata_task);
 600
 601	retval = 0;
 602	return retval;
 603}
 604
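/* fjes_xmit_frame - Transmit a frame to the destination EP(s) via the shared buffers */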
 605static netdev_tx_t
 606fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 607{
 608	struct fjes_adapter *adapter = netdev_priv(netdev);
 609	struct fjes_hw *hw = &adapter->hw;
 610
 611	int max_epid, my_epid, dest_epid;
 612	enum ep_partner_status pstatus;
 613	struct netdev_queue *cur_queue;
 614	char shortpkt[VLAN_ETH_HLEN];
 615	bool is_multi, vlan;
 616	struct ethhdr *eth;
 617	u16 queue_no = 0;
 618	u16 vlan_id = 0;
 619	netdev_tx_t ret;
 620	char *data;
 621	int len;
 622
 623	ret = NETDEV_TX_OK;
 624	is_multi = false;
 625	cur_queue = netdev_get_tx_queue(netdev, queue_no);
 626
 627	eth = (struct ethhdr *)skb->data;
 628	my_epid = hw->my_epid;
 629
 630	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
 631
 632	data = skb->data;
 633	len = skb->len;
 634
 635	if (is_multicast_ether_addr(eth->h_dest)) {
 636		dest_epid = 0;
 637		max_epid = hw->max_epid;
 638		is_multi = true;
 639	} else if (is_local_ether_addr(eth->h_dest)) {
 640		dest_epid = eth->h_dest[ETH_ALEN - 1];
 641		max_epid = dest_epid + 1;
 642
 643		if ((eth->h_dest[0] == 0x02) &&
 644		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
 645			      eth->h_dest[3] | eth->h_dest[4])) &&
 646		    (dest_epid < hw->max_epid)) {
 647			;
 648		} else {
 649			dest_epid = 0;
 650			max_epid = 0;
 651			ret = NETDEV_TX_OK;
 652
 653			adapter->stats64.tx_packets += 1;
 654			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 655			adapter->stats64.tx_bytes += len;
 656			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 657		}
 658	} else {
 659		dest_epid = 0;
 660		max_epid = 0;
 661		ret = NETDEV_TX_OK;
 662
 663		adapter->stats64.tx_packets += 1;
 664		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 665		adapter->stats64.tx_bytes += len;
 666		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 667	}
 668
 669	for (; dest_epid < max_epid; dest_epid++) {
 670		if (my_epid == dest_epid)
 671			continue;
 672
 673		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
 674		if (pstatus != EP_PARTNER_SHARED) {
 675			if (!is_multi)
 676				hw->ep_shm_info[dest_epid].ep_stats
 677					.tx_dropped_not_shared += 1;
 678			ret = NETDEV_TX_OK;
 679		} else if (!fjes_hw_check_epbuf_version(
 680				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
 681			/* version is NOT 0 */
 682			adapter->stats64.tx_carrier_errors += 1;
 683			hw->ep_shm_info[dest_epid].net_stats
 684						.tx_carrier_errors += 1;
 685			hw->ep_shm_info[dest_epid].ep_stats
 686					.tx_dropped_ver_mismatch += 1;
 687
 688			ret = NETDEV_TX_OK;
 689		} else if (!fjes_hw_check_mtu(
 690				&adapter->hw.ep_shm_info[dest_epid].rx,
 691				netdev->mtu)) {
 692			adapter->stats64.tx_dropped += 1;
 693			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
 694			adapter->stats64.tx_errors += 1;
 695			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
 696			hw->ep_shm_info[dest_epid].ep_stats
 697					.tx_dropped_buf_size_mismatch += 1;
 698
 699			ret = NETDEV_TX_OK;
 700		} else if (vlan &&
 701			   !fjes_hw_check_vlan_id(
 702				&adapter->hw.ep_shm_info[dest_epid].rx,
 703				vlan_id)) {
 704			hw->ep_shm_info[dest_epid].ep_stats
 705				.tx_dropped_vlanid_mismatch += 1;
 706			ret = NETDEV_TX_OK;
 707		} else {
 708			if (len < VLAN_ETH_HLEN) {
 709				memset(shortpkt, 0, VLAN_ETH_HLEN);
 710				memcpy(shortpkt, skb->data, skb->len);
 711				len = VLAN_ETH_HLEN;
 712				data = shortpkt;
 713			}
 714
 715			if (adapter->tx_retry_count == 0) {
 716				adapter->tx_start_jiffies = jiffies;
 717				adapter->tx_retry_count = 1;
 718			} else {
 719				adapter->tx_retry_count++;
 720			}
 721
 722			if (fjes_tx_send(adapter, dest_epid, data, len)) {
 723				if (is_multi) {
 724					ret = NETDEV_TX_OK;
 725				} else if (
 726					   ((long)jiffies -
 727					    (long)adapter->tx_start_jiffies) >=
 728					    FJES_TX_RETRY_TIMEOUT) {
 729					adapter->stats64.tx_fifo_errors += 1;
 730					hw->ep_shm_info[dest_epid].net_stats
 731								.tx_fifo_errors += 1;
 732					adapter->stats64.tx_errors += 1;
 733					hw->ep_shm_info[dest_epid].net_stats
 734								.tx_errors += 1;
 735
 736					ret = NETDEV_TX_OK;
 737				} else {
 738					netif_trans_update(netdev);
 739					hw->ep_shm_info[dest_epid].ep_stats
 740						.tx_buffer_full += 1;
 741					netif_tx_stop_queue(cur_queue);
 742
 743					if (!work_pending(&adapter->tx_stall_task))
 744						queue_work(adapter->txrx_wq,
 745							   &adapter->tx_stall_task);
 746
 747					ret = NETDEV_TX_BUSY;
 748				}
 749			} else {
 750				if (!is_multi) {
 751					adapter->stats64.tx_packets += 1;
 752					hw->ep_shm_info[dest_epid].net_stats
 753								.tx_packets += 1;
 754					adapter->stats64.tx_bytes += len;
 755					hw->ep_shm_info[dest_epid].net_stats
 756								.tx_bytes += len;
 757				}
 758
 759				adapter->tx_retry_count = 0;
 760				ret = NETDEV_TX_OK;
 761			}
 762		}
 763	}
 764
 765	if (ret == NETDEV_TX_OK) {
 766		dev_kfree_skb(skb);
 767		if (is_multi) {
 768			adapter->stats64.tx_packets += 1;
 769			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
  770			adapter->stats64.tx_bytes += len;
 771			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 772		}
 773	}
 774
 775	return ret;
 776}
 777
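/* fjes_tx_retry - Wake the TX queue so transmission is retried */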
 778static void fjes_tx_retry(struct net_device *netdev)
 779{
 780	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
 781
 782	netif_tx_wake_queue(queue);
 783}
 784
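/* fjes_get_stats64 - Return the accumulated interface statistics */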
 785static struct rtnl_link_stats64 *
 786fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 787{
 788	struct fjes_adapter *adapter = netdev_priv(netdev);
 789
 790	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
 791
 792	return stats;
 793}
 794
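/* fjes_change_mtu - Switch to a supported MTU and rebuild the shared buffers */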
 795static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
 796{
 797	struct fjes_adapter *adapter = netdev_priv(netdev);
 798	bool running = netif_running(netdev);
 799	struct fjes_hw *hw = &adapter->hw;
 800	unsigned long flags;
 801	int ret = -EINVAL;
 802	int idx, epidx;
 803
 804	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
 805		if (new_mtu <= fjes_support_mtu[idx]) {
 806			new_mtu = fjes_support_mtu[idx];
 807			if (new_mtu == netdev->mtu)
 808				return 0;
 809
 810			ret = 0;
 811			break;
 812		}
 813	}
 814
 815	if (ret)
 816		return ret;
 817
 818	if (running) {
 819		spin_lock_irqsave(&hw->rx_status_lock, flags);
 820		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 821			if (epidx == hw->my_epid)
 822				continue;
 823			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
 824				~FJES_RX_MTU_CHANGING_DONE;
 825		}
 826		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 827
 828		netif_tx_stop_all_queues(netdev);
 829		netif_carrier_off(netdev);
 830		cancel_work_sync(&adapter->tx_stall_task);
 831		napi_disable(&adapter->napi);
 832
 833		msleep(1000);
 834
 835		netif_tx_stop_all_queues(netdev);
 836	}
 837
 838	netdev->mtu = new_mtu;
 839
 840	if (running) {
 841		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 842			if (epidx == hw->my_epid)
 843				continue;
 844
 845			spin_lock_irqsave(&hw->rx_status_lock, flags);
 846			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
 847					    netdev->dev_addr,
 848					    netdev->mtu);
 849
 850			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
 851				FJES_RX_MTU_CHANGING_DONE;
 852			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 853		}
 854
 855		netif_tx_wake_all_queues(netdev);
 856		netif_carrier_on(netdev);
 857		napi_enable(&adapter->napi);
 858		napi_schedule(&adapter->napi);
 859	}
 860
 861	return ret;
 862}
 863
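/* fjes_vlan_rx_add_vid - Add a VLAN ID to every partner EP's TX buffer */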
 864static int fjes_vlan_rx_add_vid(struct net_device *netdev,
 865				__be16 proto, u16 vid)
 866{
 867	struct fjes_adapter *adapter = netdev_priv(netdev);
 868	bool ret = true;
 869	int epid;
 870
 871	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 872		if (epid == adapter->hw.my_epid)
 873			continue;
 874
 875		if (!fjes_hw_check_vlan_id(
 876			&adapter->hw.ep_shm_info[epid].tx, vid))
 877			ret = fjes_hw_set_vlan_id(
 878				&adapter->hw.ep_shm_info[epid].tx, vid);
 879	}
 880
 881	return ret ? 0 : -ENOSPC;
 882}
 883
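/* fjes_vlan_rx_kill_vid - Remove a VLAN ID from every partner EP's TX buffer */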
 884static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
 885				 __be16 proto, u16 vid)
 886{
 887	struct fjes_adapter *adapter = netdev_priv(netdev);
 888	int epid;
 889
 890	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 891		if (epid == adapter->hw.my_epid)
 892			continue;
 893
 894		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
 895	}
 896
 897	return 0;
 898}
 899
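/* fjes_txrx_stop_req_irq - Handle a TX/RX stop request interrupt from a partner EP */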
 900static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
 901				   int src_epid)
 902{
 903	struct fjes_hw *hw = &adapter->hw;
 904	enum ep_partner_status status;
 905	unsigned long flags;
 906
 907	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 908	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
 909	switch (status) {
 910	case EP_PARTNER_UNSHARE:
 911	case EP_PARTNER_COMPLETE:
 912	default:
 913		break;
 914	case EP_PARTNER_WAITING:
 915		if (src_epid < hw->my_epid) {
 916			spin_lock_irqsave(&hw->rx_status_lock, flags);
 917			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 918				FJES_RX_STOP_REQ_DONE;
 919			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 920
 921			clear_bit(src_epid, &hw->txrx_stop_req_bit);
 922			set_bit(src_epid, &adapter->unshare_watch_bitmask);
 923
 924			if (!work_pending(&adapter->unshare_watch_task))
 925				queue_work(adapter->control_wq,
 926					   &adapter->unshare_watch_task);
 927		}
 928		break;
 929	case EP_PARTNER_SHARED:
 930		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
 931		    FJES_RX_STOP_REQ_REQUEST) {
 932			set_bit(src_epid, &hw->epstop_req_bit);
 933			if (!work_pending(&hw->epstop_task))
 934				queue_work(adapter->control_wq,
 935					   &hw->epstop_task);
 936		}
 937		break;
 938	}
 939	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
 940}
 941
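/* fjes_stop_req_irq - Handle a device stop request interrupt from a partner EP */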
 942static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
 943{
 944	struct fjes_hw *hw = &adapter->hw;
 945	enum ep_partner_status status;
 946	unsigned long flags;
 947
 948	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
 949
 950	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 951	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
 952	switch (status) {
 953	case EP_PARTNER_WAITING:
 954		spin_lock_irqsave(&hw->rx_status_lock, flags);
 955		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 956				FJES_RX_STOP_REQ_DONE;
 957		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 958		clear_bit(src_epid, &hw->txrx_stop_req_bit);
 959		/* fall through */
 960	case EP_PARTNER_UNSHARE:
 961	case EP_PARTNER_COMPLETE:
 962	default:
 963		set_bit(src_epid, &adapter->unshare_watch_bitmask);
 964		if (!work_pending(&adapter->unshare_watch_task))
 965			queue_work(adapter->control_wq,
 966				   &adapter->unshare_watch_task);
 967		break;
 968	case EP_PARTNER_SHARED:
 969		set_bit(src_epid, &hw->epstop_req_bit);
 970
 971		if (!work_pending(&hw->epstop_task))
 972			queue_work(adapter->control_wq, &hw->epstop_task);
 973		break;
 974	}
 975	trace_fjes_stop_req_irq_post(hw, src_epid);
 976}
 977
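/* fjes_update_zone_irq - Handle a zone information update interrupt */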
 978static void fjes_update_zone_irq(struct fjes_adapter *adapter,
 979				 int src_epid)
 980{
 981	struct fjes_hw *hw = &adapter->hw;
 982
 983	if (!work_pending(&hw->update_zone_task))
 984		queue_work(adapter->control_wq, &hw->update_zone_task);
 985}
 986
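/* fjes_intr - Interrupt handler; dispatch on the captured interrupt status bits */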
 987static irqreturn_t fjes_intr(int irq, void *data)
 988{
 989	struct fjes_adapter *adapter = data;
 990	struct fjes_hw *hw = &adapter->hw;
 991	irqreturn_t ret;
 992	u32 icr;
 993
 994	icr = fjes_hw_capture_interrupt_status(hw);
 995
 996	if (icr & REG_IS_MASK_IS_ASSERT) {
 997		if (icr & REG_ICTL_MASK_RX_DATA) {
 998			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
 999			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1000				.recv_intr_rx += 1;
1001		}
1002
1003		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
1004			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1005			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1006				.recv_intr_stop += 1;
1007		}
1008
1009		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
1010			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1011			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1012				.recv_intr_unshare += 1;
1013		}
1014
1015		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
1016			fjes_hw_set_irqmask(hw,
1017					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
1018
1019		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
1020			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
1021			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1022				.recv_intr_zoneupdate += 1;
1023		}
1024
1025		ret = IRQ_HANDLED;
1026	} else {
1027		ret = IRQ_NONE;
1028	}
1029
1030	return ret;
1031}
1032
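/* fjes_rxframe_search_exist - Find the next shared EP whose RX buffer is not empty */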
1033static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
1034				     int start_epid)
1035{
1036	struct fjes_hw *hw = &adapter->hw;
1037	enum ep_partner_status pstatus;
1038	int max_epid, cur_epid;
1039	int i;
1040
1041	max_epid = hw->max_epid;
1042	start_epid = (start_epid + 1 + max_epid) % max_epid;
1043
1044	for (i = 0; i < max_epid; i++) {
1045		cur_epid = (start_epid + i) % max_epid;
1046		if (cur_epid == hw->my_epid)
1047			continue;
1048
1049		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
1050		if (pstatus == EP_PARTNER_SHARED) {
1051			if (!fjes_hw_epbuf_rx_is_empty(
1052				&hw->ep_shm_info[cur_epid].rx))
1053				return cur_epid;
1054		}
1055	}
1056	return -1;
1057}
1058
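/* fjes_rxframe_get - Get the current RX frame of the next EP that has data */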
1059static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
1060			      int *cur_epid)
1061{
1062	void *frame;
1063
1064	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
1065	if (*cur_epid < 0)
1066		return NULL;
1067
1068	frame =
1069	fjes_hw_epbuf_rx_curpkt_get_addr(
1070		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
1071
1072	return frame;
1073}
1074
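/* fjes_rxframe_release - Drop the current RX frame of the given EP */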
1075static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
1076{
1077	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
1078}
1079
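/* fjes_rx_irq - RX data interrupt; mask further RX interrupts and schedule NAPI */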
1080static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
1081{
1082	struct fjes_hw *hw = &adapter->hw;
1083
1084	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
1085
1086	adapter->unset_rx_last = true;
1087	napi_schedule(&adapter->napi);
1088}
1089
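/* fjes_poll - NAPI poll handler; copy received frames from shared buffers into skbs */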
1090static int fjes_poll(struct napi_struct *napi, int budget)
1091{
1092	struct fjes_adapter *adapter =
1093			container_of(napi, struct fjes_adapter, napi);
1094	struct net_device *netdev = napi->dev;
1095	struct fjes_hw *hw = &adapter->hw;
1096	struct sk_buff *skb;
1097	int work_done = 0;
1098	int cur_epid = 0;
1099	int epidx;
1100	size_t frame_len;
1101	void *frame;
1102
1103	spin_lock(&hw->rx_status_lock);
1104	for (epidx = 0; epidx < hw->max_epid; epidx++) {
1105		if (epidx == hw->my_epid)
1106			continue;
1107
1108		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1109		    EP_PARTNER_SHARED)
1110			adapter->hw.ep_shm_info[epidx]
1111				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
1112	}
1113	spin_unlock(&hw->rx_status_lock);
1114
1115	while (work_done < budget) {
1116		prefetch(&adapter->hw);
1117		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
1118
1119		if (frame) {
1120			skb = napi_alloc_skb(napi, frame_len);
1121			if (!skb) {
1122				adapter->stats64.rx_dropped += 1;
1123				hw->ep_shm_info[cur_epid].net_stats
1124							 .rx_dropped += 1;
1125				adapter->stats64.rx_errors += 1;
1126				hw->ep_shm_info[cur_epid].net_stats
1127							 .rx_errors += 1;
1128			} else {
1129				memcpy(skb_put(skb, frame_len),
1130				       frame, frame_len);
1131				skb->protocol = eth_type_trans(skb, netdev);
1132				skb->ip_summed = CHECKSUM_UNNECESSARY;
1133
1134				netif_receive_skb(skb);
1135
1136				work_done++;
1137
1138				adapter->stats64.rx_packets += 1;
1139				hw->ep_shm_info[cur_epid].net_stats
1140							 .rx_packets += 1;
1141				adapter->stats64.rx_bytes += frame_len;
1142				hw->ep_shm_info[cur_epid].net_stats
1143							 .rx_bytes += frame_len;
1144
1145				if (is_multicast_ether_addr(
1146					((struct ethhdr *)frame)->h_dest)) {
1147					adapter->stats64.multicast += 1;
1148					hw->ep_shm_info[cur_epid].net_stats
1149								 .multicast += 1;
1150				}
1151			}
1152
1153			fjes_rxframe_release(adapter, cur_epid);
1154			adapter->unset_rx_last = true;
1155		} else {
1156			break;
1157		}
1158	}
1159
1160	if (work_done < budget) {
1161		napi_complete(napi);
1162
1163		if (adapter->unset_rx_last) {
1164			adapter->rx_last_jiffies = jiffies;
1165			adapter->unset_rx_last = false;
1166		}
1167
1168		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
1169			napi_reschedule(napi);
1170		} else {
1171			spin_lock(&hw->rx_status_lock);
1172			for (epidx = 0; epidx < hw->max_epid; epidx++) {
1173				if (epidx == hw->my_epid)
1174					continue;
1175				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1176				    EP_PARTNER_SHARED)
1177					adapter->hw.ep_shm_info[epidx].tx
1178						   .info->v1i.rx_status &=
1179						~FJES_RX_POLL_WORK;
1180			}
1181			spin_unlock(&hw->rx_status_lock);
1182
1183			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
1184		}
1185	}
1186
1187	return work_done;
1188}
1189
1190/* fjes_probe - Device Initialization Routine */
1191static int fjes_probe(struct platform_device *plat_dev)
1192{
1193	struct fjes_adapter *adapter;
1194	struct net_device *netdev;
1195	struct resource *res;
1196	struct fjes_hw *hw;
1197	int err;
1198
1199	err = -ENOMEM;
1200	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1201				 NET_NAME_UNKNOWN, fjes_netdev_setup,
1202				 FJES_MAX_QUEUES);
1203
1204	if (!netdev)
1205		goto err_out;
1206
1207	SET_NETDEV_DEV(netdev, &plat_dev->dev);
1208
1209	dev_set_drvdata(&plat_dev->dev, netdev);
1210	adapter = netdev_priv(netdev);
1211	adapter->netdev = netdev;
1212	adapter->plat_dev = plat_dev;
1213	hw = &adapter->hw;
1214	hw->back = adapter;
1215
1216	/* setup the private structure */
1217	err = fjes_sw_init(adapter);
1218	if (err)
1219		goto err_free_netdev;
1220
1221	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1222	adapter->force_reset = false;
1223	adapter->open_guard = false;
1224
1225	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1226	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1227					      WQ_MEM_RECLAIM, 0);
1228
1229	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1230	INIT_WORK(&adapter->raise_intr_rxdata_task,
1231		  fjes_raise_intr_rxdata_task);
1232	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1233	adapter->unshare_watch_bitmask = 0;
1234
1235	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1236	adapter->interrupt_watch_enable = false;
1237
1238	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1239	hw->hw_res.start = res->start;
1240	hw->hw_res.size = resource_size(res);
1241	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1242	err = fjes_hw_init(&adapter->hw);
1243	if (err)
1244		goto err_free_netdev;
1245
1246	/* setup MAC address (02:00:00:00:00:[epid])*/
1247	netdev->dev_addr[0] = 2;
1248	netdev->dev_addr[1] = 0;
1249	netdev->dev_addr[2] = 0;
1250	netdev->dev_addr[3] = 0;
1251	netdev->dev_addr[4] = 0;
1252	netdev->dev_addr[5] = hw->my_epid; /* EPID */
1253
1254	err = register_netdev(netdev);
1255	if (err)
1256		goto err_hw_exit;
1257
1258	netif_carrier_off(netdev);
1259
1260	fjes_dbg_adapter_init(adapter);
1261
1262	return 0;
1263
1264err_hw_exit:
1265	fjes_hw_exit(&adapter->hw);
1266err_free_netdev:
1267	free_netdev(netdev);
1268err_out:
1269	return err;
1270}
1271
1272/* fjes_remove - Device Removal Routine */
1273static int fjes_remove(struct platform_device *plat_dev)
1274{
1275	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
1276	struct fjes_adapter *adapter = netdev_priv(netdev);
1277	struct fjes_hw *hw = &adapter->hw;
1278
1279	fjes_dbg_adapter_exit(adapter);
1280
1281	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
1282	cancel_work_sync(&adapter->unshare_watch_task);
1283	cancel_work_sync(&adapter->raise_intr_rxdata_task);
1284	cancel_work_sync(&adapter->tx_stall_task);
1285	if (adapter->control_wq)
1286		destroy_workqueue(adapter->control_wq);
1287	if (adapter->txrx_wq)
1288		destroy_workqueue(adapter->txrx_wq);
1289
1290	unregister_netdev(netdev);
1291
1292	fjes_hw_exit(hw);
1293
1294	netif_napi_del(&adapter->napi);
1295
1296	free_netdev(netdev);
1297
1298	return 0;
1299}
1300
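/* fjes_sw_init - Initialize driver software structures (NAPI context) */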
1301static int fjes_sw_init(struct fjes_adapter *adapter)
1302{
1303	struct net_device *netdev = adapter->netdev;
1304
1305	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
1306
1307	return 0;
1308}
1309
1310/* fjes_netdev_setup - netdevice initialization routine */
1311static void fjes_netdev_setup(struct net_device *netdev)
1312{
1313	ether_setup(netdev);
1314
1315	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
1316	netdev->netdev_ops = &fjes_netdev_ops;
1317	fjes_set_ethtool_ops(netdev);
1318	netdev->mtu = fjes_support_mtu[3];
1319	netdev->min_mtu = fjes_support_mtu[0];
1320	netdev->max_mtu = fjes_support_mtu[3];
1321	netdev->flags |= IFF_BROADCAST;
1322	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
1323}
1324
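/* fjes_irq_watch_task - Periodically run the interrupt handler to catch missed interrupts */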
1325static void fjes_irq_watch_task(struct work_struct *work)
1326{
1327	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1328			struct fjes_adapter, interrupt_watch_task);
1329
1330	local_irq_disable();
1331	fjes_intr(adapter->hw.hw_res.irq, adapter);
1332	local_irq_enable();
1333
1334	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1335		napi_schedule(&adapter->napi);
1336
1337	if (adapter->interrupt_watch_enable) {
1338		if (!delayed_work_pending(&adapter->interrupt_watch_task))
1339			queue_delayed_work(adapter->control_wq,
1340					   &adapter->interrupt_watch_task,
1341					   FJES_IRQ_WATCH_DELAY);
1342	}
1343}
1344
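/* fjes_watch_unshare_task - Wait for partner EPs to stop and unregister their shared buffers */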
1345static void fjes_watch_unshare_task(struct work_struct *work)
1346{
1347	struct fjes_adapter *adapter =
1348	container_of(work, struct fjes_adapter, unshare_watch_task);
1349
1350	struct net_device *netdev = adapter->netdev;
1351	struct fjes_hw *hw = &adapter->hw;
1352
1353	int unshare_watch, unshare_reserve;
1354	int max_epid, my_epid, epidx;
1355	int stop_req, stop_req_done;
1356	ulong unshare_watch_bitmask;
1357	unsigned long flags;
1358	int wait_time = 0;
1359	int is_shared;
1360	int ret;
1361
1362	my_epid = hw->my_epid;
1363	max_epid = hw->max_epid;
1364
1365	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1366	adapter->unshare_watch_bitmask = 0;
1367
1368	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1369	       (wait_time < 3000)) {
1370		for (epidx = 0; epidx < hw->max_epid; epidx++) {
1371			if (epidx == hw->my_epid)
1372				continue;
1373
1374			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1375							   epidx);
1376
1377			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1378
1379			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1380					FJES_RX_STOP_REQ_DONE;
1381
1382			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1383
1384			unshare_reserve = test_bit(epidx,
1385						   &hw->hw_info.buffer_unshare_reserve_bit);
1386
1387			if ((!stop_req ||
1388			     (is_shared && (!is_shared || !stop_req_done))) &&
1389			    (is_shared || !unshare_watch || !unshare_reserve))
1390				continue;
1391
1392			mutex_lock(&hw->hw_info.lock);
1393			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1394			switch (ret) {
1395			case 0:
1396				break;
1397			case -ENOMSG:
1398			case -EBUSY:
1399			default:
1400				if (!work_pending(
1401					&adapter->force_close_task)) {
1402					adapter->force_reset = true;
1403					schedule_work(
1404						&adapter->force_close_task);
1405				}
1406				break;
1407			}
1408			mutex_unlock(&hw->hw_info.lock);
1409			hw->ep_shm_info[epidx].ep_stats
1410					.com_unregist_buf_exec += 1;
1411
1412			spin_lock_irqsave(&hw->rx_status_lock, flags);
1413			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1414					    netdev->dev_addr, netdev->mtu);
1415			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1416
1417			clear_bit(epidx, &hw->txrx_stop_req_bit);
1418			clear_bit(epidx, &unshare_watch_bitmask);
1419			clear_bit(epidx,
1420				  &hw->hw_info.buffer_unshare_reserve_bit);
1421		}
1422
1423		msleep(100);
1424		wait_time += 100;
1425	}
1426
1427	if (hw->hw_info.buffer_unshare_reserve_bit) {
1428		for (epidx = 0; epidx < hw->max_epid; epidx++) {
1429			if (epidx == hw->my_epid)
1430				continue;
1431
1432			if (test_bit(epidx,
1433				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1434				mutex_lock(&hw->hw_info.lock);
1435
1436				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1437				switch (ret) {
1438				case 0:
1439					break;
1440				case -ENOMSG:
1441				case -EBUSY:
1442				default:
1443					if (!work_pending(
1444						&adapter->force_close_task)) {
1445						adapter->force_reset = true;
1446						schedule_work(
1447							&adapter->force_close_task);
1448					}
1449					break;
1450				}
1451				mutex_unlock(&hw->hw_info.lock);
1452
1453				hw->ep_shm_info[epidx].ep_stats
1454					.com_unregist_buf_exec += 1;
1455
1456				spin_lock_irqsave(&hw->rx_status_lock, flags);
1457				fjes_hw_setup_epbuf(
1458					&hw->ep_shm_info[epidx].tx,
1459					netdev->dev_addr, netdev->mtu);
1460				spin_unlock_irqrestore(&hw->rx_status_lock,
1461						       flags);
1462
1463				clear_bit(epidx, &hw->txrx_stop_req_bit);
1464				clear_bit(epidx, &unshare_watch_bitmask);
1465				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1466			}
1467
1468			if (test_bit(epidx, &unshare_watch_bitmask)) {
1469				spin_lock_irqsave(&hw->rx_status_lock, flags);
1470				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1471						~FJES_RX_STOP_REQ_DONE;
1472				spin_unlock_irqrestore(&hw->rx_status_lock,
1473						       flags);
1474			}
1475		}
1476	}
1477}
1478
1479/* fjes_init_module - Driver Registration Routine */
1480static int __init fjes_init_module(void)
1481{
1482	int result;
1483
1484	pr_info("%s - version %s - %s\n",
1485		fjes_driver_string, fjes_driver_version, fjes_copyright);
1486
1487	fjes_dbg_init();
1488
1489	result = platform_driver_register(&fjes_driver);
1490	if (result < 0) {
1491		fjes_dbg_exit();
1492		return result;
1493	}
1494
1495	result = acpi_bus_register_driver(&fjes_acpi_driver);
1496	if (result < 0)
1497		goto fail_acpi_driver;
1498
1499	return 0;
1500
1501fail_acpi_driver:
1502	platform_driver_unregister(&fjes_driver);
1503	fjes_dbg_exit();
1504	return result;
1505}
1506
1507module_init(fjes_init_module);
1508
1509/* fjes_exit_module - Driver Exit Cleanup Routine */
1510static void __exit fjes_exit_module(void)
1511{
1512	acpi_bus_unregister_driver(&fjes_acpi_driver);
1513	platform_driver_unregister(&fjes_driver);
1514	fjes_dbg_exit();
1515}
1516
1517module_exit(fjes_exit_module);