v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  FUJITSU Extended Socket Network Device driver
   4 *  Copyright (c) 2015 FUJITSU LIMITED
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/types.h>
   9#include <linux/nls.h>
  10#include <linux/platform_device.h>
  11#include <linux/netdevice.h>
  12#include <linux/interrupt.h>
  13
  14#include "fjes.h"
  15#include "fjes_trace.h"
  16
  17#define MAJ 1
  18#define MIN 2
  19#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
  20#define DRV_NAME	"fjes"
  21char fjes_driver_name[] = DRV_NAME;
  22char fjes_driver_version[] = DRV_VERSION;
  23static const char fjes_driver_string[] =
  24		"FUJITSU Extended Socket Network Device Driver";
  25static const char fjes_copyright[] =
  26		"Copyright (c) 2015 FUJITSU LIMITED";
  27
  28MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
  29MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
  30MODULE_LICENSE("GPL");
  31MODULE_VERSION(DRV_VERSION);
  32
  33#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
  34
  35static const struct acpi_device_id fjes_acpi_ids[] = {
  36	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
  37	{"", 0},
  38};
  39MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
  40
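/* is_extended_socket_device - check whether the ACPI _STR string matches FJES_ACPI_SYMBOL */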
  41static bool is_extended_socket_device(struct acpi_device *device)
  42{
  43	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
  44	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
  45	union acpi_object *str;
  46	acpi_status status;
  47	int result;
  48
  49	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
  50	if (ACPI_FAILURE(status))
  51		return false;
  52
  53	str = buffer.pointer;
  54	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
  55				 str->string.length, UTF16_LITTLE_ENDIAN,
  56				 str_buf, sizeof(str_buf) - 1);
  57	str_buf[result] = 0;
  58
  59	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
  60		kfree(buffer.pointer);
  61		return false;
  62	}
  63	kfree(buffer.pointer);
  64
  65	return true;
  66}
  67
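/* acpi_check_extended_socket_status - require _STA to report the device present, enabled, visible and functioning */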
  68static int acpi_check_extended_socket_status(struct acpi_device *device)
  69{
  70	unsigned long long sta;
  71	acpi_status status;
  72
  73	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
  74	if (ACPI_FAILURE(status))
  75		return -ENODEV;
  76
  77	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
  78	      (sta & ACPI_STA_DEVICE_ENABLED) &&
  79	      (sta & ACPI_STA_DEVICE_UI) &&
  80	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
  81		return -ENODEV;
  82
  83	return 0;
  84}
  85
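/* fjes_get_acpi_resource - _CRS walk callback: copy the 32-bit address window and the single IRQ into the resource array */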
  86static acpi_status
  87fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
  88{
  89	struct acpi_resource_address32 *addr;
  90	struct acpi_resource_irq *irq;
  91	struct resource *res = data;
  92
  93	switch (acpi_res->type) {
  94	case ACPI_RESOURCE_TYPE_ADDRESS32:
  95		addr = &acpi_res->data.address32;
  96		res[0].start = addr->address.minimum;
  97		res[0].end = addr->address.minimum +
  98			addr->address.address_length - 1;
  99		break;
 100
 101	case ACPI_RESOURCE_TYPE_IRQ:
 102		irq = &acpi_res->data.irq;
 103		if (irq->interrupt_count != 1)
 104			return AE_ERROR;
 105		res[1].start = irq->interrupts[0];
 106		res[1].end = irq->interrupts[0];
 107		break;
 108
 109	default:
 110		break;
 111	}
 112
 113	return AE_OK;
 114}
 115
 116static struct resource fjes_resource[] = {
 117	DEFINE_RES_MEM(0, 1),
 118	DEFINE_RES_IRQ(0)
 119};
 120
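/* fjes_acpi_add - validate the extended socket ACPI device and register the fjes platform device with its _CRS resources */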
 121static int fjes_acpi_add(struct acpi_device *device)
 122{
 123	struct platform_device *plat_dev;
 124	acpi_status status;
 125
 126	if (!is_extended_socket_device(device))
 127		return -ENODEV;
 128
 129	if (acpi_check_extended_socket_status(device))
 130		return -ENODEV;
 131
 132	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
 133				     fjes_get_acpi_resource, fjes_resource);
 134	if (ACPI_FAILURE(status))
 135		return -ENODEV;
 136
 137	/* create platform_device */
 138	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
 139						   ARRAY_SIZE(fjes_resource));
 140	if (IS_ERR(plat_dev))
 141		return PTR_ERR(plat_dev);
 142
 143	device->driver_data = plat_dev;
 144
 145	return 0;
 146}
 147
 148static void fjes_acpi_remove(struct acpi_device *device)
 149{
 150	struct platform_device *plat_dev;
 151
 152	plat_dev = (struct platform_device *)acpi_driver_data(device);
 153	platform_device_unregister(plat_dev);
 154}
 155
 156static struct acpi_driver fjes_acpi_driver = {
 157	.name = DRV_NAME,
 158	.class = DRV_NAME,
 159	.owner = THIS_MODULE,
 160	.ids = fjes_acpi_ids,
 161	.ops = {
 162		.add = fjes_acpi_add,
 163		.remove = fjes_acpi_remove,
 164	},
 165};
 166
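/*
 * fjes_setup_resources - request shared buffer info from the hardware and
 * register a TX/RX buffer pair with every peer EP in the same zone
 */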
 167static int fjes_setup_resources(struct fjes_adapter *adapter)
 168{
 169	struct net_device *netdev = adapter->netdev;
 170	struct ep_share_mem_info *buf_pair;
 171	struct fjes_hw *hw = &adapter->hw;
 172	unsigned long flags;
 173	int result;
 174	int epidx;
 175
 176	mutex_lock(&hw->hw_info.lock);
 177	result = fjes_hw_request_info(hw);
 178	switch (result) {
 179	case 0:
 180		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 181			hw->ep_shm_info[epidx].es_status =
 182			    hw->hw_info.res_buf->info.info[epidx].es_status;
 183			hw->ep_shm_info[epidx].zone =
 184			    hw->hw_info.res_buf->info.info[epidx].zone;
 185		}
 186		break;
 187	default:
 188	case -ENOMSG:
 189	case -EBUSY:
 190		adapter->force_reset = true;
 191
 192		mutex_unlock(&hw->hw_info.lock);
 193		return result;
 194	}
 195	mutex_unlock(&hw->hw_info.lock);
 196
 197	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 198		if ((epidx != hw->my_epid) &&
 199		    (hw->ep_shm_info[epidx].es_status ==
 200		     FJES_ZONING_STATUS_ENABLE)) {
 201			fjes_hw_raise_interrupt(hw, epidx,
 202						REG_ICTL_MASK_INFO_UPDATE);
 203			hw->ep_shm_info[epidx].ep_stats
 204				.send_intr_zoneupdate += 1;
 205		}
 206	}
 207
 208	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
 209
 210	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 211		if (epidx == hw->my_epid)
 212			continue;
 213
 214		buf_pair = &hw->ep_shm_info[epidx];
 215
 216		spin_lock_irqsave(&hw->rx_status_lock, flags);
 217		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
 218				    netdev->mtu);
 219		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 220
 221		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
 222			mutex_lock(&hw->hw_info.lock);
 223			result =
 224			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
 225			mutex_unlock(&hw->hw_info.lock);
 226
 227			switch (result) {
 228			case 0:
 229				break;
 230			case -ENOMSG:
 231			case -EBUSY:
 232			default:
 233				adapter->force_reset = true;
 234				return result;
 235			}
 236
 237			hw->ep_shm_info[epidx].ep_stats
 238				.com_regist_buf_exec += 1;
 239		}
 240	}
 241
 242	return 0;
 243}
 244
 245static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
 246{
 247	struct fjes_hw *hw = &adapter->hw;
 248
 249	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
 250
 251	adapter->unset_rx_last = true;
 252	napi_schedule(&adapter->napi);
 253}
 254
 255static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
 256{
 257	struct fjes_hw *hw = &adapter->hw;
 258	enum ep_partner_status status;
 259	unsigned long flags;
 260
 261	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
 262
 263	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 264	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
 265	switch (status) {
 266	case EP_PARTNER_WAITING:
 267		spin_lock_irqsave(&hw->rx_status_lock, flags);
 268		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 269				FJES_RX_STOP_REQ_DONE;
 270		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 271		clear_bit(src_epid, &hw->txrx_stop_req_bit);
 272		fallthrough;
 273	case EP_PARTNER_UNSHARE:
 274	case EP_PARTNER_COMPLETE:
 275	default:
 276		set_bit(src_epid, &adapter->unshare_watch_bitmask);
 277		if (!work_pending(&adapter->unshare_watch_task))
 278			queue_work(adapter->control_wq,
 279				   &adapter->unshare_watch_task);
 280		break;
 281	case EP_PARTNER_SHARED:
 282		set_bit(src_epid, &hw->epstop_req_bit);
 283
 284		if (!work_pending(&hw->epstop_task))
 285			queue_work(adapter->control_wq, &hw->epstop_task);
 286		break;
 287	}
 288	trace_fjes_stop_req_irq_post(hw, src_epid);
 289}
 290
 291static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
 292				   int src_epid)
 293{
 294	struct fjes_hw *hw = &adapter->hw;
 295	enum ep_partner_status status;
 296	unsigned long flags;
 297
 298	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 299	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
 300	switch (status) {
 301	case EP_PARTNER_UNSHARE:
 302	case EP_PARTNER_COMPLETE:
 303	default:
 304		break;
 305	case EP_PARTNER_WAITING:
 306		if (src_epid < hw->my_epid) {
 307			spin_lock_irqsave(&hw->rx_status_lock, flags);
 308			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 309				FJES_RX_STOP_REQ_DONE;
 310			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 311
 312			clear_bit(src_epid, &hw->txrx_stop_req_bit);
 313			set_bit(src_epid, &adapter->unshare_watch_bitmask);
 314
 315			if (!work_pending(&adapter->unshare_watch_task))
 316				queue_work(adapter->control_wq,
 317					   &adapter->unshare_watch_task);
 318		}
 319		break;
 320	case EP_PARTNER_SHARED:
 321		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
 322		    FJES_RX_STOP_REQ_REQUEST) {
 323			set_bit(src_epid, &hw->epstop_req_bit);
 324			if (!work_pending(&hw->epstop_task))
 325				queue_work(adapter->control_wq,
 326					   &hw->epstop_task);
 327		}
 328		break;
 329	}
 330	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
 331}
 332
 333static void fjes_update_zone_irq(struct fjes_adapter *adapter,
 334				 int src_epid)
 335{
 336	struct fjes_hw *hw = &adapter->hw;
 337
 338	if (!work_pending(&hw->update_zone_task))
 339		queue_work(adapter->control_wq, &hw->update_zone_task);
 340}
 341
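/* fjes_intr - interrupt handler: decode the cause register and dispatch RX, stop-request and zone-update events */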
 342static irqreturn_t fjes_intr(int irq, void *data)
 343{
 344	struct fjes_adapter *adapter = data;
 345	struct fjes_hw *hw = &adapter->hw;
 346	irqreturn_t ret;
 347	u32 icr;
 348
 349	icr = fjes_hw_capture_interrupt_status(hw);
 350
 351	if (icr & REG_IS_MASK_IS_ASSERT) {
 352		if (icr & REG_ICTL_MASK_RX_DATA) {
 353			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
 354			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 355				.recv_intr_rx += 1;
 356		}
 357
 358		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
 359			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
 360			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 361				.recv_intr_stop += 1;
 362		}
 363
 364		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
 365			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
 366			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 367				.recv_intr_unshare += 1;
 368		}
 369
 370		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
 371			fjes_hw_set_irqmask(hw,
 372					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
 373
 374		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
 375			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
 376			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
 377				.recv_intr_zoneupdate += 1;
 378		}
 379
 380		ret = IRQ_HANDLED;
 381	} else {
 382		ret = IRQ_NONE;
 383	}
 384
 385	return ret;
 386}
 387
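/* fjes_request_irq - start the IRQ watch work and register the shared interrupt handler */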
 388static int fjes_request_irq(struct fjes_adapter *adapter)
 389{
 390	struct net_device *netdev = adapter->netdev;
 391	int result = -1;
 392
 393	adapter->interrupt_watch_enable = true;
 394	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
 395		queue_delayed_work(adapter->control_wq,
 396				   &adapter->interrupt_watch_task,
 397				   FJES_IRQ_WATCH_DELAY);
 398	}
 399
 400	if (!adapter->irq_registered) {
 401		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
 402				     IRQF_SHARED, netdev->name, adapter);
 403		if (result)
 404			adapter->irq_registered = false;
 405		else
 406			adapter->irq_registered = true;
 407	}
 408
 409	return result;
 410}
 411
 412static void fjes_free_irq(struct fjes_adapter *adapter)
 413{
 414	struct fjes_hw *hw = &adapter->hw;
 415
 416	adapter->interrupt_watch_enable = false;
 417	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 418
 419	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
 420
 421	if (adapter->irq_registered) {
 422		free_irq(adapter->hw.hw_res.irq, adapter);
 423		adapter->irq_registered = false;
 424	}
 425}
 426
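/* fjes_free_resources - unregister the shared buffers from all peer EPs and reset the hardware if required */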
 427static void fjes_free_resources(struct fjes_adapter *adapter)
 428{
 429	struct net_device *netdev = adapter->netdev;
 430	struct fjes_device_command_param param;
 431	struct ep_share_mem_info *buf_pair;
 432	struct fjes_hw *hw = &adapter->hw;
 433	bool reset_flag = false;
 434	unsigned long flags;
 435	int result;
 436	int epidx;
 437
 438	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 439		if (epidx == hw->my_epid)
 440			continue;
 441
 442		mutex_lock(&hw->hw_info.lock);
 443		result = fjes_hw_unregister_buff_addr(hw, epidx);
 444		mutex_unlock(&hw->hw_info.lock);
 445
 446		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
 447
 448		if (result)
 449			reset_flag = true;
 450
 451		buf_pair = &hw->ep_shm_info[epidx];
 452
 453		spin_lock_irqsave(&hw->rx_status_lock, flags);
 454		fjes_hw_setup_epbuf(&buf_pair->tx,
 455				    netdev->dev_addr, netdev->mtu);
 456		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 457
 458		clear_bit(epidx, &hw->txrx_stop_req_bit);
 459	}
 460
 461	if (reset_flag || adapter->force_reset) {
 462		result = fjes_hw_reset(hw);
 463
 464		adapter->force_reset = false;
 465
 466		if (result)
 467			adapter->open_guard = true;
 468
 469		hw->hw_info.buffer_share_bit = 0;
 470
 471		memset((void *)&param, 0, sizeof(param));
 472
 473		param.req_len = hw->hw_info.req_buf_size;
 474		param.req_start = __pa(hw->hw_info.req_buf);
 475		param.res_len = hw->hw_info.res_buf_size;
 476		param.res_start = __pa(hw->hw_info.res_buf);
 477		param.share_start = __pa(hw->hw_info.share->ep_status);
 478
 479		fjes_hw_init_command_registers(hw, &param);
 480	}
 481}
 482
 483/* fjes_open - Called when a network interface is made active */
 484static int fjes_open(struct net_device *netdev)
 485{
 486	struct fjes_adapter *adapter = netdev_priv(netdev);
 487	struct fjes_hw *hw = &adapter->hw;
 488	int result;
 489
 490	if (adapter->open_guard)
 491		return -ENXIO;
 492
 493	result = fjes_setup_resources(adapter);
 494	if (result)
 495		goto err_setup_res;
 496
 497	hw->txrx_stop_req_bit = 0;
 498	hw->epstop_req_bit = 0;
 499
 500	napi_enable(&adapter->napi);
 501
 502	fjes_hw_capture_interrupt_status(hw);
 503
 504	result = fjes_request_irq(adapter);
 505	if (result)
 506		goto err_req_irq;
 507
 508	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
 509
 510	netif_tx_start_all_queues(netdev);
 511	netif_carrier_on(netdev);
 512
 513	return 0;
 514
 515err_req_irq:
 516	fjes_free_irq(adapter);
 517	napi_disable(&adapter->napi);
 518
 519err_setup_res:
 520	fjes_free_resources(adapter);
 521	return result;
 522}
 523
 524/* fjes_close - Disables a network interface */
 525static int fjes_close(struct net_device *netdev)
 526{
 527	struct fjes_adapter *adapter = netdev_priv(netdev);
 528	struct fjes_hw *hw = &adapter->hw;
 529	unsigned long flags;
 530	int epidx;
 531
 532	netif_tx_stop_all_queues(netdev);
 533	netif_carrier_off(netdev);
 534
 535	fjes_hw_raise_epstop(hw);
 536
 537	napi_disable(&adapter->napi);
 538
 539	spin_lock_irqsave(&hw->rx_status_lock, flags);
 540	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 541		if (epidx == hw->my_epid)
 542			continue;
 543
 544		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
 545		    EP_PARTNER_SHARED)
 546			adapter->hw.ep_shm_info[epidx]
 547				   .tx.info->v1i.rx_status &=
 548				~FJES_RX_POLL_WORK;
 549	}
 550	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 551
 552	fjes_free_irq(adapter);
 553
 554	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 555	cancel_work_sync(&adapter->unshare_watch_task);
 556	adapter->unshare_watch_bitmask = 0;
 557	cancel_work_sync(&adapter->raise_intr_rxdata_task);
 558	cancel_work_sync(&adapter->tx_stall_task);
 559
 560	cancel_work_sync(&hw->update_zone_task);
 561	cancel_work_sync(&hw->epstop_task);
 562
 563	fjes_hw_wait_epstop(hw);
 564
 565	fjes_free_resources(adapter);
 566
 567	return 0;
 568}
 569
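/* fjes_tx_send - copy one frame into the destination EP's TX buffer and schedule the delayed RX-data interrupt */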
 570static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
 571			void *data, size_t len)
 572{
 573	int retval;
 574
 575	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
 576					   data, len);
 577	if (retval)
 578		return retval;
 579
 580	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
 581		FJES_TX_DELAY_SEND_PENDING;
 582	if (!work_pending(&adapter->raise_intr_rxdata_task))
 583		queue_work(adapter->txrx_wq,
 584			   &adapter->raise_intr_rxdata_task);
 585
 586	retval = 0;
 587	return retval;
 588}
 589
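/*
 * fjes_xmit_frame - .ndo_start_xmit handler: deliver the frame over shared
 * memory to one peer EP, or to all peer EPs for multicast destinations
 */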
 590static netdev_tx_t
 591fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 592{
 593	struct fjes_adapter *adapter = netdev_priv(netdev);
 594	struct fjes_hw *hw = &adapter->hw;
 595
 596	int max_epid, my_epid, dest_epid;
 597	enum ep_partner_status pstatus;
 598	struct netdev_queue *cur_queue;
 599	char shortpkt[VLAN_ETH_HLEN];
 600	bool is_multi, vlan;
 601	struct ethhdr *eth;
 602	u16 queue_no = 0;
 603	u16 vlan_id = 0;
 604	netdev_tx_t ret;
 605	char *data;
 606	int len;
 607
 608	ret = NETDEV_TX_OK;
 609	is_multi = false;
 610	cur_queue = netdev_get_tx_queue(netdev, queue_no);
 611
 612	eth = (struct ethhdr *)skb->data;
 613	my_epid = hw->my_epid;
 614
 615	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
 616
 617	data = skb->data;
 618	len = skb->len;
 619
 620	if (is_multicast_ether_addr(eth->h_dest)) {
 621		dest_epid = 0;
 622		max_epid = hw->max_epid;
 623		is_multi = true;
 624	} else if (is_local_ether_addr(eth->h_dest)) {
 625		dest_epid = eth->h_dest[ETH_ALEN - 1];
 626		max_epid = dest_epid + 1;
 627
 628		if ((eth->h_dest[0] == 0x02) &&
 629		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
 630			      eth->h_dest[3] | eth->h_dest[4])) &&
 631		    (dest_epid < hw->max_epid)) {
 632			;
 633		} else {
 634			dest_epid = 0;
 635			max_epid = 0;
 636			ret = NETDEV_TX_OK;
 637
 638			adapter->stats64.tx_packets += 1;
 639			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 640			adapter->stats64.tx_bytes += len;
 641			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 642		}
 643	} else {
 644		dest_epid = 0;
 645		max_epid = 0;
 646		ret = NETDEV_TX_OK;
 647
 648		adapter->stats64.tx_packets += 1;
 649		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 650		adapter->stats64.tx_bytes += len;
 651		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 652	}
 653
 654	for (; dest_epid < max_epid; dest_epid++) {
 655		if (my_epid == dest_epid)
 656			continue;
 657
 658		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
 659		if (pstatus != EP_PARTNER_SHARED) {
 660			if (!is_multi)
 661				hw->ep_shm_info[dest_epid].ep_stats
 662					.tx_dropped_not_shared += 1;
 663			ret = NETDEV_TX_OK;
 664		} else if (!fjes_hw_check_epbuf_version(
 665				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
 666			/* version is NOT 0 */
 667			adapter->stats64.tx_carrier_errors += 1;
 668			hw->ep_shm_info[dest_epid].net_stats
 669						.tx_carrier_errors += 1;
 670			hw->ep_shm_info[dest_epid].ep_stats
 671					.tx_dropped_ver_mismatch += 1;
 672
 673			ret = NETDEV_TX_OK;
 674		} else if (!fjes_hw_check_mtu(
 675				&adapter->hw.ep_shm_info[dest_epid].rx,
 676				netdev->mtu)) {
 677			adapter->stats64.tx_dropped += 1;
 678			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
 679			adapter->stats64.tx_errors += 1;
 680			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
 681			hw->ep_shm_info[dest_epid].ep_stats
 682					.tx_dropped_buf_size_mismatch += 1;
 683
 684			ret = NETDEV_TX_OK;
 685		} else if (vlan &&
 686			   !fjes_hw_check_vlan_id(
 687				&adapter->hw.ep_shm_info[dest_epid].rx,
 688				vlan_id)) {
 689			hw->ep_shm_info[dest_epid].ep_stats
 690				.tx_dropped_vlanid_mismatch += 1;
 691			ret = NETDEV_TX_OK;
 692		} else {
 693			if (len < VLAN_ETH_HLEN) {
 694				memset(shortpkt, 0, VLAN_ETH_HLEN);
 695				memcpy(shortpkt, skb->data, skb->len);
 696				len = VLAN_ETH_HLEN;
 697				data = shortpkt;
 698			}
 699
 700			if (adapter->tx_retry_count == 0) {
 701				adapter->tx_start_jiffies = jiffies;
 702				adapter->tx_retry_count = 1;
 703			} else {
 704				adapter->tx_retry_count++;
 705			}
 706
 707			if (fjes_tx_send(adapter, dest_epid, data, len)) {
 708				if (is_multi) {
 709					ret = NETDEV_TX_OK;
 710				} else if (
 711					   ((long)jiffies -
 712					    (long)adapter->tx_start_jiffies) >=
 713					    FJES_TX_RETRY_TIMEOUT) {
 714					adapter->stats64.tx_fifo_errors += 1;
 715					hw->ep_shm_info[dest_epid].net_stats
 716								.tx_fifo_errors += 1;
 717					adapter->stats64.tx_errors += 1;
 718					hw->ep_shm_info[dest_epid].net_stats
 719								.tx_errors += 1;
 720
 721					ret = NETDEV_TX_OK;
 722				} else {
 723					netif_trans_update(netdev);
 724					hw->ep_shm_info[dest_epid].ep_stats
 725						.tx_buffer_full += 1;
 726					netif_tx_stop_queue(cur_queue);
 727
 728					if (!work_pending(&adapter->tx_stall_task))
 729						queue_work(adapter->txrx_wq,
 730							   &adapter->tx_stall_task);
 731
 732					ret = NETDEV_TX_BUSY;
 733				}
 734			} else {
 735				if (!is_multi) {
 736					adapter->stats64.tx_packets += 1;
 737					hw->ep_shm_info[dest_epid].net_stats
 738								.tx_packets += 1;
 739					adapter->stats64.tx_bytes += len;
 740					hw->ep_shm_info[dest_epid].net_stats
 741								.tx_bytes += len;
 742				}
 743
 744				adapter->tx_retry_count = 0;
 745				ret = NETDEV_TX_OK;
 746			}
 747		}
 748	}
 749
 750	if (ret == NETDEV_TX_OK) {
 751		dev_kfree_skb(skb);
 752		if (is_multi) {
 753			adapter->stats64.tx_packets += 1;
 754			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
  755			adapter->stats64.tx_bytes += len;
 756			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 757		}
 758	}
 759
 760	return ret;
 761}
 762
 763static void
 764fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 765{
 766	struct fjes_adapter *adapter = netdev_priv(netdev);
 767
 768	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
 769}
 770
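/* fjes_change_mtu - round the requested MTU up to a supported size and re-initialize the per-EP buffers if the interface is running */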
 771static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
 772{
 773	struct fjes_adapter *adapter = netdev_priv(netdev);
 774	bool running = netif_running(netdev);
 775	struct fjes_hw *hw = &adapter->hw;
 776	unsigned long flags;
 777	int ret = -EINVAL;
 778	int idx, epidx;
 779
 780	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
 781		if (new_mtu <= fjes_support_mtu[idx]) {
 782			new_mtu = fjes_support_mtu[idx];
 783			if (new_mtu == netdev->mtu)
 784				return 0;
 785
 786			ret = 0;
 787			break;
 788		}
 789	}
 790
 791	if (ret)
 792		return ret;
 793
 794	if (running) {
 795		spin_lock_irqsave(&hw->rx_status_lock, flags);
 796		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 797			if (epidx == hw->my_epid)
 798				continue;
 799			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
 800				~FJES_RX_MTU_CHANGING_DONE;
 801		}
 802		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 803
 804		netif_tx_stop_all_queues(netdev);
 805		netif_carrier_off(netdev);
 806		cancel_work_sync(&adapter->tx_stall_task);
 807		napi_disable(&adapter->napi);
 808
 809		msleep(1000);
 810
 811		netif_tx_stop_all_queues(netdev);
 812	}
 813
 814	netdev->mtu = new_mtu;
 815
 816	if (running) {
 817		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 818			if (epidx == hw->my_epid)
 819				continue;
 820
 821			spin_lock_irqsave(&hw->rx_status_lock, flags);
 822			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
 823					    netdev->dev_addr,
 824					    netdev->mtu);
 825
 826			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
 827				FJES_RX_MTU_CHANGING_DONE;
 828			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 829		}
 830
 831		netif_tx_wake_all_queues(netdev);
 832		netif_carrier_on(netdev);
 833		napi_enable(&adapter->napi);
 834		napi_schedule(&adapter->napi);
 835	}
 836
 837	return ret;
 838}
 839
 840static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
 841{
 842	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
 843
 844	netif_tx_wake_queue(queue);
 845}
 846
 847static int fjes_vlan_rx_add_vid(struct net_device *netdev,
 848				__be16 proto, u16 vid)
 849{
 850	struct fjes_adapter *adapter = netdev_priv(netdev);
 851	bool ret = true;
 852	int epid;
 853
 854	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 855		if (epid == adapter->hw.my_epid)
 856			continue;
 857
 858		if (!fjes_hw_check_vlan_id(
 859			&adapter->hw.ep_shm_info[epid].tx, vid))
 860			ret = fjes_hw_set_vlan_id(
 861				&adapter->hw.ep_shm_info[epid].tx, vid);
 862	}
 863
 864	return ret ? 0 : -ENOSPC;
 865}
 866
 867static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
 868				 __be16 proto, u16 vid)
 869{
 870	struct fjes_adapter *adapter = netdev_priv(netdev);
 871	int epid;
 872
 873	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 874		if (epid == adapter->hw.my_epid)
 875			continue;
 876
 877		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
 878	}
 879
 880	return 0;
 881}
 882
 883static const struct net_device_ops fjes_netdev_ops = {
 884	.ndo_open		= fjes_open,
 885	.ndo_stop		= fjes_close,
 886	.ndo_start_xmit		= fjes_xmit_frame,
 887	.ndo_get_stats64	= fjes_get_stats64,
 888	.ndo_change_mtu		= fjes_change_mtu,
 889	.ndo_tx_timeout		= fjes_tx_retry,
 890	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
 891	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
 892};
 893
 894/* fjes_netdev_setup - netdevice initialization routine */
 895static void fjes_netdev_setup(struct net_device *netdev)
 896{
 897	ether_setup(netdev);
 898
 899	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
 900	netdev->netdev_ops = &fjes_netdev_ops;
 901	fjes_set_ethtool_ops(netdev);
 902	netdev->mtu = fjes_support_mtu[3];
 903	netdev->min_mtu = fjes_support_mtu[0];
 904	netdev->max_mtu = fjes_support_mtu[3];
 905	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 906}
 907
 908static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
 909				     int start_epid)
 910{
 911	struct fjes_hw *hw = &adapter->hw;
 912	enum ep_partner_status pstatus;
 913	int max_epid, cur_epid;
 914	int i;
 915
 916	max_epid = hw->max_epid;
 917	start_epid = (start_epid + 1 + max_epid) % max_epid;
 918
 919	for (i = 0; i < max_epid; i++) {
 920		cur_epid = (start_epid + i) % max_epid;
 921		if (cur_epid == hw->my_epid)
 922			continue;
 923
 924		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
 925		if (pstatus == EP_PARTNER_SHARED) {
 926			if (!fjes_hw_epbuf_rx_is_empty(
 927				&hw->ep_shm_info[cur_epid].rx))
 928				return cur_epid;
 929		}
 930	}
 931	return -1;
 932}
 933
 934static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
 935			      int *cur_epid)
 936{
 937	void *frame;
 938
 939	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
 940	if (*cur_epid < 0)
 941		return NULL;
 942
 943	frame =
 944	fjes_hw_epbuf_rx_curpkt_get_addr(
 945		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
 946
 947	return frame;
 948}
 949
 950static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
 951{
 952	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
 953}
 954
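/* fjes_poll - NAPI poll routine: drain received frames from the shared RX buffers, up to the given budget */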
 955static int fjes_poll(struct napi_struct *napi, int budget)
 956{
 957	struct fjes_adapter *adapter =
 958			container_of(napi, struct fjes_adapter, napi);
 959	struct net_device *netdev = napi->dev;
 960	struct fjes_hw *hw = &adapter->hw;
 961	struct sk_buff *skb;
 962	int work_done = 0;
 963	int cur_epid = 0;
 964	int epidx;
 965	size_t frame_len;
 966	void *frame;
 967
 968	spin_lock(&hw->rx_status_lock);
 969	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 970		if (epidx == hw->my_epid)
 971			continue;
 972
 973		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
 974		    EP_PARTNER_SHARED)
 975			adapter->hw.ep_shm_info[epidx]
 976				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
 977	}
 978	spin_unlock(&hw->rx_status_lock);
 979
 980	while (work_done < budget) {
 981		prefetch(&adapter->hw);
 982		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
 983
 984		if (frame) {
 985			skb = napi_alloc_skb(napi, frame_len);
 986			if (!skb) {
 987				adapter->stats64.rx_dropped += 1;
 988				hw->ep_shm_info[cur_epid].net_stats
 989							 .rx_dropped += 1;
 990				adapter->stats64.rx_errors += 1;
 991				hw->ep_shm_info[cur_epid].net_stats
 992							 .rx_errors += 1;
 993			} else {
 994				skb_put_data(skb, frame, frame_len);
 995				skb->protocol = eth_type_trans(skb, netdev);
 996				skb->ip_summed = CHECKSUM_UNNECESSARY;
 997
 998				netif_receive_skb(skb);
 999
1000				work_done++;
1001
1002				adapter->stats64.rx_packets += 1;
1003				hw->ep_shm_info[cur_epid].net_stats
1004							 .rx_packets += 1;
1005				adapter->stats64.rx_bytes += frame_len;
1006				hw->ep_shm_info[cur_epid].net_stats
1007							 .rx_bytes += frame_len;
1008
1009				if (is_multicast_ether_addr(
1010					((struct ethhdr *)frame)->h_dest)) {
1011					adapter->stats64.multicast += 1;
1012					hw->ep_shm_info[cur_epid].net_stats
1013								 .multicast += 1;
1014				}
1015			}
1016
1017			fjes_rxframe_release(adapter, cur_epid);
1018			adapter->unset_rx_last = true;
1019		} else {
1020			break;
1021		}
1022	}
1023
1024	if (work_done < budget) {
1025		napi_complete_done(napi, work_done);
1026
1027		if (adapter->unset_rx_last) {
1028			adapter->rx_last_jiffies = jiffies;
1029			adapter->unset_rx_last = false;
1030		}
1031
1032		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
1033			napi_schedule(napi);
1034		} else {
1035			spin_lock(&hw->rx_status_lock);
1036			for (epidx = 0; epidx < hw->max_epid; epidx++) {
1037				if (epidx == hw->my_epid)
1038					continue;
1039				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1040				    EP_PARTNER_SHARED)
1041					adapter->hw.ep_shm_info[epidx].tx
1042						   .info->v1i.rx_status &=
1043						~FJES_RX_POLL_WORK;
1044			}
1045			spin_unlock(&hw->rx_status_lock);
1046
1047			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
1048		}
1049	}
1050
1051	return work_done;
1052}
1053
1054static int fjes_sw_init(struct fjes_adapter *adapter)
1055{
1056	struct net_device *netdev = adapter->netdev;
1057
1058	netif_napi_add(netdev, &adapter->napi, fjes_poll);
1059
1060	return 0;
1061}
1062
1063static void fjes_force_close_task(struct work_struct *work)
1064{
1065	struct fjes_adapter *adapter = container_of(work,
1066			struct fjes_adapter, force_close_task);
1067	struct net_device *netdev = adapter->netdev;
1068
1069	rtnl_lock();
1070	dev_close(netdev);
1071	rtnl_unlock();
1072}
1073
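/* fjes_tx_stall_task - wake the stopped TX queue once the peer ring buffers have room again, or the stall times out */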
1074static void fjes_tx_stall_task(struct work_struct *work)
1075{
1076	struct fjes_adapter *adapter = container_of(work,
1077			struct fjes_adapter, tx_stall_task);
1078	struct net_device *netdev = adapter->netdev;
1079	struct fjes_hw *hw = &adapter->hw;
1080	int all_queue_available, sendable;
1081	enum ep_partner_status pstatus;
1082	int max_epid, my_epid, epid;
1083	union ep_buffer_info *info;
1084	int i;
1085
1086	if (((long)jiffies -
1087		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
1088		netif_wake_queue(netdev);
1089		return;
1090	}
1091
1092	my_epid = hw->my_epid;
1093	max_epid = hw->max_epid;
1094
1095	for (i = 0; i < 5; i++) {
1096		all_queue_available = 1;
1097
1098		for (epid = 0; epid < max_epid; epid++) {
1099			if (my_epid == epid)
1100				continue;
1101
1102			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
1103			sendable = (pstatus == EP_PARTNER_SHARED);
1104			if (!sendable)
1105				continue;
1106
1107			info = adapter->hw.ep_shm_info[epid].tx.info;
1108
1109			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
1110				return;
1111
1112			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
1113					 info->v1i.count_max)) {
1114				all_queue_available = 0;
1115				break;
1116			}
1117		}
1118
1119		if (all_queue_available) {
1120			netif_wake_queue(netdev);
1121			return;
1122		}
1123	}
1124
1125	usleep_range(50, 100);
1126
1127	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
1128}
1129
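/* fjes_raise_intr_rxdata_task - raise an RX-data interrupt towards peers that have delayed-send frames pending */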
1130static void fjes_raise_intr_rxdata_task(struct work_struct *work)
1131{
1132	struct fjes_adapter *adapter = container_of(work,
1133			struct fjes_adapter, raise_intr_rxdata_task);
1134	struct fjes_hw *hw = &adapter->hw;
1135	enum ep_partner_status pstatus;
1136	int max_epid, my_epid, epid;
1137
1138	my_epid = hw->my_epid;
1139	max_epid = hw->max_epid;
1140
1141	for (epid = 0; epid < max_epid; epid++)
1142		hw->ep_shm_info[epid].tx_status_work = 0;
1143
1144	for (epid = 0; epid < max_epid; epid++) {
1145		if (epid == my_epid)
1146			continue;
1147
1148		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
1149		if (pstatus == EP_PARTNER_SHARED) {
1150			hw->ep_shm_info[epid].tx_status_work =
1151				hw->ep_shm_info[epid].tx.info->v1i.tx_status;
1152
1153			if (hw->ep_shm_info[epid].tx_status_work ==
1154				FJES_TX_DELAY_SEND_PENDING) {
1155				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
1156					FJES_TX_DELAY_SEND_NONE;
1157			}
1158		}
1159	}
1160
1161	for (epid = 0; epid < max_epid; epid++) {
1162		if (epid == my_epid)
1163			continue;
1164
1165		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
1166		if ((hw->ep_shm_info[epid].tx_status_work ==
1167		     FJES_TX_DELAY_SEND_PENDING) &&
1168		    (pstatus == EP_PARTNER_SHARED) &&
1169		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
1170		      FJES_RX_POLL_WORK)) {
1171			fjes_hw_raise_interrupt(hw, epid,
1172						REG_ICTL_MASK_RX_DATA);
1173			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
1174		}
1175	}
1176
1177	usleep_range(500, 1000);
1178}
1179
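/* fjes_watch_unshare_task - wait for peers to complete the stop request, then unregister their shared buffers */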
1180static void fjes_watch_unshare_task(struct work_struct *work)
1181{
1182	struct fjes_adapter *adapter =
1183	container_of(work, struct fjes_adapter, unshare_watch_task);
1184
1185	struct net_device *netdev = adapter->netdev;
1186	struct fjes_hw *hw = &adapter->hw;
1187
1188	int unshare_watch, unshare_reserve;
1189	int max_epid, my_epid, epidx;
1190	int stop_req, stop_req_done;
1191	ulong unshare_watch_bitmask;
1192	unsigned long flags;
1193	int wait_time = 0;
1194	int is_shared;
1195	int ret;
1196
1197	my_epid = hw->my_epid;
1198	max_epid = hw->max_epid;
1199
1200	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1201	adapter->unshare_watch_bitmask = 0;
1202
1203	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1204	       (wait_time < 3000)) {
1205		for (epidx = 0; epidx < max_epid; epidx++) {
1206			if (epidx == my_epid)
1207				continue;
1208
1209			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1210							   epidx);
1211
1212			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1213
1214			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1215					FJES_RX_STOP_REQ_DONE;
1216
1217			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1218
1219			unshare_reserve = test_bit(epidx,
1220						   &hw->hw_info.buffer_unshare_reserve_bit);
1221
1222			if ((!stop_req ||
1223			     (is_shared && (!is_shared || !stop_req_done))) &&
1224			    (is_shared || !unshare_watch || !unshare_reserve))
1225				continue;
1226
1227			mutex_lock(&hw->hw_info.lock);
1228			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1229			switch (ret) {
1230			case 0:
1231				break;
1232			case -ENOMSG:
1233			case -EBUSY:
1234			default:
1235				if (!work_pending(
1236					&adapter->force_close_task)) {
1237					adapter->force_reset = true;
1238					schedule_work(
1239						&adapter->force_close_task);
1240				}
1241				break;
1242			}
1243			mutex_unlock(&hw->hw_info.lock);
1244			hw->ep_shm_info[epidx].ep_stats
1245					.com_unregist_buf_exec += 1;
1246
1247			spin_lock_irqsave(&hw->rx_status_lock, flags);
1248			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1249					    netdev->dev_addr, netdev->mtu);
1250			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1251
1252			clear_bit(epidx, &hw->txrx_stop_req_bit);
1253			clear_bit(epidx, &unshare_watch_bitmask);
1254			clear_bit(epidx,
1255				  &hw->hw_info.buffer_unshare_reserve_bit);
1256		}
1257
1258		msleep(100);
1259		wait_time += 100;
1260	}
1261
1262	if (hw->hw_info.buffer_unshare_reserve_bit) {
1263		for (epidx = 0; epidx < max_epid; epidx++) {
1264			if (epidx == my_epid)
1265				continue;
1266
1267			if (test_bit(epidx,
1268				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1269				mutex_lock(&hw->hw_info.lock);
1270
1271				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1272				switch (ret) {
1273				case 0:
1274					break;
1275				case -ENOMSG:
1276				case -EBUSY:
1277				default:
1278					if (!work_pending(
1279						&adapter->force_close_task)) {
1280						adapter->force_reset = true;
1281						schedule_work(
1282							&adapter->force_close_task);
1283					}
1284					break;
1285				}
1286				mutex_unlock(&hw->hw_info.lock);
1287
1288				hw->ep_shm_info[epidx].ep_stats
1289					.com_unregist_buf_exec += 1;
1290
1291				spin_lock_irqsave(&hw->rx_status_lock, flags);
1292				fjes_hw_setup_epbuf(
1293					&hw->ep_shm_info[epidx].tx,
1294					netdev->dev_addr, netdev->mtu);
1295				spin_unlock_irqrestore(&hw->rx_status_lock,
1296						       flags);
1297
1298				clear_bit(epidx, &hw->txrx_stop_req_bit);
1299				clear_bit(epidx, &unshare_watch_bitmask);
1300				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1301			}
1302
1303			if (test_bit(epidx, &unshare_watch_bitmask)) {
1304				spin_lock_irqsave(&hw->rx_status_lock, flags);
1305				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1306						~FJES_RX_STOP_REQ_DONE;
1307				spin_unlock_irqrestore(&hw->rx_status_lock,
1308						       flags);
1309			}
1310		}
1311	}
1312}
1313
1314static void fjes_irq_watch_task(struct work_struct *work)
1315{
1316	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1317			struct fjes_adapter, interrupt_watch_task);
1318
1319	local_irq_disable();
1320	fjes_intr(adapter->hw.hw_res.irq, adapter);
1321	local_irq_enable();
1322
1323	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1324		napi_schedule(&adapter->napi);
1325
1326	if (adapter->interrupt_watch_enable) {
1327		if (!delayed_work_pending(&adapter->interrupt_watch_task))
1328			queue_delayed_work(adapter->control_wq,
1329					   &adapter->interrupt_watch_task,
1330					   FJES_IRQ_WATCH_DELAY);
1331	}
1332}
1333
1334/* fjes_probe - Device Initialization Routine */
1335static int fjes_probe(struct platform_device *plat_dev)
1336{
1337	struct fjes_adapter *adapter;
1338	struct net_device *netdev;
1339	struct resource *res;
1340	struct fjes_hw *hw;
1341	u8 addr[ETH_ALEN];
1342	int err;
1343
1344	err = -ENOMEM;
1345	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1346				 NET_NAME_UNKNOWN, fjes_netdev_setup,
1347				 FJES_MAX_QUEUES);
1348
1349	if (!netdev)
1350		goto err_out;
1351
1352	SET_NETDEV_DEV(netdev, &plat_dev->dev);
1353
1354	dev_set_drvdata(&plat_dev->dev, netdev);
1355	adapter = netdev_priv(netdev);
1356	adapter->netdev = netdev;
1357	adapter->plat_dev = plat_dev;
1358	hw = &adapter->hw;
1359	hw->back = adapter;
1360
1361	/* setup the private structure */
1362	err = fjes_sw_init(adapter);
1363	if (err)
1364		goto err_free_netdev;
1365
1366	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1367	adapter->force_reset = false;
1368	adapter->open_guard = false;
1369
1370	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1371	if (unlikely(!adapter->txrx_wq)) {
1372		err = -ENOMEM;
1373		goto err_free_netdev;
1374	}
1375
1376	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1377					      WQ_MEM_RECLAIM, 0);
1378	if (unlikely(!adapter->control_wq)) {
1379		err = -ENOMEM;
1380		goto err_free_txrx_wq;
1381	}
1382
1383	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1384	INIT_WORK(&adapter->raise_intr_rxdata_task,
1385		  fjes_raise_intr_rxdata_task);
1386	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1387	adapter->unshare_watch_bitmask = 0;
1388
1389	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1390	adapter->interrupt_watch_enable = false;
1391
1392	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1393	if (!res) {
1394		err = -EINVAL;
1395		goto err_free_control_wq;
1396	}
1397	hw->hw_res.start = res->start;
1398	hw->hw_res.size = resource_size(res);
1399	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1400	if (hw->hw_res.irq < 0) {
1401		err = hw->hw_res.irq;
1402		goto err_free_control_wq;
1403	}
1404
1405	err = fjes_hw_init(&adapter->hw);
1406	if (err)
1407		goto err_free_control_wq;
1408
1409	/* setup MAC address (02:00:00:00:00:[epid])*/
1410	addr[0] = 2;
1411	addr[1] = 0;
1412	addr[2] = 0;
1413	addr[3] = 0;
1414	addr[4] = 0;
1415	addr[5] = hw->my_epid; /* EPID */
1416	eth_hw_addr_set(netdev, addr);
1417
1418	err = register_netdev(netdev);
1419	if (err)
1420		goto err_hw_exit;
1421
1422	netif_carrier_off(netdev);
1423
1424	fjes_dbg_adapter_init(adapter);
1425
1426	return 0;
1427
1428err_hw_exit:
1429	fjes_hw_exit(&adapter->hw);
1430err_free_control_wq:
1431	destroy_workqueue(adapter->control_wq);
1432err_free_txrx_wq:
1433	destroy_workqueue(adapter->txrx_wq);
1434err_free_netdev:
1435	free_netdev(netdev);
1436err_out:
1437	return err;
1438}
1439
1440/* fjes_remove - Device Removal Routine */
1441static void fjes_remove(struct platform_device *plat_dev)
1442{
1443	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
1444	struct fjes_adapter *adapter = netdev_priv(netdev);
1445	struct fjes_hw *hw = &adapter->hw;
1446
1447	fjes_dbg_adapter_exit(adapter);
1448
1449	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
1450	cancel_work_sync(&adapter->unshare_watch_task);
1451	cancel_work_sync(&adapter->raise_intr_rxdata_task);
1452	cancel_work_sync(&adapter->tx_stall_task);
1453	if (adapter->control_wq)
1454		destroy_workqueue(adapter->control_wq);
1455	if (adapter->txrx_wq)
1456		destroy_workqueue(adapter->txrx_wq);
1457
1458	unregister_netdev(netdev);
1459
1460	fjes_hw_exit(hw);
1461
1462	netif_napi_del(&adapter->napi);
1463
1464	free_netdev(netdev);
1465}
1466
1467static struct platform_driver fjes_driver = {
1468	.driver = {
1469		.name = DRV_NAME,
1470	},
1471	.probe = fjes_probe,
1472	.remove_new = fjes_remove,
1473};
1474
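/* acpi_find_extended_socket_device - namespace walk callback used at module init to detect an extended socket device */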
1475static acpi_status
1476acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1477				 void *context, void **return_value)
1478{
1479	struct acpi_device *device;
1480	bool *found = context;
1481
1482	device = acpi_fetch_acpi_dev(obj_handle);
1483	if (!device)
1484		return AE_OK;
1485
1486	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1487		return AE_OK;
1488
1489	if (!is_extended_socket_device(device))
1490		return AE_OK;
1491
1492	if (acpi_check_extended_socket_status(device))
1493		return AE_OK;
1494
1495	*found = true;
1496	return AE_CTRL_TERMINATE;
1497}
1498
1499/* fjes_init_module - Driver Registration Routine */
1500static int __init fjes_init_module(void)
1501{
1502	bool found = false;
1503	int result;
1504
1505	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1506			    acpi_find_extended_socket_device, NULL, &found,
1507			    NULL);
1508
1509	if (!found)
1510		return -ENODEV;
1511
1512	pr_info("%s - version %s - %s\n",
1513		fjes_driver_string, fjes_driver_version, fjes_copyright);
1514
1515	fjes_dbg_init();
1516
1517	result = platform_driver_register(&fjes_driver);
1518	if (result < 0) {
1519		fjes_dbg_exit();
1520		return result;
1521	}
1522
1523	result = acpi_bus_register_driver(&fjes_acpi_driver);
1524	if (result < 0)
1525		goto fail_acpi_driver;
1526
1527	return 0;
1528
1529fail_acpi_driver:
1530	platform_driver_unregister(&fjes_driver);
1531	fjes_dbg_exit();
1532	return result;
1533}
1534
1535module_init(fjes_init_module);
1536
1537/* fjes_exit_module - Driver Exit Cleanup Routine */
1538static void __exit fjes_exit_module(void)
1539{
1540	acpi_bus_unregister_driver(&fjes_acpi_driver);
1541	platform_driver_unregister(&fjes_driver);
1542	fjes_dbg_exit();
1543}
1544
1545module_exit(fjes_exit_module);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  FUJITSU Extended Socket Network Device driver
   4 *  Copyright (c) 2015 FUJITSU LIMITED
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/types.h>
   9#include <linux/nls.h>
  10#include <linux/platform_device.h>
  11#include <linux/netdevice.h>
  12#include <linux/interrupt.h>
  13
  14#include "fjes.h"
  15#include "fjes_trace.h"
  16
  17#define MAJ 1
  18#define MIN 2
  19#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
  20#define DRV_NAME	"fjes"
  21char fjes_driver_name[] = DRV_NAME;
  22char fjes_driver_version[] = DRV_VERSION;
  23static const char fjes_driver_string[] =
  24		"FUJITSU Extended Socket Network Device Driver";
  25static const char fjes_copyright[] =
  26		"Copyright (c) 2015 FUJITSU LIMITED";
  27
  28MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
  29MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
  30MODULE_LICENSE("GPL");
  31MODULE_VERSION(DRV_VERSION);
  32
  33#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
  34
  35static int fjes_request_irq(struct fjes_adapter *);
  36static void fjes_free_irq(struct fjes_adapter *);
  37
  38static int fjes_open(struct net_device *);
  39static int fjes_close(struct net_device *);
  40static int fjes_setup_resources(struct fjes_adapter *);
  41static void fjes_free_resources(struct fjes_adapter *);
  42static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
  43static void fjes_raise_intr_rxdata_task(struct work_struct *);
  44static void fjes_tx_stall_task(struct work_struct *);
  45static void fjes_force_close_task(struct work_struct *);
  46static irqreturn_t fjes_intr(int, void*);
  47static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
  48static int fjes_change_mtu(struct net_device *, int);
  49static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
  50static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
  51static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
  52
  53static int fjes_acpi_add(struct acpi_device *);
  54static int fjes_acpi_remove(struct acpi_device *);
  55static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
  56
  57static int fjes_probe(struct platform_device *);
  58static int fjes_remove(struct platform_device *);
  59
  60static int fjes_sw_init(struct fjes_adapter *);
  61static void fjes_netdev_setup(struct net_device *);
  62static void fjes_irq_watch_task(struct work_struct *);
  63static void fjes_watch_unshare_task(struct work_struct *);
  64static void fjes_rx_irq(struct fjes_adapter *, int);
  65static int fjes_poll(struct napi_struct *, int);
  66
  67static const struct acpi_device_id fjes_acpi_ids[] = {
  68	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
  69	{"", 0},
  70};
  71MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
  72
  73static struct acpi_driver fjes_acpi_driver = {
  74	.name = DRV_NAME,
  75	.class = DRV_NAME,
  76	.owner = THIS_MODULE,
  77	.ids = fjes_acpi_ids,
  78	.ops = {
  79		.add = fjes_acpi_add,
  80		.remove = fjes_acpi_remove,
  81	},
  82};
  83
  84static struct platform_driver fjes_driver = {
  85	.driver = {
  86		.name = DRV_NAME,
  87	},
  88	.probe = fjes_probe,
  89	.remove = fjes_remove,
  90};
  91
  92static struct resource fjes_resource[] = {
  93	{
  94		.flags = IORESOURCE_MEM,
  95		.start = 0,
  96		.end = 0,
  97	},
  98	{
  99		.flags = IORESOURCE_IRQ,
 100		.start = 0,
 101		.end = 0,
 102	},
 103};
 104
 105static bool is_extended_socket_device(struct acpi_device *device)
 106{
 107	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
 108	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
 109	union acpi_object *str;
 110	acpi_status status;
 111	int result;
 112
 113	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
 114	if (ACPI_FAILURE(status))
 115		return false;
 116
 117	str = buffer.pointer;
 118	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
 119				 str->string.length, UTF16_LITTLE_ENDIAN,
 120				 str_buf, sizeof(str_buf) - 1);
 121	str_buf[result] = 0;
 122
 123	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
 124		kfree(buffer.pointer);
 125		return false;
 126	}
 127	kfree(buffer.pointer);
 128
 129	return true;
 130}
 131
 132static int acpi_check_extended_socket_status(struct acpi_device *device)
 133{
 134	unsigned long long sta;
 135	acpi_status status;
 136
 137	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
 138	if (ACPI_FAILURE(status))
 139		return -ENODEV;
 140
 141	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
 142	      (sta & ACPI_STA_DEVICE_ENABLED) &&
 143	      (sta & ACPI_STA_DEVICE_UI) &&
 144	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
 145		return -ENODEV;
 146
 147	return 0;
 148}
 149
 150static int fjes_acpi_add(struct acpi_device *device)
 151{
 152	struct platform_device *plat_dev;
 153	acpi_status status;
 154
 155	if (!is_extended_socket_device(device))
 156		return -ENODEV;
 157
 158	if (acpi_check_extended_socket_status(device))
 159		return -ENODEV;
 160
 161	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
 162				     fjes_get_acpi_resource, fjes_resource);
 163	if (ACPI_FAILURE(status))
 164		return -ENODEV;
 165
 166	/* create platform_device */
 167	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
 168						   ARRAY_SIZE(fjes_resource));
 169	if (IS_ERR(plat_dev))
 170		return PTR_ERR(plat_dev);
 171
 172	device->driver_data = plat_dev;
 173
 174	return 0;
 175}
 176
 177static int fjes_acpi_remove(struct acpi_device *device)
 178{
 179	struct platform_device *plat_dev;
 180
 181	plat_dev = (struct platform_device *)acpi_driver_data(device);
 182	platform_device_unregister(plat_dev);
 183
 184	return 0;
 185}
 186
 187static acpi_status
 188fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
 189{
 190	struct acpi_resource_address32 *addr;
 191	struct acpi_resource_irq *irq;
 192	struct resource *res = data;
 193
 194	switch (acpi_res->type) {
 195	case ACPI_RESOURCE_TYPE_ADDRESS32:
 196		addr = &acpi_res->data.address32;
 197		res[0].start = addr->address.minimum;
 198		res[0].end = addr->address.minimum +
 199			addr->address.address_length - 1;
 200		break;
 201
 202	case ACPI_RESOURCE_TYPE_IRQ:
 203		irq = &acpi_res->data.irq;
 204		if (irq->interrupt_count != 1)
 205			return AE_ERROR;
 206		res[1].start = irq->interrupts[0];
 207		res[1].end = irq->interrupts[0];
 208		break;
 209
 210	default:
 211		break;
 212	}
 213
 214	return AE_OK;
 215}
 216
 217static int fjes_request_irq(struct fjes_adapter *adapter)
 218{
 219	struct net_device *netdev = adapter->netdev;
 220	int result = -1;
 221
 222	adapter->interrupt_watch_enable = true;
 223	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
 224		queue_delayed_work(adapter->control_wq,
 225				   &adapter->interrupt_watch_task,
 226				   FJES_IRQ_WATCH_DELAY);
 227	}
 228
 229	if (!adapter->irq_registered) {
 230		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
 231				     IRQF_SHARED, netdev->name, adapter);
 232		if (result)
 233			adapter->irq_registered = false;
 234		else
 235			adapter->irq_registered = true;
 236	}
 237
 238	return result;
 239}
 240
 241static void fjes_free_irq(struct fjes_adapter *adapter)
 242{
 243	struct fjes_hw *hw = &adapter->hw;
 244
 245	adapter->interrupt_watch_enable = false;
 246	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 247
 248	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
 249
 250	if (adapter->irq_registered) {
 251		free_irq(adapter->hw.hw_res.irq, adapter);
 252		adapter->irq_registered = false;
 253	}
 254}
 255
 256static const struct net_device_ops fjes_netdev_ops = {
 257	.ndo_open		= fjes_open,
 258	.ndo_stop		= fjes_close,
 259	.ndo_start_xmit		= fjes_xmit_frame,
 260	.ndo_get_stats64	= fjes_get_stats64,
 261	.ndo_change_mtu		= fjes_change_mtu,
 262	.ndo_tx_timeout		= fjes_tx_retry,
 263	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
 264	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
 265};
 266
 267/* fjes_open - Called when a network interface is made active */
 268static int fjes_open(struct net_device *netdev)
 269{
 270	struct fjes_adapter *adapter = netdev_priv(netdev);
 271	struct fjes_hw *hw = &adapter->hw;
 272	int result;
 273
 274	if (adapter->open_guard)
 275		return -ENXIO;
 276
 277	result = fjes_setup_resources(adapter);
 278	if (result)
 279		goto err_setup_res;
 280
 281	hw->txrx_stop_req_bit = 0;
 282	hw->epstop_req_bit = 0;
 
 
 283
 284	napi_enable(&adapter->napi);
 
 
 
 
 285
 286	fjes_hw_capture_interrupt_status(hw);
 287
 288	result = fjes_request_irq(adapter);
 289	if (result)
 290		goto err_req_irq;
 291
 292	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
 293
 294	netif_tx_start_all_queues(netdev);
 295	netif_carrier_on(netdev);
 296
 297	return 0;
 298
 299err_req_irq:
 300	fjes_free_irq(adapter);
 301	napi_disable(&adapter->napi);
 302
 303err_setup_res:
 304	fjes_free_resources(adapter);
 305	return result;
 306}
 307
 308/* fjes_close - Disables a network interface */
 309static int fjes_close(struct net_device *netdev)
 310{
 311	struct fjes_adapter *adapter = netdev_priv(netdev);
 312	struct fjes_hw *hw = &adapter->hw;
 313	unsigned long flags;
 314	int epidx;
 315
 316	netif_tx_stop_all_queues(netdev);
 317	netif_carrier_off(netdev);
 
 318
 319	fjes_hw_raise_epstop(hw);
 320
 321	napi_disable(&adapter->napi);
 322
 323	spin_lock_irqsave(&hw->rx_status_lock, flags);
 324	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 325		if (epidx == hw->my_epid)
 326			continue;
 327
 328		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
 329		    EP_PARTNER_SHARED)
 330			adapter->hw.ep_shm_info[epidx]
 331				   .tx.info->v1i.rx_status &=
 332				~FJES_RX_POLL_WORK;
 333	}
 334	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 335
 336	fjes_free_irq(adapter);
 337
 338	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
 339	cancel_work_sync(&adapter->unshare_watch_task);
 340	adapter->unshare_watch_bitmask = 0;
 341	cancel_work_sync(&adapter->raise_intr_rxdata_task);
 342	cancel_work_sync(&adapter->tx_stall_task);
 343
 344	cancel_work_sync(&hw->update_zone_task);
 345	cancel_work_sync(&hw->epstop_task);
 346
 347	fjes_hw_wait_epstop(hw);
 348
 349	fjes_free_resources(adapter);
 350
 351	return 0;
 352}
 353
 354static int fjes_setup_resources(struct fjes_adapter *adapter)
 355{
 356	struct net_device *netdev = adapter->netdev;
 357	struct ep_share_mem_info *buf_pair;
 358	struct fjes_hw *hw = &adapter->hw;
 359	unsigned long flags;
 360	int result;
 361	int epidx;
 362
 363	mutex_lock(&hw->hw_info.lock);
 364	result = fjes_hw_request_info(hw);
 365	switch (result) {
 366	case 0:
 367		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 368			hw->ep_shm_info[epidx].es_status =
 369			    hw->hw_info.res_buf->info.info[epidx].es_status;
 370			hw->ep_shm_info[epidx].zone =
 371			    hw->hw_info.res_buf->info.info[epidx].zone;
 372		}
 373		break;
 374	default:
 375	case -ENOMSG:
 376	case -EBUSY:
 377		adapter->force_reset = true;
 378
 379		mutex_unlock(&hw->hw_info.lock);
 380		return result;
 381	}
 382	mutex_unlock(&hw->hw_info.lock);
 383
 384	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 385		if ((epidx != hw->my_epid) &&
 386		    (hw->ep_shm_info[epidx].es_status ==
 387		     FJES_ZONING_STATUS_ENABLE)) {
 388			fjes_hw_raise_interrupt(hw, epidx,
 389						REG_ICTL_MASK_INFO_UPDATE);
 390			hw->ep_shm_info[epidx].ep_stats
 391				.send_intr_zoneupdate += 1;
 392		}
 393	}
 394
 395	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
 396
 397	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
 398		if (epidx == hw->my_epid)
 399			continue;
 400
 401		buf_pair = &hw->ep_shm_info[epidx];
 402
 403		spin_lock_irqsave(&hw->rx_status_lock, flags);
 404		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
 405				    netdev->mtu);
 406		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 407
 408		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
 409			mutex_lock(&hw->hw_info.lock);
 410			result =
 411			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
 412			mutex_unlock(&hw->hw_info.lock);
 413
 414			switch (result) {
 415			case 0:
 416				break;
 417			case -ENOMSG:
 418			case -EBUSY:
 419			default:
 420				adapter->force_reset = true;
 421				return result;
 422			}
 423
 424			hw->ep_shm_info[epidx].ep_stats
 425				.com_regist_buf_exec += 1;
 426		}
 427	}
 428
 429	return 0;
 430}
 431
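/* fjes_free_resources - Unregister shared buffers from partner EPs and reset
 * the hardware if required
 */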
 432static void fjes_free_resources(struct fjes_adapter *adapter)
 433{
 434	struct net_device *netdev = adapter->netdev;
 435	struct fjes_device_command_param param;
 436	struct ep_share_mem_info *buf_pair;
 437	struct fjes_hw *hw = &adapter->hw;
 438	bool reset_flag = false;
 439	unsigned long flags;
 440	int result;
 441	int epidx;
 442
 443	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 444		if (epidx == hw->my_epid)
 445			continue;
 446
 447		mutex_lock(&hw->hw_info.lock);
 448		result = fjes_hw_unregister_buff_addr(hw, epidx);
 449		mutex_unlock(&hw->hw_info.lock);
 450
 451		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
 452
 453		if (result)
 454			reset_flag = true;
 455
 456		buf_pair = &hw->ep_shm_info[epidx];
 457
 458		spin_lock_irqsave(&hw->rx_status_lock, flags);
 459		fjes_hw_setup_epbuf(&buf_pair->tx,
 460				    netdev->dev_addr, netdev->mtu);
 461		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 462
 463		clear_bit(epidx, &hw->txrx_stop_req_bit);
 464	}
 465
 466	if (reset_flag || adapter->force_reset) {
 467		result = fjes_hw_reset(hw);
 468
 469		adapter->force_reset = false;
 470
 471		if (result)
 472			adapter->open_guard = true;
 473
 474		hw->hw_info.buffer_share_bit = 0;
 475
 476		memset((void *)&param, 0, sizeof(param));
 477
 478		param.req_len = hw->hw_info.req_buf_size;
 479		param.req_start = __pa(hw->hw_info.req_buf);
 480		param.res_len = hw->hw_info.res_buf_size;
 481		param.res_start = __pa(hw->hw_info.res_buf);
 482		param.share_start = __pa(hw->hw_info.share->ep_status);
 483
 484		fjes_hw_init_command_registers(hw, &param);
 485	}
 486}
 487
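/* fjes_tx_stall_task - Wake the stopped TX queue once partner shared buffers
 * have room again
 */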
 488static void fjes_tx_stall_task(struct work_struct *work)
 489{
 490	struct fjes_adapter *adapter = container_of(work,
 491			struct fjes_adapter, tx_stall_task);
 492	struct net_device *netdev = adapter->netdev;
 493	struct fjes_hw *hw = &adapter->hw;
 494	int all_queue_available, sendable;
 495	enum ep_partner_status pstatus;
 496	int max_epid, my_epid, epid;
 497	union ep_buffer_info *info;
 498	int i;
 499
 500	if (((long)jiffies -
 501		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
 502		netif_wake_queue(netdev);
 503		return;
 504	}
 505
 506	my_epid = hw->my_epid;
 507	max_epid = hw->max_epid;
 508
 509	for (i = 0; i < 5; i++) {
 510		all_queue_available = 1;
 511
 512		for (epid = 0; epid < max_epid; epid++) {
 513			if (my_epid == epid)
 514				continue;
 515
 516			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
 517			sendable = (pstatus == EP_PARTNER_SHARED);
 518			if (!sendable)
 519				continue;
 520
 521			info = adapter->hw.ep_shm_info[epid].tx.info;
 522
 523			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
 524				return;
 525
 526			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
 527					 info->v1i.count_max)) {
 528				all_queue_available = 0;
 529				break;
 530			}
 531		}
 532
 533		if (all_queue_available) {
 534			netif_wake_queue(netdev);
 535			return;
 536		}
 537	}
 538
 539	usleep_range(50, 100);
 540
 541	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
 542}
 543
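/* fjes_force_close_task - Force the network interface down under rtnl_lock */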
 544static void fjes_force_close_task(struct work_struct *work)
 545{
 546	struct fjes_adapter *adapter = container_of(work,
 547			struct fjes_adapter, force_close_task);
 548	struct net_device *netdev = adapter->netdev;
 549
 550	rtnl_lock();
 551	dev_close(netdev);
 552	rtnl_unlock();
 553}
 554
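/* fjes_raise_intr_rxdata_task - Raise RX data interrupts toward partner EPs
 * with pending TX data
 */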
 555static void fjes_raise_intr_rxdata_task(struct work_struct *work)
 556{
 557	struct fjes_adapter *adapter = container_of(work,
 558			struct fjes_adapter, raise_intr_rxdata_task);
 559	struct fjes_hw *hw = &adapter->hw;
 560	enum ep_partner_status pstatus;
 561	int max_epid, my_epid, epid;
 562
 563	my_epid = hw->my_epid;
 564	max_epid = hw->max_epid;
 565
 566	for (epid = 0; epid < max_epid; epid++)
 567		hw->ep_shm_info[epid].tx_status_work = 0;
 568
 569	for (epid = 0; epid < max_epid; epid++) {
 570		if (epid == my_epid)
 571			continue;
 572
 573		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
 574		if (pstatus == EP_PARTNER_SHARED) {
 575			hw->ep_shm_info[epid].tx_status_work =
 576				hw->ep_shm_info[epid].tx.info->v1i.tx_status;
 577
 578			if (hw->ep_shm_info[epid].tx_status_work ==
 579				FJES_TX_DELAY_SEND_PENDING) {
 580				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
 581					FJES_TX_DELAY_SEND_NONE;
 582			}
 583		}
 584	}
 585
 586	for (epid = 0; epid < max_epid; epid++) {
 587		if (epid == my_epid)
 588			continue;
 589
 590		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
 591		if ((hw->ep_shm_info[epid].tx_status_work ==
 592		     FJES_TX_DELAY_SEND_PENDING) &&
 593		    (pstatus == EP_PARTNER_SHARED) &&
 594		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
 595		      FJES_RX_POLL_WORK)) {
 596			fjes_hw_raise_interrupt(hw, epid,
 597						REG_ICTL_MASK_RX_DATA);
 598			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
 599		}
 600	}
 601
 602	usleep_range(500, 1000);
 603}
 604
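/* fjes_tx_send - Copy a frame into the destination EP buffer and schedule the
 * RX notification work
 */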
 605static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
 606			void *data, size_t len)
 607{
 608	int retval;
 609
 610	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
 611					   data, len);
 612	if (retval)
 613		return retval;
 614
 615	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
 616		FJES_TX_DELAY_SEND_PENDING;
 617	if (!work_pending(&adapter->raise_intr_rxdata_task))
 618		queue_work(adapter->txrx_wq,
 619			   &adapter->raise_intr_rxdata_task);
 620
 621	retval = 0;
 622	return retval;
 623}
 624
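/* fjes_xmit_frame - Transmit entry point; deliver the frame to each
 * destination EP buffer
 */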
 625static netdev_tx_t
 626fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 627{
 628	struct fjes_adapter *adapter = netdev_priv(netdev);
 629	struct fjes_hw *hw = &adapter->hw;
 630
 631	int max_epid, my_epid, dest_epid;
 632	enum ep_partner_status pstatus;
 633	struct netdev_queue *cur_queue;
 634	char shortpkt[VLAN_ETH_HLEN];
 635	bool is_multi, vlan;
 636	struct ethhdr *eth;
 637	u16 queue_no = 0;
 638	u16 vlan_id = 0;
 639	netdev_tx_t ret;
 640	char *data;
 641	int len;
 642
 643	ret = NETDEV_TX_OK;
 644	is_multi = false;
 645	cur_queue = netdev_get_tx_queue(netdev, queue_no);
 646
 647	eth = (struct ethhdr *)skb->data;
 648	my_epid = hw->my_epid;
 649
  650	vlan = (vlan_get_tag(skb, &vlan_id) == 0);
 651
 652	data = skb->data;
 653	len = skb->len;
 654
 655	if (is_multicast_ether_addr(eth->h_dest)) {
 656		dest_epid = 0;
 657		max_epid = hw->max_epid;
 658		is_multi = true;
 659	} else if (is_local_ether_addr(eth->h_dest)) {
 660		dest_epid = eth->h_dest[ETH_ALEN - 1];
 661		max_epid = dest_epid + 1;
 662
 663		if ((eth->h_dest[0] == 0x02) &&
 664		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
 665			      eth->h_dest[3] | eth->h_dest[4])) &&
 666		    (dest_epid < hw->max_epid)) {
 667			;
 668		} else {
 669			dest_epid = 0;
 670			max_epid = 0;
 671			ret = NETDEV_TX_OK;
 672
 673			adapter->stats64.tx_packets += 1;
 674			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 675			adapter->stats64.tx_bytes += len;
 676			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 677		}
 678	} else {
 679		dest_epid = 0;
 680		max_epid = 0;
 681		ret = NETDEV_TX_OK;
 682
 683		adapter->stats64.tx_packets += 1;
 684		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
 685		adapter->stats64.tx_bytes += len;
 686		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 687	}
 688
 689	for (; dest_epid < max_epid; dest_epid++) {
 690		if (my_epid == dest_epid)
 691			continue;
 692
 693		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
 694		if (pstatus != EP_PARTNER_SHARED) {
 695			if (!is_multi)
 696				hw->ep_shm_info[dest_epid].ep_stats
 697					.tx_dropped_not_shared += 1;
 698			ret = NETDEV_TX_OK;
 699		} else if (!fjes_hw_check_epbuf_version(
 700				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
 701			/* version is NOT 0 */
 702			adapter->stats64.tx_carrier_errors += 1;
 703			hw->ep_shm_info[dest_epid].net_stats
 704						.tx_carrier_errors += 1;
 705			hw->ep_shm_info[dest_epid].ep_stats
 706					.tx_dropped_ver_mismatch += 1;
 707
 708			ret = NETDEV_TX_OK;
 709		} else if (!fjes_hw_check_mtu(
 710				&adapter->hw.ep_shm_info[dest_epid].rx,
 711				netdev->mtu)) {
 712			adapter->stats64.tx_dropped += 1;
 713			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
 714			adapter->stats64.tx_errors += 1;
 715			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
 716			hw->ep_shm_info[dest_epid].ep_stats
 717					.tx_dropped_buf_size_mismatch += 1;
 718
 719			ret = NETDEV_TX_OK;
 720		} else if (vlan &&
 721			   !fjes_hw_check_vlan_id(
 722				&adapter->hw.ep_shm_info[dest_epid].rx,
 723				vlan_id)) {
 724			hw->ep_shm_info[dest_epid].ep_stats
 725				.tx_dropped_vlanid_mismatch += 1;
 726			ret = NETDEV_TX_OK;
 727		} else {
 728			if (len < VLAN_ETH_HLEN) {
 729				memset(shortpkt, 0, VLAN_ETH_HLEN);
 730				memcpy(shortpkt, skb->data, skb->len);
 731				len = VLAN_ETH_HLEN;
 732				data = shortpkt;
 733			}
 734
 735			if (adapter->tx_retry_count == 0) {
 736				adapter->tx_start_jiffies = jiffies;
 737				adapter->tx_retry_count = 1;
 738			} else {
 739				adapter->tx_retry_count++;
 740			}
 741
 742			if (fjes_tx_send(adapter, dest_epid, data, len)) {
 743				if (is_multi) {
 744					ret = NETDEV_TX_OK;
 745				} else if (
 746					   ((long)jiffies -
 747					    (long)adapter->tx_start_jiffies) >=
 748					    FJES_TX_RETRY_TIMEOUT) {
 749					adapter->stats64.tx_fifo_errors += 1;
 750					hw->ep_shm_info[dest_epid].net_stats
 751								.tx_fifo_errors += 1;
 752					adapter->stats64.tx_errors += 1;
 753					hw->ep_shm_info[dest_epid].net_stats
 754								.tx_errors += 1;
 755
 756					ret = NETDEV_TX_OK;
 757				} else {
 758					netif_trans_update(netdev);
 759					hw->ep_shm_info[dest_epid].ep_stats
 760						.tx_buffer_full += 1;
 761					netif_tx_stop_queue(cur_queue);
 762
 763					if (!work_pending(&adapter->tx_stall_task))
 764						queue_work(adapter->txrx_wq,
 765							   &adapter->tx_stall_task);
 766
 767					ret = NETDEV_TX_BUSY;
 768				}
 769			} else {
 770				if (!is_multi) {
 771					adapter->stats64.tx_packets += 1;
 772					hw->ep_shm_info[dest_epid].net_stats
 773								.tx_packets += 1;
 774					adapter->stats64.tx_bytes += len;
 775					hw->ep_shm_info[dest_epid].net_stats
 776								.tx_bytes += len;
 777				}
 778
 779				adapter->tx_retry_count = 0;
 780				ret = NETDEV_TX_OK;
 781			}
 782		}
 783	}
 784
 785	if (ret == NETDEV_TX_OK) {
 786		dev_kfree_skb(skb);
 787		if (is_multi) {
 788			adapter->stats64.tx_packets += 1;
 789			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
  790			adapter->stats64.tx_bytes += len;
 791			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
 792		}
 793	}
 794
 795	return ret;
 796}
 797
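/* fjes_tx_retry - TX watchdog timeout handler; wake the queue to retry */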
 798static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
 799{
 800	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
 801
 802	netif_tx_wake_queue(queue);
 803}
 804
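/* fjes_get_stats64 - Return accumulated interface statistics */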
 805static void
 806fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 807{
 808	struct fjes_adapter *adapter = netdev_priv(netdev);
 809
 810	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
 811}
 812
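/* fjes_change_mtu - Validate the new MTU and re-setup shared buffers while
 * the device is quiesced
 */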
 813static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
 814{
 815	struct fjes_adapter *adapter = netdev_priv(netdev);
 816	bool running = netif_running(netdev);
 817	struct fjes_hw *hw = &adapter->hw;
 818	unsigned long flags;
 819	int ret = -EINVAL;
 820	int idx, epidx;
 821
 822	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
 823		if (new_mtu <= fjes_support_mtu[idx]) {
 824			new_mtu = fjes_support_mtu[idx];
 825			if (new_mtu == netdev->mtu)
 826				return 0;
 827
 828			ret = 0;
 829			break;
 830		}
 831	}
 832
 833	if (ret)
 834		return ret;
 835
 836	if (running) {
 837		spin_lock_irqsave(&hw->rx_status_lock, flags);
 838		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 839			if (epidx == hw->my_epid)
 840				continue;
 841			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
 842				~FJES_RX_MTU_CHANGING_DONE;
 843		}
 844		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 845
 846		netif_tx_stop_all_queues(netdev);
 847		netif_carrier_off(netdev);
 848		cancel_work_sync(&adapter->tx_stall_task);
 849		napi_disable(&adapter->napi);
 850
 851		msleep(1000);
 852
 853		netif_tx_stop_all_queues(netdev);
 854	}
 855
 856	netdev->mtu = new_mtu;
 857
 858	if (running) {
 859		for (epidx = 0; epidx < hw->max_epid; epidx++) {
 860			if (epidx == hw->my_epid)
 861				continue;
 862
 863			spin_lock_irqsave(&hw->rx_status_lock, flags);
 864			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
 865					    netdev->dev_addr,
 866					    netdev->mtu);
 867
 868			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
 869				FJES_RX_MTU_CHANGING_DONE;
 870			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 871		}
 872
 873		netif_tx_wake_all_queues(netdev);
 874		netif_carrier_on(netdev);
 875		napi_enable(&adapter->napi);
 876		napi_schedule(&adapter->napi);
 877	}
 878
 879	return ret;
 880}
 881
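/* fjes_vlan_rx_add_vid - Register a VLAN ID in each partner EP's TX buffer */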
 882static int fjes_vlan_rx_add_vid(struct net_device *netdev,
 883				__be16 proto, u16 vid)
 884{
 885	struct fjes_adapter *adapter = netdev_priv(netdev);
 886	bool ret = true;
 887	int epid;
 888
 889	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 890		if (epid == adapter->hw.my_epid)
 891			continue;
 892
 893		if (!fjes_hw_check_vlan_id(
 894			&adapter->hw.ep_shm_info[epid].tx, vid))
 895			ret = fjes_hw_set_vlan_id(
 896				&adapter->hw.ep_shm_info[epid].tx, vid);
 897	}
 898
 899	return ret ? 0 : -ENOSPC;
 900}
 901
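/* fjes_vlan_rx_kill_vid - Remove a VLAN ID from each partner EP's TX buffer */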
 902static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
 903				 __be16 proto, u16 vid)
 904{
 905	struct fjes_adapter *adapter = netdev_priv(netdev);
 906	int epid;
 907
 908	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
 909		if (epid == adapter->hw.my_epid)
 910			continue;
 911
 912		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
 913	}
 914
 915	return 0;
 916}
 917
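/* fjes_txrx_stop_req_irq - Handle a TX/RX stop request interrupt from a
 * partner EP
 */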
 918static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
 919				   int src_epid)
 920{
 921	struct fjes_hw *hw = &adapter->hw;
 922	enum ep_partner_status status;
 923	unsigned long flags;
 924
 925	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 926	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
 927	switch (status) {
 928	case EP_PARTNER_UNSHARE:
 929	case EP_PARTNER_COMPLETE:
 930	default:
 931		break;
 932	case EP_PARTNER_WAITING:
 933		if (src_epid < hw->my_epid) {
 934			spin_lock_irqsave(&hw->rx_status_lock, flags);
 935			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 936				FJES_RX_STOP_REQ_DONE;
 937			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 938
 939			clear_bit(src_epid, &hw->txrx_stop_req_bit);
 940			set_bit(src_epid, &adapter->unshare_watch_bitmask);
 941
 942			if (!work_pending(&adapter->unshare_watch_task))
 943				queue_work(adapter->control_wq,
 944					   &adapter->unshare_watch_task);
 945		}
 946		break;
 947	case EP_PARTNER_SHARED:
 948		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
 949		    FJES_RX_STOP_REQ_REQUEST) {
 950			set_bit(src_epid, &hw->epstop_req_bit);
 951			if (!work_pending(&hw->epstop_task))
 952				queue_work(adapter->control_wq,
 953					   &hw->epstop_task);
 954		}
 955		break;
 956	}
 957	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
 958}
 959
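/* fjes_stop_req_irq - Handle a device stop request interrupt from a partner EP */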
 960static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
 961{
 962	struct fjes_hw *hw = &adapter->hw;
 963	enum ep_partner_status status;
 964	unsigned long flags;
 965
 966	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
 967
 968	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 969	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
 970	switch (status) {
 971	case EP_PARTNER_WAITING:
 972		spin_lock_irqsave(&hw->rx_status_lock, flags);
 973		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 974				FJES_RX_STOP_REQ_DONE;
 975		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 976		clear_bit(src_epid, &hw->txrx_stop_req_bit);
 977		fallthrough;
 978	case EP_PARTNER_UNSHARE:
 979	case EP_PARTNER_COMPLETE:
 980	default:
 981		set_bit(src_epid, &adapter->unshare_watch_bitmask);
 982		if (!work_pending(&adapter->unshare_watch_task))
 983			queue_work(adapter->control_wq,
 984				   &adapter->unshare_watch_task);
 985		break;
 986	case EP_PARTNER_SHARED:
 987		set_bit(src_epid, &hw->epstop_req_bit);
 988
 989		if (!work_pending(&hw->epstop_task))
 990			queue_work(adapter->control_wq, &hw->epstop_task);
 991		break;
 992	}
 993	trace_fjes_stop_req_irq_post(hw, src_epid);
 994}
 995
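/* fjes_update_zone_irq - Handle a zone information update interrupt */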
 996static void fjes_update_zone_irq(struct fjes_adapter *adapter,
 997				 int src_epid)
 998{
 999	struct fjes_hw *hw = &adapter->hw;
1000
1001	if (!work_pending(&hw->update_zone_task))
1002		queue_work(adapter->control_wq, &hw->update_zone_task);
1003}
1004
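/* fjes_intr - Interrupt handler; dispatch on the captured interrupt cause */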
1005static irqreturn_t fjes_intr(int irq, void *data)
1006{
1007	struct fjes_adapter *adapter = data;
1008	struct fjes_hw *hw = &adapter->hw;
1009	irqreturn_t ret;
1010	u32 icr;
1011
1012	icr = fjes_hw_capture_interrupt_status(hw);
1013
1014	if (icr & REG_IS_MASK_IS_ASSERT) {
1015		if (icr & REG_ICTL_MASK_RX_DATA) {
1016			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
1017			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1018				.recv_intr_rx += 1;
1019		}
1020
1021		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
1022			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1023			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1024				.recv_intr_stop += 1;
1025		}
1026
1027		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
1028			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
1029			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1030				.recv_intr_unshare += 1;
1031		}
1032
1033		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
1034			fjes_hw_set_irqmask(hw,
1035					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
1036
1037		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
1038			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
1039			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
1040				.recv_intr_zoneupdate += 1;
1041		}
1042
1043		ret = IRQ_HANDLED;
1044	} else {
1045		ret = IRQ_NONE;
1046	}
1047
1048	return ret;
1049}
1050
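/* fjes_rxframe_search_exist - Find the next partner EP with a non-empty RX buffer */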
1051static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
1052				     int start_epid)
1053{
1054	struct fjes_hw *hw = &adapter->hw;
1055	enum ep_partner_status pstatus;
1056	int max_epid, cur_epid;
1057	int i;
1058
1059	max_epid = hw->max_epid;
1060	start_epid = (start_epid + 1 + max_epid) % max_epid;
1061
1062	for (i = 0; i < max_epid; i++) {
1063		cur_epid = (start_epid + i) % max_epid;
1064		if (cur_epid == hw->my_epid)
1065			continue;
1066
1067		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
1068		if (pstatus == EP_PARTNER_SHARED) {
1069			if (!fjes_hw_epbuf_rx_is_empty(
1070				&hw->ep_shm_info[cur_epid].rx))
1071				return cur_epid;
1072		}
1073	}
1074	return -1;
1075}
1076
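/* fjes_rxframe_get - Return the address and size of the next received frame */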
1077static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
1078			      int *cur_epid)
1079{
1080	void *frame;
1081
1082	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
1083	if (*cur_epid < 0)
1084		return NULL;
1085
1086	frame =
1087	fjes_hw_epbuf_rx_curpkt_get_addr(
1088		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
1089
1090	return frame;
1091}
1092
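/* fjes_rxframe_release - Drop the current packet from the EP's RX buffer */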
1093static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
1094{
1095	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
1096}
1097
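/* fjes_rx_irq - RX data interrupt; mask further RX interrupts and schedule
 * NAPI polling
 */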
1098static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
1099{
1100	struct fjes_hw *hw = &adapter->hw;
1101
1102	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
1103
1104	adapter->unset_rx_last = true;
1105	napi_schedule(&adapter->napi);
1106}
1107
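/* fjes_poll - NAPI poll routine; deliver received frames up to the budget */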
1108static int fjes_poll(struct napi_struct *napi, int budget)
1109{
1110	struct fjes_adapter *adapter =
1111			container_of(napi, struct fjes_adapter, napi);
1112	struct net_device *netdev = napi->dev;
1113	struct fjes_hw *hw = &adapter->hw;
1114	struct sk_buff *skb;
1115	int work_done = 0;
1116	int cur_epid = 0;
1117	int epidx;
1118	size_t frame_len;
1119	void *frame;
1120
1121	spin_lock(&hw->rx_status_lock);
1122	for (epidx = 0; epidx < hw->max_epid; epidx++) {
1123		if (epidx == hw->my_epid)
1124			continue;
1125
1126		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1127		    EP_PARTNER_SHARED)
1128			adapter->hw.ep_shm_info[epidx]
1129				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
1130	}
1131	spin_unlock(&hw->rx_status_lock);
1132
1133	while (work_done < budget) {
1134		prefetch(&adapter->hw);
1135		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
1136
1137		if (frame) {
1138			skb = napi_alloc_skb(napi, frame_len);
1139			if (!skb) {
1140				adapter->stats64.rx_dropped += 1;
1141				hw->ep_shm_info[cur_epid].net_stats
1142							 .rx_dropped += 1;
1143				adapter->stats64.rx_errors += 1;
1144				hw->ep_shm_info[cur_epid].net_stats
1145							 .rx_errors += 1;
1146			} else {
1147				skb_put_data(skb, frame, frame_len);
1148				skb->protocol = eth_type_trans(skb, netdev);
1149				skb->ip_summed = CHECKSUM_UNNECESSARY;
1150
1151				netif_receive_skb(skb);
1152
1153				work_done++;
1154
1155				adapter->stats64.rx_packets += 1;
1156				hw->ep_shm_info[cur_epid].net_stats
1157							 .rx_packets += 1;
1158				adapter->stats64.rx_bytes += frame_len;
1159				hw->ep_shm_info[cur_epid].net_stats
1160							 .rx_bytes += frame_len;
1161
1162				if (is_multicast_ether_addr(
1163					((struct ethhdr *)frame)->h_dest)) {
1164					adapter->stats64.multicast += 1;
1165					hw->ep_shm_info[cur_epid].net_stats
1166								 .multicast += 1;
1167				}
1168			}
1169
1170			fjes_rxframe_release(adapter, cur_epid);
1171			adapter->unset_rx_last = true;
1172		} else {
1173			break;
1174		}
1175	}
1176
1177	if (work_done < budget) {
1178		napi_complete_done(napi, work_done);
1179
1180		if (adapter->unset_rx_last) {
1181			adapter->rx_last_jiffies = jiffies;
1182			adapter->unset_rx_last = false;
1183		}
1184
1185		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
1186			napi_reschedule(napi);
1187		} else {
1188			spin_lock(&hw->rx_status_lock);
1189			for (epidx = 0; epidx < hw->max_epid; epidx++) {
1190				if (epidx == hw->my_epid)
1191					continue;
1192				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1193				    EP_PARTNER_SHARED)
1194					adapter->hw.ep_shm_info[epidx].tx
1195						   .info->v1i.rx_status &=
1196						~FJES_RX_POLL_WORK;
1197			}
1198			spin_unlock(&hw->rx_status_lock);
1199
1200			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
1201		}
1202	}
1203
1204	return work_done;
1205}
1206
1207/* fjes_probe - Device Initialization Routine */
1208static int fjes_probe(struct platform_device *plat_dev)
1209{
1210	struct fjes_adapter *adapter;
1211	struct net_device *netdev;
1212	struct resource *res;
1213	struct fjes_hw *hw;
1214	int err;
1215
1216	err = -ENOMEM;
1217	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1218				 NET_NAME_UNKNOWN, fjes_netdev_setup,
1219				 FJES_MAX_QUEUES);
1220
1221	if (!netdev)
1222		goto err_out;
1223
1224	SET_NETDEV_DEV(netdev, &plat_dev->dev);
1225
1226	dev_set_drvdata(&plat_dev->dev, netdev);
1227	adapter = netdev_priv(netdev);
1228	adapter->netdev = netdev;
1229	adapter->plat_dev = plat_dev;
1230	hw = &adapter->hw;
1231	hw->back = adapter;
1232
1233	/* setup the private structure */
1234	err = fjes_sw_init(adapter);
1235	if (err)
1236		goto err_free_netdev;
1237
1238	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1239	adapter->force_reset = false;
1240	adapter->open_guard = false;
1241
1242	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1243	if (unlikely(!adapter->txrx_wq)) {
1244		err = -ENOMEM;
1245		goto err_free_netdev;
1246	}
1247
1248	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1249					      WQ_MEM_RECLAIM, 0);
1250	if (unlikely(!adapter->control_wq)) {
1251		err = -ENOMEM;
1252		goto err_free_txrx_wq;
1253	}
1254
1255	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1256	INIT_WORK(&adapter->raise_intr_rxdata_task,
1257		  fjes_raise_intr_rxdata_task);
1258	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1259	adapter->unshare_watch_bitmask = 0;
1260
1261	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1262	adapter->interrupt_watch_enable = false;
1263
1264	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1265	hw->hw_res.start = res->start;
1266	hw->hw_res.size = resource_size(res);
1267	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1268	err = fjes_hw_init(&adapter->hw);
1269	if (err)
1270		goto err_free_control_wq;
1271
 1272	/* setup MAC address (02:00:00:00:00:[epid]) */
1273	netdev->dev_addr[0] = 2;
1274	netdev->dev_addr[1] = 0;
1275	netdev->dev_addr[2] = 0;
1276	netdev->dev_addr[3] = 0;
1277	netdev->dev_addr[4] = 0;
1278	netdev->dev_addr[5] = hw->my_epid; /* EPID */
1279
1280	err = register_netdev(netdev);
1281	if (err)
1282		goto err_hw_exit;
1283
1284	netif_carrier_off(netdev);
1285
1286	fjes_dbg_adapter_init(adapter);
1287
1288	return 0;
1289
1290err_hw_exit:
1291	fjes_hw_exit(&adapter->hw);
1292err_free_control_wq:
1293	destroy_workqueue(adapter->control_wq);
1294err_free_txrx_wq:
1295	destroy_workqueue(adapter->txrx_wq);
1296err_free_netdev:
1297	free_netdev(netdev);
1298err_out:
1299	return err;
1300}
1301
1302/* fjes_remove - Device Removal Routine */
1303static int fjes_remove(struct platform_device *plat_dev)
1304{
1305	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
1306	struct fjes_adapter *adapter = netdev_priv(netdev);
1307	struct fjes_hw *hw = &adapter->hw;
1308
1309	fjes_dbg_adapter_exit(adapter);
1310
1311	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
1312	cancel_work_sync(&adapter->unshare_watch_task);
1313	cancel_work_sync(&adapter->raise_intr_rxdata_task);
1314	cancel_work_sync(&adapter->tx_stall_task);
1315	if (adapter->control_wq)
1316		destroy_workqueue(adapter->control_wq);
1317	if (adapter->txrx_wq)
1318		destroy_workqueue(adapter->txrx_wq);
1319
1320	unregister_netdev(netdev);
1321
1322	fjes_hw_exit(hw);
1323
1324	netif_napi_del(&adapter->napi);
1325
1326	free_netdev(netdev);
1327
1328	return 0;
1329}
1330
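/* fjes_sw_init - Initialize driver software structures (NAPI context) */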
1331static int fjes_sw_init(struct fjes_adapter *adapter)
1332{
1333	struct net_device *netdev = adapter->netdev;
1334
1335	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
1336
1337	return 0;
1338}
1339
1340/* fjes_netdev_setup - netdevice initialization routine */
1341static void fjes_netdev_setup(struct net_device *netdev)
1342{
1343	ether_setup(netdev);
1344
1345	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
1346	netdev->netdev_ops = &fjes_netdev_ops;
1347	fjes_set_ethtool_ops(netdev);
1348	netdev->mtu = fjes_support_mtu[3];
1349	netdev->min_mtu = fjes_support_mtu[0];
1350	netdev->max_mtu = fjes_support_mtu[3];
1351	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1352}
1353
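/* fjes_irq_watch_task - Periodically invoke the interrupt handler while
 * interrupt watching is enabled
 */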
1354static void fjes_irq_watch_task(struct work_struct *work)
1355{
1356	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1357			struct fjes_adapter, interrupt_watch_task);
1358
1359	local_irq_disable();
1360	fjes_intr(adapter->hw.hw_res.irq, adapter);
1361	local_irq_enable();
1362
1363	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1364		napi_schedule(&adapter->napi);
1365
1366	if (adapter->interrupt_watch_enable) {
1367		if (!delayed_work_pending(&adapter->interrupt_watch_task))
1368			queue_delayed_work(adapter->control_wq,
1369					   &adapter->interrupt_watch_task,
1370					   FJES_IRQ_WATCH_DELAY);
1371	}
1372}
1373
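/* fjes_watch_unshare_task - Wait for partner EPs to complete the stop request
 * and unregister their shared buffers
 */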
1374static void fjes_watch_unshare_task(struct work_struct *work)
1375{
1376	struct fjes_adapter *adapter =
1377	container_of(work, struct fjes_adapter, unshare_watch_task);
1378
1379	struct net_device *netdev = adapter->netdev;
1380	struct fjes_hw *hw = &adapter->hw;
1381
1382	int unshare_watch, unshare_reserve;
1383	int max_epid, my_epid, epidx;
1384	int stop_req, stop_req_done;
1385	ulong unshare_watch_bitmask;
1386	unsigned long flags;
1387	int wait_time = 0;
1388	int is_shared;
1389	int ret;
1390
1391	my_epid = hw->my_epid;
1392	max_epid = hw->max_epid;
1393
1394	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1395	adapter->unshare_watch_bitmask = 0;
1396
1397	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1398	       (wait_time < 3000)) {
1399		for (epidx = 0; epidx < max_epid; epidx++) {
1400			if (epidx == my_epid)
1401				continue;
1402
1403			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1404							   epidx);
1405
1406			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1407
1408			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1409					FJES_RX_STOP_REQ_DONE;
1410
1411			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1412
1413			unshare_reserve = test_bit(epidx,
1414						   &hw->hw_info.buffer_unshare_reserve_bit);
1415
1416			if ((!stop_req ||
 1417			     (is_shared && !stop_req_done)) &&
1418			    (is_shared || !unshare_watch || !unshare_reserve))
1419				continue;
1420
1421			mutex_lock(&hw->hw_info.lock);
1422			ret = fjes_hw_unregister_buff_addr(hw, epidx);
1423			switch (ret) {
1424			case 0:
1425				break;
1426			case -ENOMSG:
1427			case -EBUSY:
1428			default:
1429				if (!work_pending(
1430					&adapter->force_close_task)) {
1431					adapter->force_reset = true;
1432					schedule_work(
1433						&adapter->force_close_task);
1434				}
1435				break;
1436			}
1437			mutex_unlock(&hw->hw_info.lock);
1438			hw->ep_shm_info[epidx].ep_stats
1439					.com_unregist_buf_exec += 1;
1440
1441			spin_lock_irqsave(&hw->rx_status_lock, flags);
1442			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1443					    netdev->dev_addr, netdev->mtu);
1444			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1445
1446			clear_bit(epidx, &hw->txrx_stop_req_bit);
1447			clear_bit(epidx, &unshare_watch_bitmask);
1448			clear_bit(epidx,
1449				  &hw->hw_info.buffer_unshare_reserve_bit);
1450		}
1451
1452		msleep(100);
1453		wait_time += 100;
1454	}
1455
1456	if (hw->hw_info.buffer_unshare_reserve_bit) {
1457		for (epidx = 0; epidx < max_epid; epidx++) {
1458			if (epidx == my_epid)
1459				continue;
1460
1461			if (test_bit(epidx,
1462				     &hw->hw_info.buffer_unshare_reserve_bit)) {
1463				mutex_lock(&hw->hw_info.lock);
1464
1465				ret = fjes_hw_unregister_buff_addr(hw, epidx);
1466				switch (ret) {
1467				case 0:
1468					break;
1469				case -ENOMSG:
1470				case -EBUSY:
1471				default:
1472					if (!work_pending(
1473						&adapter->force_close_task)) {
1474						adapter->force_reset = true;
1475						schedule_work(
1476							&adapter->force_close_task);
1477					}
1478					break;
1479				}
1480				mutex_unlock(&hw->hw_info.lock);
1481
1482				hw->ep_shm_info[epidx].ep_stats
1483					.com_unregist_buf_exec += 1;
1484
1485				spin_lock_irqsave(&hw->rx_status_lock, flags);
1486				fjes_hw_setup_epbuf(
1487					&hw->ep_shm_info[epidx].tx,
1488					netdev->dev_addr, netdev->mtu);
1489				spin_unlock_irqrestore(&hw->rx_status_lock,
1490						       flags);
1491
1492				clear_bit(epidx, &hw->txrx_stop_req_bit);
1493				clear_bit(epidx, &unshare_watch_bitmask);
1494				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1495			}
1496
1497			if (test_bit(epidx, &unshare_watch_bitmask)) {
1498				spin_lock_irqsave(&hw->rx_status_lock, flags);
1499				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1500						~FJES_RX_STOP_REQ_DONE;
1501				spin_unlock_irqrestore(&hw->rx_status_lock,
1502						       flags);
1503			}
1504		}
1505	}
1506}
1507
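/* acpi_find_extended_socket_device - ACPI namespace walk callback; look for
 * the Extended Socket device
 */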
1508static acpi_status
1509acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
1510				 void *context, void **return_value)
1511{
1512	struct acpi_device *device;
1513	bool *found = context;
1514	int result;
1515
1516	result = acpi_bus_get_device(obj_handle, &device);
1517	if (result)
1518		return AE_OK;
1519
1520	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
1521		return AE_OK;
1522
1523	if (!is_extended_socket_device(device))
1524		return AE_OK;
1525
1526	if (acpi_check_extended_socket_status(device))
1527		return AE_OK;
1528
1529	*found = true;
1530	return AE_CTRL_TERMINATE;
1531}
1532
1533/* fjes_init_module - Driver Registration Routine */
1534static int __init fjes_init_module(void)
1535{
1536	bool found = false;
1537	int result;
1538
1539	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
1540			    acpi_find_extended_socket_device, NULL, &found,
1541			    NULL);
1542
1543	if (!found)
1544		return -ENODEV;
1545
1546	pr_info("%s - version %s - %s\n",
1547		fjes_driver_string, fjes_driver_version, fjes_copyright);
1548
1549	fjes_dbg_init();
1550
1551	result = platform_driver_register(&fjes_driver);
1552	if (result < 0) {
1553		fjes_dbg_exit();
1554		return result;
1555	}
1556
1557	result = acpi_bus_register_driver(&fjes_acpi_driver);
1558	if (result < 0)
1559		goto fail_acpi_driver;
1560
1561	return 0;
1562
1563fail_acpi_driver:
1564	platform_driver_unregister(&fjes_driver);
1565	fjes_dbg_exit();
1566	return result;
1567}
1568
1569module_init(fjes_init_module);
1570
1571/* fjes_exit_module - Driver Exit Cleanup Routine */
1572static void __exit fjes_exit_module(void)
1573{
1574	acpi_bus_unregister_driver(&fjes_acpi_driver);
1575	platform_driver_unregister(&fjes_driver);
1576	fjes_dbg_exit();
1577}
1578
1579module_exit(fjes_exit_module);