/*
 *  linux/drivers/scsi/esas2r/esas2r_disc.c
 *      esas2r device discovery routines
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  NO WARRANTY
 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 *  solely responsible for determining the appropriateness of using and
 *  distributing the Program and assumes all risks associated with its
 *  exercise of rights under this Agreement, including but not limited to
 *  the risks and costs of program errors, damage to or loss of data,
 *  programs or equipment, and unavailability or interruption of operations.
 *
 *  DISCLAIMER OF LIABILITY
 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "esas2r.h"

/* Miscellaneous internal discovery routines */
static void esas2r_disc_abort(struct esas2r_adapter *a,
			      struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
				      struct esas2r_request *rq);

/* Internal discovery routines that process the states */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
				       struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq);

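/*
 * Reset discovery state and compute, from NVRAM settings, how long to
 * wait for devices and how many devices to wait for.  After a chip reset
 * or a power management event, the previously discovered device count is
 * used instead of the configured wait count.
 */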
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
	struct esas2r_sas_nvram *nvr = a->nvram;

	esas2r_trace_enter();

	clear_bit(AF_DISC_IN_PROG, &a->flags);
	clear_bit(AF2_DEV_SCAN, &a->flags2);
	clear_bit(AF2_DEV_CNT_OK, &a->flags2);

	a->disc_start_time = jiffies_to_msecs(jiffies);
	a->disc_wait_time = nvr->dev_wait_time * 1000;
	a->disc_wait_cnt = nvr->dev_wait_count;

	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

	/*
	 * If we are doing chip reset or power management processing, always
	 * wait for devices.  Use the NVRAM device count if it is greater than
	 * the number of previously discovered devices.
	 */

	esas2r_hdebug("starting discovery...");

	a->general_req.interrupt_cx = NULL;

	if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
	    test_bit(AF_POWER_MGT, &a->flags)) {
		if (a->prev_dev_cnt == 0) {
			/* Don't bother waiting if there is nothing to wait
			 * for.
			 */
			a->disc_wait_time = 0;
		} else {
			/*
			 * Set the device wait count to what was previously
			 * found.  We don't care if the user only configured
			 * a time because we know the exact count to wait for.
			 * There is no need to honor the user's wishes to
			 * always wait the full time.
			 */
			a->disc_wait_cnt = a->prev_dev_cnt;

			/*
			 * Bump the minimum wait time to 15 seconds since the
			 * default is 3 (system boot or the boot driver usually
			 * buys us more time).
			 */
			if (a->disc_wait_time < 15000)
				a->disc_wait_time = 15000;
		}
	}

	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
	esas2r_trace("disc wait time: %d", a->disc_wait_time);

	if (a->disc_wait_time == 0)
		esas2r_disc_check_complete(a);

	esas2r_trace_exit();
}

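/* Start a queued discovery event, if there is one, under the memory lock. */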
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->mem_lock, flags);

	if (a->disc_ctx.disc_evt)
		esas2r_disc_start_port(a);

	spin_unlock_irqrestore(&a->mem_lock, flags);
}

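/*
 * Poll for discovery work: service pending interrupts, start any queued
 * discovery event, and drive the current discovery request forward.
 * Used while discovery cannot be interrupt driven.
 */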
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;

	/* service any pending interrupts first */

	esas2r_polled_interrupt(a);

	/*
	 * Now, interrupt processing may have queued up a discovery event.  Go
	 * see if we have one to start.  We couldn't start it in the ISR since
	 * polled discovery would cause a deadlock.
	 */

	esas2r_disc_start_waiting(a);

	if (rq->interrupt_cx == NULL)
		return;

	if (rq->req_stat == RS_STARTED
	    && rq->timeout <= RQ_MAX_TIMEOUT) {
		/* wait for the current discovery request to complete. */
		esas2r_wait_request(a, rq);

		if (rq->req_stat == RS_TIMEOUT) {
			esas2r_disc_abort(a, rq);
			esas2r_local_reset_adapter(a);
			return;
		}
	}

	if (rq->req_stat == RS_PENDING
	    || rq->req_stat == RS_STARTED)
		return;

	esas2r_disc_continue(a, rq);
}

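/*
 * Decide whether we should keep waiting for devices to arrive.  While
 * the wait time and device count criteria are unmet, this schedules
 * periodic device scans; once waiting is over, it fixes up deferred
 * requests and reports the port change.
 */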
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
	unsigned long flags;

	esas2r_trace_enter();

	/* check to see if we should be waiting for devices */
	if (a->disc_wait_time) {
		u32 currtime = jiffies_to_msecs(jiffies);
		u32 time = currtime - a->disc_start_time;

		/*
		 * Wait until the device wait time is exhausted or the device
		 * wait count is satisfied.
		 */
		if (time < a->disc_wait_time
		    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
			|| a->disc_wait_cnt == 0)) {
			/* After three seconds of waiting, schedule a scan. */
			if (time >= 3000
			    && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
				spin_lock_irqsave(&a->mem_lock, flags);
				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
				spin_unlock_irqrestore(&a->mem_lock, flags);
			}

			esas2r_trace_exit();
			return;
		}

		/*
		 * We are done waiting...we think.  Adjust the wait time to
		 * consume events after the count is met.
		 */
		if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
			a->disc_wait_time = time + 3000;

		/* If we haven't done a full scan yet, do it now. */
		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);
			esas2r_trace_exit();
			return;
		}

		/*
		 * Now, if there is still time left to consume events, continue
		 * waiting.
		 */
		if (time < a->disc_wait_time) {
			esas2r_trace_exit();
			return;
		}
	} else {
		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
			spin_lock_irqsave(&a->mem_lock, flags);
			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
			spin_unlock_irqrestore(&a->mem_lock, flags);
		}
	}

	/* We want to stop waiting for devices. */
	a->disc_wait_time = 0;

	if (test_bit(AF_DISC_POLLED, &a->flags) &&
	    test_bit(AF_DISC_IN_PROG, &a->flags)) {
		/*
		 * Polled discovery is still pending so continue the active
		 * discovery until it is done.  At that point, we will stop
		 * polled discovery and transition to interrupt driven
		 * discovery.
		 */
	} else {
		/*
		 * Done waiting for devices.  Note that we get here immediately
		 * after deferred waiting completes because that is interrupt
		 * driven; i.e., there is no transition.
		 */
		esas2r_disc_fix_curr_requests(a);
		clear_bit(AF_DISC_PENDING, &a->flags);

		/*
		 * We have deferred target state changes until now because we
		 * don't want to report any removals (due to the first arrival)
		 * until the device wait time expires.
		 */
		set_bit(AF_PORT_CHANGE, &a->flags);
	}

	esas2r_trace_exit();
}

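/*
 * Record a discovery event in the discovery context and start discovery
 * unless a chip reset is pending or polled discovery is active (starting
 * it from the ISR would deadlock).
 */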
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
	struct esas2r_disc_context *dc = &a->disc_ctx;

	esas2r_trace_enter();

	esas2r_trace("disc_event: %d", disc_evt);

	/* Initialize the discovery context */
	dc->disc_evt |= disc_evt;

	/*
	 * Don't start discovery before or during polled discovery.  If we did,
	 * we would have a deadlock if we are in the ISR already.
	 */
	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_DISC_POLLED, &a->flags))
		esas2r_disc_start_port(a);

	esas2r_trace_exit();
}

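/*
 * Begin processing the next queued discovery event, if discovery is not
 * already in progress.  Returns false when there is nothing to start or
 * when starting must be deferred.
 */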
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	struct esas2r_disc_context *dc = &a->disc_ctx;
	bool ret;

	esas2r_trace_enter();

	if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
		esas2r_trace_exit();

		return false;
	}

	/* If there is a discovery waiting, process it. */
	if (dc->disc_evt) {
		if (test_bit(AF_DISC_POLLED, &a->flags)
		    && a->disc_wait_time == 0) {
			/*
			 * We are doing polled discovery, but we no longer want
			 * to wait for devices.  Stop polled discovery and
			 * transition to interrupt driven discovery.
			 */

			esas2r_trace_exit();

			return false;
		}
	} else {
		/* Discovery is complete. */

		esas2r_hdebug("disc done");

		set_bit(AF_PORT_CHANGE, &a->flags);

		esas2r_trace_exit();

		return false;
	}

	/* Handle the discovery context */
	esas2r_trace("disc_evt: %d", dc->disc_evt);
	set_bit(AF_DISC_IN_PROG, &a->flags);
	dc->flags = 0;

	if (test_bit(AF_DISC_POLLED, &a->flags))
		dc->flags |= DCF_POLLED;

	rq->interrupt_cx = dc;
	rq->req_stat = RS_SUCCESS;

	/* Decode the event code */
	if (dc->disc_evt & DCDE_DEV_SCAN) {
		dc->disc_evt &= ~DCDE_DEV_SCAN;

		dc->flags |= DCF_DEV_SCAN;
		dc->state = DCS_BLOCK_DEV_SCAN;
	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
		dc->disc_evt &= ~DCDE_DEV_CHANGE;

		dc->flags |= DCF_DEV_CHANGE;
		dc->state = DCS_DEV_RMV;
	}

	/* Continue interrupt driven discovery */
	if (!test_bit(AF_DISC_POLLED, &a->flags))
		ret = esas2r_disc_continue(a, rq);
	else
		ret = true;

	esas2r_trace_exit();

	return ret;
}

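/*
 * The discovery state machine.  Run states until one issues a firmware
 * request (its handler returns true) or until discovery completes, at
 * which point any newly queued discovery is started.
 */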
static bool esas2r_disc_continue(struct esas2r_adapter *a,
				 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	/* Device discovery/removal */
	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
		rslt = false;

		switch (dc->state) {
		case DCS_DEV_RMV:

			rslt = esas2r_disc_dev_remove(a, rq);
			break;

		case DCS_DEV_ADD:

			rslt = esas2r_disc_dev_add(a, rq);
			break;

		case DCS_BLOCK_DEV_SCAN:

			rslt = esas2r_disc_block_dev_scan(a, rq);
			break;

		case DCS_RAID_GRP_INFO:

			rslt = esas2r_disc_raid_grp_info(a, rq);
			break;

		case DCS_PART_INFO:

			rslt = esas2r_disc_part_info(a, rq);
			break;

		case DCS_PT_DEV_INFO:

			rslt = esas2r_disc_passthru_dev_info(a, rq);
			break;

		case DCS_PT_DEV_ADDR:

			rslt = esas2r_disc_passthru_dev_addr(a, rq);
			break;

		case DCS_DISC_DONE:

			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
			break;

		default:

			esas2r_bugon();
			dc->state = DCS_DISC_DONE;
			break;
		}

		if (rslt)
			return true;
	}

	/* Discovery is done...for now. */
	rq->interrupt_cx = NULL;

	if (!test_bit(AF_DISC_PENDING, &a->flags))
		esas2r_disc_fix_curr_requests(a);

	clear_bit(AF_DISC_IN_PROG, &a->flags);

	/* Start the next discovery. */
	return esas2r_disc_start_port(a);
}

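/*
 * Submit a discovery request to the adapter, deferring it if a chip
 * reset is pending or firmware is being flashed.
 */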
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	unsigned long flags;

	/* Set the timeout to a minimum value. */
	if (rq->timeout < ESAS2R_DEFAULT_TMO)
		rq->timeout = ESAS2R_DEFAULT_TMO;

	/*
	 * Override the request type to distinguish discovery requests.  If we
	 * end up deferring the request, esas2r_disc_local_start_request()
	 * will be called to restart it.
	 */
	rq->req_type = RT_DISC_REQ;

	spin_lock_irqsave(&a->queue_lock, flags);

	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_FLASHING, &a->flags))
		esas2r_disc_local_start_request(a, rq);
	else
		list_add_tail(&rq->req_list, &a->defer_list);

	spin_unlock_irqrestore(&a->queue_lock, flags);

	return true;
}

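/* Issue a discovery request directly to the VDA layer. */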
void esas2r_disc_local_start_request(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	esas2r_trace_enter();

	list_add_tail(&rq->req_list, &a->active_list);

	esas2r_start_vda_request(a, rq);

	esas2r_trace_exit();
}

static void esas2r_disc_abort(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;

	esas2r_trace_enter();

	/* abort the current discovery */

	dc->state = DCS_DISC_DONE;

	esas2r_trace_exit();
}

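/*
 * DCS_BLOCK_DEV_SCAN: ask the firmware to scan for block devices.  The
 * completion callback records the scan generation that the rest of
 * discovery will use.
 */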
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_SCAN,
			     0,
			     0,
			     0,
			     NULL);

	rq->comp_cb = esas2r_disc_block_dev_scan_cb;

	rq->timeout = 30000;
	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

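/* Completion callback for the block device scan request. */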
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SUCCESS)
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;

	dc->state = DCS_RAID_GRP_INFO;
	dc->raid_grp_ix = 0;

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

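/*
 * DCS_RAID_GRP_INFO: request information for the current RAID group
 * index.  Once all groups have been processed, discovery is done.
 */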
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);

	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
		dc->state = DCS_DISC_DONE;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_GRP_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vda_grp_info),
			     NULL);

	grpinfo->grp_index = dc->raid_grp_ix;

	rq->comp_cb = esas2r_disc_raid_grp_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

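/*
 * Completion callback for a RAID group info request.  Online or degraded
 * groups have their parameters cached and move on to partition
 * enumeration; other groups are skipped, and an invalid group index
 * advances discovery to pass through devices.
 */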
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
					 struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_grp_info *grpinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		goto done;
	}

	if (rq->req_stat == RS_SUCCESS) {
		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

		if (grpinfo->status != VDA_GRP_STAT_ONLINE
		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
			/* go to the next group. */

			dc->raid_grp_ix++;
		} else {
			memcpy(&dc->raid_grp_name[0],
			       &grpinfo->grp_name[0],
			       sizeof(grpinfo->grp_name));

			dc->interleave = le32_to_cpu(grpinfo->interleave);
			dc->block_size = le32_to_cpu(grpinfo->block_size);

			dc->state = DCS_PART_INFO;
			dc->part_num = 0;
		}
	} else {
		if (rq->req_stat != RS_GRP_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group info failed - "
				   "returned with %x",
				   rq->req_stat);
		}

		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	}

done:

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

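/*
 * DCS_PART_INFO: request information for the current partition of the
 * current RAID group.  When the partitions are exhausted, move on to
 * the next group.
 */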
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	esas2r_trace("part_num: %d", dc->part_num);

	if (dc->part_num >= VDA_MAX_PARTITIONS) {
		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;

		esas2r_trace_exit();

		return false;
	}

	esas2r_rq_init_request(rq, a);

	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

	memset(partinfo, 0, sizeof(struct atto_vdapart_info));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_PART_INFO,
			     dc->scan_gen,
			     0,
			     sizeof(struct atto_vdapart_info),
			     NULL);

	partinfo->part_no = dc->part_num;

	memcpy(&partinfo->grp_name[0],
	       &dc->raid_grp_name[0],
	       sizeof(partinfo->grp_name));

	rq->comp_cb = esas2r_disc_part_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

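/*
 * Completion callback for a partition info request.  Each partition
 * found is added to the target database as a RAID target.
 */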
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		dc->state = DCS_RAID_GRP_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

		dc->part_num = partinfo->part_no;

		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

		esas2r_targ_db_add_raid(a, dc);

		dc->part_num++;
	} else {
		if (rq->req_stat != RS_PART_LAST) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group partition info "
				   "failed - status:%d", rq->req_stat);
		}

		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

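/* DCS_PT_DEV_INFO: request information for the next pass through device. */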
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	esas2r_trace("dev_ix: %d", dc->dev_ix);

	esas2r_rq_init_request(rq, a);

	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_PT_INFO,
			     dc->scan_gen,
			     dc->dev_ix,
			     sizeof(struct atto_vda_devinfo),
			     NULL);

	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

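/*
 * Completion callback for a pass through device info request.  Devices
 * that report a physical target ID move on to address discovery; others
 * are skipped.
 */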
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
			dc->curr_phys_id =
				le16_to_cpu(devinfo->phys_target_id);
			dc->dev_addr_type = ATTO_GDA_AT_PORT;
			dc->state = DCS_PT_DEV_ADDR;

			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		} else {
			dc->dev_ix++;
		}
	} else {
		if (rq->req_stat != RS_DEV_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for device information failed - "
				   "status:%d", rq->req_stat);
		}

		dc->state = DCS_DISC_DONE;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

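/*
 * DCS_PT_DEV_ADDR: issue a tunneled HBA IOCTL to retrieve an address for
 * the current device (first the SAS port address, then the unique
 * identifier).
 */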
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_ioctl *hi;
	struct esas2r_sg_context sgc;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	/* format the request. */

	sgc.cur_offset = NULL;
	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
	sgc.length = offsetof(struct atto_ioctl, data)
		     + sizeof(struct atto_hba_get_device_address);

	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, &sgc)) {
		esas2r_rq_destroy_request(rq, a);

		esas2r_trace_exit();

		return false;
	}

	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

	rq->interrupt_cx = dc;

	/* format the IOCTL data. */

	hi = (struct atto_ioctl *)a->disc_buffer;

	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

	hi->version = ATTO_VER_GET_DEV_ADDR0;
	hi->function = ATTO_FUNC_GET_DEV_ADDR;
	hi->flags = HBAF_TUNNEL;

	hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

	/* start it up. */

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

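/*
 * Completion callback for the get device address IOCTL.  The first pass
 * captures the SAS port address; the second pass uses the unique
 * identifier to add the pass through target to the target database.
 */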
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = NULL;
	unsigned long flags;
	struct atto_ioctl *hi;
	u16 addrlen;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	hi = (struct atto_ioctl *)a->disc_buffer;

	if (rq->req_stat == RS_SUCCESS
	    && hi->status == ATTO_STS_SUCCESS) {
		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
			if (addrlen == sizeof(u64))
				memcpy(&dc->sas_addr,
				       &hi->data.get_dev_addr.address[0],
				       addrlen);
			else
				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

			/* Get the unique identifier. */
			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

			goto next_dev_addr;
		} else {
			/* Add the pass through target. */
			if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(a,
							     dc,
							     &hi->data.
							     get_dev_addr.
							     address[0],
							     (u8)hi->data.
							     get_dev_addr.
							     addr_len);

				if (t)
					memcpy(&t->sas_addr, &dc->sas_addr,
					       sizeof(t->sas_addr));
			} else {
				/* getting the back end data failed */

				esas2r_log(ESAS2R_LOG_WARN,
					   "an error occurred retrieving the "
					   "back end data (%s:%d)",
					   __func__,
					   __LINE__);
			}
		}
	} else {
		/* getting the back end data failed */

		esas2r_log(ESAS2R_LOG_WARN,
			   "an error occurred retrieving the back end data - "
			   "rq->req_stat:%d hi->status:%d",
			   rq->req_stat, hi->status);
	}

	/* proceed to the next device. */

	if (dc->flags & DCF_DEV_SCAN) {
		dc->dev_ix++;
		dc->state = DCS_PT_DEV_INFO;
	} else if (dc->flags & DCF_DEV_CHANGE) {
		dc->curr_targ++;
		dc->state = DCS_DEV_ADD;
	} else {
		esas2r_bugon();
	}

next_dev_addr:
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}

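/*
 * S/G address callback for discovery requests; all discovery data is
 * transferred through the adapter's uncached discovery buffer.
 */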
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = sgc->adapter;

	if (sgc->length > ESAS2R_DISC_BUF_LEN)
		esas2r_bugon();

	*addr = a->uncached_phys
		+ (u64)((u8 *)a->disc_buffer - a->uncached);

	return sgc->length;
}

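/*
 * DCS_DEV_RMV: remove targets that are no longer present, then set up
 * to process arrivals.
 */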
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t;
	struct esas2r_target *t2;

	esas2r_trace_enter();

	/* process removals. */

	for (t = a->targetdb; t < a->targetdb_end; t++) {
		if (t->new_target_state != TS_NOT_PRESENT)
			continue;

		t->new_target_state = TS_INVALID;

		/* remove the right target! */

		t2 = esas2r_targ_db_find_by_virt_id(a,
						    esas2r_targ_get_id(t, a));

		if (t2)
			esas2r_targ_db_remove(a, t2);
	}

	/* removals complete.  process arrivals. */

	dc->state = DCS_DEV_ADD;
	dc->curr_targ = a->targetdb;

	esas2r_trace_exit();

	return false;
}

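/*
 * DCS_DEV_ADD: process one arrival per call, using the LU event that
 * announced the target to decide whether it is a RAID partition or a
 * pass through device.
 */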
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = dc->curr_targ;

	if (t >= a->targetdb_end) {
		/* done processing state changes. */

		dc->state = DCS_DISC_DONE;
	} else if (t->new_target_state == TS_PRESENT) {
		struct atto_vda_ae_lu *luevt = &t->lu_event;

		esas2r_trace_enter();

		/* clear this now in case more events come in. */

		t->new_target_state = TS_INVALID;

		/* setup the discovery context for adding this device. */

		dc->curr_virt_id = esas2r_targ_get_id(t, a);

		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
		} else {
			dc->block_size = 0;
			dc->interleave = 0;
		}

		/* determine the device type being added. */

		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
				dc->state = DCS_PT_DEV_ADDR;
				dc->dev_addr_type = ATTO_GDA_AT_PORT;
				dc->curr_phys_id = luevt->wphys_target_id;
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "luevt->dwevent does not have the "
					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
					   __func__, __LINE__);
			}
		} else {
			dc->raid_grp_name[0] = 0;

			esas2r_targ_db_add_raid(a, dc);
		}

		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		esas2r_trace("dwevent: %d", luevt->dwevent);

		esas2r_trace_exit();
	}

	if (dc->state == DCS_DEV_ADD) {
		/* go to the next device. */

		dc->curr_targ++;
	}

	return false;
}

/*
 * When discovery is done, find all requests on the defer queue and
 * test if they need to be modified. If a target is no longer present
 * then complete the request with RS_SEL. Otherwise, update the
 * target_id since after a hibernate it can be a different value.
 * VDA does not make passthrough target IDs persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
	unsigned long flags;
	struct esas2r_target *t;
	struct esas2r_request *rq;
	struct list_head *element;

	/* update virt_targ_id in any outstanding esas2r_requests */

	spin_lock_irqsave(&a->queue_lock, flags);

	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
			t = a->targetdb + rq->target_id;

			if (t->target_state == TS_PRESENT)
				rq->vrq->scsi.target_id = le16_to_cpu(
					t->virt_targ_id);
			else
				rq->req_stat = RS_SEL;
		}
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
}
   1/*
   2 *  linux/drivers/scsi/esas2r/esas2r_disc.c
   3 *      esas2r device discovery routines
   4 *
   5 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
   6 *  (mailto:linuxdrivers@attotech.com)
   7 */
   8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
   9/*
  10 *  This program is free software; you can redistribute it and/or modify
  11 *  it under the terms of the GNU General Public License as published by
  12 *  the Free Software Foundation; version 2 of the License.
  13 *
  14 *  This program is distributed in the hope that it will be useful,
  15 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  16 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17 *  GNU General Public License for more details.
  18 *
  19 *  NO WARRANTY
  20 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24 *  solely responsible for determining the appropriateness of using and
  25 *  distributing the Program and assumes all risks associated with its
  26 *  exercise of rights under this Agreement, including but not limited to
  27 *  the risks and costs of program errors, damage to or loss of data,
  28 *  programs or equipment, and unavailability or interruption of operations.
  29 *
  30 *  DISCLAIMER OF LIABILITY
  31 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38 *
  39 *  You should have received a copy of the GNU General Public License
  40 *  along with this program; if not, write to the Free Software
  41 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  42 */
  43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  44
  45#include "esas2r.h"
  46
  47/* Miscellaneous internal discovery routines */
  48static void esas2r_disc_abort(struct esas2r_adapter *a,
  49			      struct esas2r_request *rq);
  50static bool esas2r_disc_continue(struct esas2r_adapter *a,
  51				 struct esas2r_request *rq);
  52static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
  53static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
  54static bool esas2r_disc_start_request(struct esas2r_adapter *a,
  55				      struct esas2r_request *rq);
  56
  57/* Internal discovery routines that process the states */
  58static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
  59				       struct esas2r_request *rq);
  60static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
  61					  struct esas2r_request *rq);
  62static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
  63				struct esas2r_request *rq);
  64static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
  65				   struct esas2r_request *rq);
  66static bool esas2r_disc_part_info(struct esas2r_adapter *a,
  67				  struct esas2r_request *rq);
  68static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
  69				     struct esas2r_request *rq);
  70static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
  71					  struct esas2r_request *rq);
  72static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
  73					     struct esas2r_request *rq);
  74static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
  75					  struct esas2r_request *rq);
  76static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
  77					     struct esas2r_request *rq);
  78static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
  79				      struct esas2r_request *rq);
  80static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
  81					 struct esas2r_request *rq);
  82
  83void esas2r_disc_initialize(struct esas2r_adapter *a)
  84{
  85	struct esas2r_sas_nvram *nvr = a->nvram;
  86
  87	esas2r_trace_enter();
  88
  89	clear_bit(AF_DISC_IN_PROG, &a->flags);
  90	clear_bit(AF2_DEV_SCAN, &a->flags2);
  91	clear_bit(AF2_DEV_CNT_OK, &a->flags2);
  92
  93	a->disc_start_time = jiffies_to_msecs(jiffies);
  94	a->disc_wait_time = nvr->dev_wait_time * 1000;
  95	a->disc_wait_cnt = nvr->dev_wait_count;
  96
  97	if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
  98		a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
  99
 100	/*
 101	 * If we are doing chip reset or power management processing, always
 102	 * wait for devices.  use the NVRAM device count if it is greater than
 103	 * previously discovered devices.
 104	 */
 105
 106	esas2r_hdebug("starting discovery...");
 107
 108	a->general_req.interrupt_cx = NULL;
 109
 110	if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
 111	    test_bit(AF_POWER_MGT, &a->flags)) {
 112		if (a->prev_dev_cnt == 0) {
 113			/* Don't bother waiting if there is nothing to wait
 114			 * for.
 115			 */
 116			a->disc_wait_time = 0;
 117		} else {
 118			/*
 119			 * Set the device wait count to what was previously
 120			 * found.  We don't care if the user only configured
 121			 * a time because we know the exact count to wait for.
 122			 * There is no need to honor the user's wishes to
 123			 * always wait the full time.
 124			 */
 125			a->disc_wait_cnt = a->prev_dev_cnt;
 126
 127			/*
 128			 * bump the minimum wait time to 15 seconds since the
 129			 * default is 3 (system boot or the boot driver usually
 130			 * buys us more time).
 131			 */
 132			if (a->disc_wait_time < 15000)
 133				a->disc_wait_time = 15000;
 134		}
 135	}
 136
 137	esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
 138	esas2r_trace("disc wait time: %d", a->disc_wait_time);
 139
 140	if (a->disc_wait_time == 0)
 141		esas2r_disc_check_complete(a);
 142
 143	esas2r_trace_exit();
 144}
 145
 146void esas2r_disc_start_waiting(struct esas2r_adapter *a)
 147{
 148	unsigned long flags;
 149
 150	spin_lock_irqsave(&a->mem_lock, flags);
 151
 152	if (a->disc_ctx.disc_evt)
 153		esas2r_disc_start_port(a);
 154
 155	spin_unlock_irqrestore(&a->mem_lock, flags);
 156}
 157
 158void esas2r_disc_check_for_work(struct esas2r_adapter *a)
 159{
 160	struct esas2r_request *rq = &a->general_req;
 161
 162	/* service any pending interrupts first */
 163
 164	esas2r_polled_interrupt(a);
 165
 166	/*
 167	 * now, interrupt processing may have queued up a discovery event.  go
 168	 * see if we have one to start.  we couldn't start it in the ISR since
 169	 * polled discovery would cause a deadlock.
 170	 */
 171
 172	esas2r_disc_start_waiting(a);
 173
 174	if (rq->interrupt_cx == NULL)
 175		return;
 176
 177	if (rq->req_stat == RS_STARTED
 178	    && rq->timeout <= RQ_MAX_TIMEOUT) {
 179		/* wait for the current discovery request to complete. */
 180		esas2r_wait_request(a, rq);
 181
 182		if (rq->req_stat == RS_TIMEOUT) {
 183			esas2r_disc_abort(a, rq);
 184			esas2r_local_reset_adapter(a);
 185			return;
 186		}
 187	}
 188
 189	if (rq->req_stat == RS_PENDING
 190	    || rq->req_stat == RS_STARTED)
 191		return;
 192
 193	esas2r_disc_continue(a, rq);
 194}
 195
 196void esas2r_disc_check_complete(struct esas2r_adapter *a)
 197{
 198	unsigned long flags;
 199
 200	esas2r_trace_enter();
 201
 202	/* check to see if we should be waiting for devices */
 203	if (a->disc_wait_time) {
 204		u32 currtime = jiffies_to_msecs(jiffies);
 205		u32 time = currtime - a->disc_start_time;
 206
 207		/*
 208		 * Wait until the device wait time is exhausted or the device
 209		 * wait count is satisfied.
 210		 */
 211		if (time < a->disc_wait_time
 212		    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
 213			|| a->disc_wait_cnt == 0)) {
 214			/* After three seconds of waiting, schedule a scan. */
 215			if (time >= 3000
 216			    && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
 217				spin_lock_irqsave(&a->mem_lock, flags);
 218				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
 219				spin_unlock_irqrestore(&a->mem_lock, flags);
 220			}
 221
 222			esas2r_trace_exit();
 223			return;
 224		}
 225
 226		/*
 227		 * We are done waiting...we think.  Adjust the wait time to
 228		 * consume events after the count is met.
 229		 */
 230		if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
 231			a->disc_wait_time = time + 3000;
 232
 233		/* If we haven't done a full scan yet, do it now. */
 234		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
 235			spin_lock_irqsave(&a->mem_lock, flags);
 236			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
 237			spin_unlock_irqrestore(&a->mem_lock, flags);
 238			esas2r_trace_exit();
 239			return;
 240		}
 241
 242		/*
 243		 * Now, if there is still time left to consume events, continue
 244		 * waiting.
 245		 */
 246		if (time < a->disc_wait_time) {
 247			esas2r_trace_exit();
 248			return;
 249		}
 250	} else {
 251		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
 252			spin_lock_irqsave(&a->mem_lock, flags);
 253			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
 254			spin_unlock_irqrestore(&a->mem_lock, flags);
 255		}
 256	}
 257
 258	/* We want to stop waiting for devices. */
 259	a->disc_wait_time = 0;
 260
 261	if (test_bit(AF_DISC_POLLED, &a->flags) &&
 262	    test_bit(AF_DISC_IN_PROG, &a->flags)) {
 263		/*
 264		 * Polled discovery is still pending so continue the active
 265		 * discovery until it is done.  At that point, we will stop
 266		 * polled discovery and transition to interrupt driven
 267		 * discovery.
 268		 */
 269	} else {
 270		/*
 271		 * Done waiting for devices.  Note that we get here immediately
 272		 * after deferred waiting completes because that is interrupt
 273		 * driven; i.e. There is no transition.
 274		 */
 275		esas2r_disc_fix_curr_requests(a);
 276		clear_bit(AF_DISC_PENDING, &a->flags);
 277
 278		/*
 279		 * We have deferred target state changes until now because we
 280		 * don't want to report any removals (due to the first arrival)
 281		 * until the device wait time expires.
 282		 */
 283		set_bit(AF_PORT_CHANGE, &a->flags);
 284	}
 285
 286	esas2r_trace_exit();
 287}
 288
 289void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
 290{
 291	struct esas2r_disc_context *dc = &a->disc_ctx;
 292
 293	esas2r_trace_enter();
 294
 295	esas2r_trace("disc_event: %d", disc_evt);
 296
 297	/* Initialize the discovery context */
 298	dc->disc_evt |= disc_evt;
 299
 300	/*
 301	 * Don't start discovery before or during polled discovery.  if we did,
 302	 * we would have a deadlock if we are in the ISR already.
 303	 */
 304	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
 305	    !test_bit(AF_DISC_POLLED, &a->flags))
 306		esas2r_disc_start_port(a);
 307
 308	esas2r_trace_exit();
 309}
 310
 311bool esas2r_disc_start_port(struct esas2r_adapter *a)
 312{
 313	struct esas2r_request *rq = &a->general_req;
 314	struct esas2r_disc_context *dc = &a->disc_ctx;
 315	bool ret;
 316
 317	esas2r_trace_enter();
 318
 319	if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
 320		esas2r_trace_exit();
 321
 322		return false;
 323	}
 324
 325	/* If there is a discovery waiting, process it. */
 326	if (dc->disc_evt) {
 327		if (test_bit(AF_DISC_POLLED, &a->flags)
 328		    && a->disc_wait_time == 0) {
 329			/*
 330			 * We are doing polled discovery, but we no longer want
 331			 * to wait for devices.  Stop polled discovery and
 332			 * transition to interrupt driven discovery.
 333			 */
 334
 335			esas2r_trace_exit();
 336
 337			return false;
 338		}
 339	} else {
 340		/* Discovery is complete. */
 341
 342		esas2r_hdebug("disc done");
 343
 344		set_bit(AF_PORT_CHANGE, &a->flags);
 345
 346		esas2r_trace_exit();
 347
 348		return false;
 349	}
 350
 351	/* Handle the discovery context */
 352	esas2r_trace("disc_evt: %d", dc->disc_evt);
 353	set_bit(AF_DISC_IN_PROG, &a->flags);
 354	dc->flags = 0;
 355
 356	if (test_bit(AF_DISC_POLLED, &a->flags))
 357		dc->flags |= DCF_POLLED;
 358
 359	rq->interrupt_cx = dc;
 360	rq->req_stat = RS_SUCCESS;
 361
 362	/* Decode the event code */
 363	if (dc->disc_evt & DCDE_DEV_SCAN) {
 364		dc->disc_evt &= ~DCDE_DEV_SCAN;
 365
 366		dc->flags |= DCF_DEV_SCAN;
 367		dc->state = DCS_BLOCK_DEV_SCAN;
 368	} else if (dc->disc_evt & DCDE_DEV_CHANGE) {
 369		dc->disc_evt &= ~DCDE_DEV_CHANGE;
 370
 371		dc->flags |= DCF_DEV_CHANGE;
 372		dc->state = DCS_DEV_RMV;
 373	}
 374
 375	/* Continue interrupt driven discovery */
 376	if (!test_bit(AF_DISC_POLLED, &a->flags))
 377		ret = esas2r_disc_continue(a, rq);
 378	else
 379		ret = true;
 380
 381	esas2r_trace_exit();
 382
 383	return ret;
 384}
 385
 386static bool esas2r_disc_continue(struct esas2r_adapter *a,
 387				 struct esas2r_request *rq)
 388{
 389	struct esas2r_disc_context *dc =
 390		(struct esas2r_disc_context *)rq->interrupt_cx;
 391	bool rslt;
 392
 393	/* Device discovery/removal */
 394	while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
 395		rslt = false;
 396
 397		switch (dc->state) {
 398		case DCS_DEV_RMV:
 399
 400			rslt = esas2r_disc_dev_remove(a, rq);
 401			break;
 402
 403		case DCS_DEV_ADD:
 404
 405			rslt = esas2r_disc_dev_add(a, rq);
 406			break;
 407
 408		case DCS_BLOCK_DEV_SCAN:
 409
 410			rslt = esas2r_disc_block_dev_scan(a, rq);
 411			break;
 412
 413		case DCS_RAID_GRP_INFO:
 414
 415			rslt = esas2r_disc_raid_grp_info(a, rq);
 416			break;
 417
 418		case DCS_PART_INFO:
 419
 420			rslt = esas2r_disc_part_info(a, rq);
 421			break;
 422
 423		case DCS_PT_DEV_INFO:
 424
 425			rslt = esas2r_disc_passthru_dev_info(a, rq);
 426			break;
 427		case DCS_PT_DEV_ADDR:
 428
 429			rslt = esas2r_disc_passthru_dev_addr(a, rq);
 430			break;
 431		case DCS_DISC_DONE:
 432
 433			dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
 434			break;
 435
 436		default:
 437
 438			esas2r_bugon();
 439			dc->state = DCS_DISC_DONE;
 440			break;
 441		}
 442
 443		if (rslt)
 444			return true;
 445	}
 446
 447	/* Discovery is done...for now. */
 448	rq->interrupt_cx = NULL;
 449
 450	if (!test_bit(AF_DISC_PENDING, &a->flags))
 451		esas2r_disc_fix_curr_requests(a);
 452
 453	clear_bit(AF_DISC_IN_PROG, &a->flags);
 454
 455	/* Start the next discovery. */
 456	return esas2r_disc_start_port(a);
 457}
 458
 459static bool esas2r_disc_start_request(struct esas2r_adapter *a,
 460				      struct esas2r_request *rq)
 461{
 462	unsigned long flags;
 463
 464	/* Set the timeout to a minimum value. */
 465	if (rq->timeout < ESAS2R_DEFAULT_TMO)
 466		rq->timeout = ESAS2R_DEFAULT_TMO;
 467
 468	/*
 469	 * Override the request type to distinguish discovery requests.  If we
 470	 * end up deferring the request, esas2r_disc_local_start_request()
 471	 * will be called to restart it.
 472	 */
 473	rq->req_type = RT_DISC_REQ;
 474
 475	spin_lock_irqsave(&a->queue_lock, flags);
 476
 477	if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
 478	    !test_bit(AF_FLASHING, &a->flags))
 479		esas2r_disc_local_start_request(a, rq);
 480	else
 481		list_add_tail(&rq->req_list, &a->defer_list);
 482
 483	spin_unlock_irqrestore(&a->queue_lock, flags);
 484
 485	return true;
 486}
 487
 488void esas2r_disc_local_start_request(struct esas2r_adapter *a,
 489				     struct esas2r_request *rq)
 490{
 491	esas2r_trace_enter();
 492
 493	list_add_tail(&rq->req_list, &a->active_list);
 494
 495	esas2r_start_vda_request(a, rq);
 496
 497	esas2r_trace_exit();
 498
 499	return;
 500}
 501
 502static void esas2r_disc_abort(struct esas2r_adapter *a,
 503			      struct esas2r_request *rq)
 504{
 505	struct esas2r_disc_context *dc =
 506		(struct esas2r_disc_context *)rq->interrupt_cx;
 507
 508	esas2r_trace_enter();
 509
 510	/* abort the current discovery */
 511
 512	dc->state = DCS_DISC_DONE;
 513
 514	esas2r_trace_exit();
 515}
 516
 517static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
 518				       struct esas2r_request *rq)
 519{
 520	struct esas2r_disc_context *dc =
 521		(struct esas2r_disc_context *)rq->interrupt_cx;
 522	bool rslt;
 523
 524	esas2r_trace_enter();
 525
 526	esas2r_rq_init_request(rq, a);
 527
 528	esas2r_build_mgt_req(a,
 529			     rq,
 530			     VDAMGT_DEV_SCAN,
 531			     0,
 532			     0,
 533			     0,
 534			     NULL);
 535
 536	rq->comp_cb = esas2r_disc_block_dev_scan_cb;
 537
 538	rq->timeout = 30000;
 539	rq->interrupt_cx = dc;
 540
 541	rslt = esas2r_disc_start_request(a, rq);
 542
 543	esas2r_trace_exit();
 544
 545	return rslt;
 546}
 547
 548static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
 549					  struct esas2r_request *rq)
 550{
 551	struct esas2r_disc_context *dc =
 552		(struct esas2r_disc_context *)rq->interrupt_cx;
 553	unsigned long flags;
 554
 555	esas2r_trace_enter();
 556
 557	spin_lock_irqsave(&a->mem_lock, flags);
 558
 559	if (rq->req_stat == RS_SUCCESS)
 560		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
 561
 562	dc->state = DCS_RAID_GRP_INFO;
 563	dc->raid_grp_ix = 0;
 564
 565	esas2r_rq_destroy_request(rq, a);
 566
 567	/* continue discovery if it's interrupt driven */
 568
 569	if (!(dc->flags & DCF_POLLED))
 570		esas2r_disc_continue(a, rq);
 571
 572	spin_unlock_irqrestore(&a->mem_lock, flags);
 573
 574	esas2r_trace_exit();
 575}
 576
 577static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
 578				      struct esas2r_request *rq)
 579{
 580	struct esas2r_disc_context *dc =
 581		(struct esas2r_disc_context *)rq->interrupt_cx;
 582	bool rslt;
 583	struct atto_vda_grp_info *grpinfo;
 584
 585	esas2r_trace_enter();
 586
 587	esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
 588
 589	if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
 590		dc->state = DCS_DISC_DONE;
 591
 592		esas2r_trace_exit();
 593
 594		return false;
 595	}
 596
 597	esas2r_rq_init_request(rq, a);
 598
 599	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
 600
 601	memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
 602
 603	esas2r_build_mgt_req(a,
 604			     rq,
 605			     VDAMGT_GRP_INFO,
 606			     dc->scan_gen,
 607			     0,
 608			     sizeof(struct atto_vda_grp_info),
 609			     NULL);
 610
 611	grpinfo->grp_index = dc->raid_grp_ix;
 612
 613	rq->comp_cb = esas2r_disc_raid_grp_info_cb;
 614
 615	rq->interrupt_cx = dc;
 616
 617	rslt = esas2r_disc_start_request(a, rq);
 618
 619	esas2r_trace_exit();
 620
 621	return rslt;
 622}
 623
 624static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
 625					 struct esas2r_request *rq)
 626{
 627	struct esas2r_disc_context *dc =
 628		(struct esas2r_disc_context *)rq->interrupt_cx;
 629	unsigned long flags;
 630	struct atto_vda_grp_info *grpinfo;
 631
 632	esas2r_trace_enter();
 633
 634	spin_lock_irqsave(&a->mem_lock, flags);
 635
 636	if (rq->req_stat == RS_SCAN_GEN) {
 637		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
 638		dc->raid_grp_ix = 0;
 639		goto done;
 640	}
 641
 642	if (rq->req_stat == RS_SUCCESS) {
 643		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
 644
 645		if (grpinfo->status != VDA_GRP_STAT_ONLINE
 646		    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
 647			/* go to the next group. */
 648
 649			dc->raid_grp_ix++;
 650		} else {
 651			memcpy(&dc->raid_grp_name[0],
 652			       &grpinfo->grp_name[0],
 653			       sizeof(grpinfo->grp_name));
 654
 655			dc->interleave = le32_to_cpu(grpinfo->interleave);
 656			dc->block_size = le32_to_cpu(grpinfo->block_size);
 657
 658			dc->state = DCS_PART_INFO;
 659			dc->part_num = 0;
 660		}
 661	} else {
 662		if (!(rq->req_stat == RS_GRP_INVALID)) {
 663			esas2r_log(ESAS2R_LOG_WARN,
 664				   "A request for RAID group info failed - "
 665				   "returned with %x",
 666				   rq->req_stat);
 667		}
 668
 669		dc->dev_ix = 0;
 670		dc->state = DCS_PT_DEV_INFO;
 671	}
 672
 673done:
 674
 675	esas2r_rq_destroy_request(rq, a);
 676
 677	/* continue discovery if it's interrupt driven */
 678
 679	if (!(dc->flags & DCF_POLLED))
 680		esas2r_disc_continue(a, rq);
 681
 682	spin_unlock_irqrestore(&a->mem_lock, flags);
 683
 684	esas2r_trace_exit();
 685}
 686
 687static bool esas2r_disc_part_info(struct esas2r_adapter *a,
 688				  struct esas2r_request *rq)
 689{
 690	struct esas2r_disc_context *dc =
 691		(struct esas2r_disc_context *)rq->interrupt_cx;
 692	bool rslt;
 693	struct atto_vdapart_info *partinfo;
 694
 695	esas2r_trace_enter();
 696
 697	esas2r_trace("part_num: %d", dc->part_num);
 698
 699	if (dc->part_num >= VDA_MAX_PARTITIONS) {
 700		dc->state = DCS_RAID_GRP_INFO;
 701		dc->raid_grp_ix++;
 702
 703		esas2r_trace_exit();
 704
 705		return false;
 706	}
 707
 708	esas2r_rq_init_request(rq, a);
 709
 710	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
 711
 712	memset(partinfo, 0, sizeof(struct atto_vdapart_info));
 713
 714	esas2r_build_mgt_req(a,
 715			     rq,
 716			     VDAMGT_PART_INFO,
 717			     dc->scan_gen,
 718			     0,
 719			     sizeof(struct atto_vdapart_info),
 720			     NULL);
 721
 722	partinfo->part_no = dc->part_num;
 723
 724	memcpy(&partinfo->grp_name[0],
 725	       &dc->raid_grp_name[0],
 726	       sizeof(partinfo->grp_name));
 727
 728	rq->comp_cb = esas2r_disc_part_info_cb;
 729
 730	rq->interrupt_cx = dc;
 731
 732	rslt = esas2r_disc_start_request(a, rq);
 733
 734	esas2r_trace_exit();
 735
 736	return rslt;
 737}

static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
				     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vdapart_info *partinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->raid_grp_ix = 0;
		dc->state = DCS_RAID_GRP_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

		dc->part_num = partinfo->part_no;

		dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

		esas2r_targ_db_add_raid(a, dc);

		dc->part_num++;
	} else {
		if (rq->req_stat != RS_PART_LAST) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for RAID group partition info "
				   "failed - status:%d", rq->req_stat);
		}

		dc->state = DCS_RAID_GRP_INFO;
		dc->raid_grp_ix++;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
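
/*
 * An RS_SCAN_GEN status means the firmware's configuration changed while
 * the scan was in flight; the callback adopts the new scan_generation and
 * rewinds to the first RAID group so the pass restarts against a
 * consistent snapshot.  The retry idiom in isolation (names hypothetical):
 */
#if 0	/* illustrative sketch, not part of the driver */
struct scan_pos {
	u8 gen;		/* generation this pass started under */
	u16 index;	/* progress within the pass */
};

static void on_stale_generation(struct scan_pos *pos, u8 new_gen)
{
	pos->gen = new_gen;	/* tag future requests with the new generation */
	pos->index = 0;		/* and start the enumeration over */
}
#endif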

static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	esas2r_trace("dev_ix: %d", dc->dev_ix);

	esas2r_rq_init_request(rq, a);

	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

	memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

	esas2r_build_mgt_req(a,
			     rq,
			     VDAMGT_DEV_PT_INFO,
			     dc->scan_gen,
			     dc->dev_ix,
			     sizeof(struct atto_vda_devinfo),
			     NULL);

	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

	rq->interrupt_cx = dc;

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}

static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	unsigned long flags;
	struct atto_vda_devinfo *devinfo;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	if (rq->req_stat == RS_SCAN_GEN) {
		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
		dc->dev_ix = 0;
		dc->state = DCS_PT_DEV_INFO;
	} else if (rq->req_stat == RS_SUCCESS) {
		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

		dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

		if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
			dc->curr_phys_id =
				le16_to_cpu(devinfo->phys_target_id);
			dc->dev_addr_type = ATTO_GDA_AT_PORT;
			dc->state = DCS_PT_DEV_ADDR;

			esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
			esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		} else {
			dc->dev_ix++;
		}
	} else {
		if (rq->req_stat != RS_DEV_INVALID) {
			esas2r_log(ESAS2R_LOG_WARN,
				   "A request for device information failed - "
				   "status:%d", rq->req_stat);
		}

		dc->state = DCS_DISC_DONE;
	}

	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
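
/*
 * devinfo->features is little-endian on the wire, so the VDADEVFEAT_PHYS_ID
 * test above byte-swaps before masking; only devices advertising a physical
 * ID proceed to the DCS_PT_DEV_ADDR state.  The endian-safe flag test by
 * itself (names hypothetical):
 */
#if 0	/* illustrative sketch, not part of the driver */
static bool feature_set(__le16 features, u16 flag)
{
	return (le16_to_cpu(features) & flag) != 0;
}
#endif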

static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
					  struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	bool rslt;
	struct atto_ioctl *hi;
	struct esas2r_sg_context sgc;

	esas2r_trace_enter();

	esas2r_rq_init_request(rq, a);

	/* format the request. */

	sgc.cur_offset = NULL;
	sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
	sgc.length = offsetof(struct atto_ioctl, data)
		     + sizeof(struct atto_hba_get_device_address);

	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, &sgc)) {
		esas2r_rq_destroy_request(rq, a);

		esas2r_trace_exit();

		return false;
	}

	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

	rq->interrupt_cx = dc;

	/* format the IOCTL data. */

	hi = (struct atto_ioctl *)a->disc_buffer;

	memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

	hi->version = ATTO_VER_GET_DEV_ADDR0;
	hi->function = ATTO_FUNC_GET_DEV_ADDR;
	hi->flags = HBAF_TUNNEL;

	/* target_id is little-endian on the wire, so convert from CPU order */
	hi->data.get_dev_addr.target_id = cpu_to_le32(dc->curr_phys_id);
	hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

	/* start it up. */

	rslt = esas2r_disc_start_request(a, rq);

	esas2r_trace_exit();

	return rslt;
}
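
/*
 * Note how sgc.length is computed above: offsetof(struct atto_ioctl, data)
 * plus the size of just the one payload struct, so only the header and the
 * get_device_address payload are DMA'd rather than the whole data union.
 * The same sizing trick on a made-up struct:
 */
#if 0	/* illustrative sketch, not part of the driver */
struct ioctl_buf {
	u32 header;
	union {
		u8 small_payload[16];
		u8 large_payload[1024];
	} data;
};

static u32 xfer_len_small(void)
{
	/* header + 16 bytes, not sizeof(struct ioctl_buf) */
	return offsetof(struct ioctl_buf, data) + 16;
}
#endif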

static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = NULL;
	unsigned long flags;
	struct atto_ioctl *hi;
	u16 addrlen;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->mem_lock, flags);

	hi = (struct atto_ioctl *)a->disc_buffer;

	if (rq->req_stat == RS_SUCCESS
	    && hi->status == ATTO_STS_SUCCESS) {
		addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

		if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
			if (addrlen == sizeof(u64))
				memcpy(&dc->sas_addr,
				       &hi->data.get_dev_addr.address[0],
				       addrlen);
			else
				memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

			/* Get the unique identifier. */
			dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

			goto next_dev_addr;
		} else {
			/* Add the pass through target. */
			if (HIBYTE(addrlen) == 0) {
				t = esas2r_targ_db_add_pthru(
					a,
					dc,
					&hi->data.get_dev_addr.address[0],
					(u8)addrlen);

				if (t)
					memcpy(&t->sas_addr, &dc->sas_addr,
					       sizeof(t->sas_addr));
			} else {
				/* getting the back end data failed */

				esas2r_log(ESAS2R_LOG_WARN,
					   "an error occurred retrieving the "
					   "back end data (%s:%d)",
					   __func__,
					   __LINE__);
			}
		}
	} else {
		/* getting the back end data failed */

		esas2r_log(ESAS2R_LOG_WARN,
			   "an error occurred retrieving the back end data - "
			   "rq->req_stat:%d hi->status:%d",
			   rq->req_stat, hi->status);
	}

	/* proceed to the next device. */

	if (dc->flags & DCF_DEV_SCAN) {
		dc->dev_ix++;
		dc->state = DCS_PT_DEV_INFO;
	} else if (dc->flags & DCF_DEV_CHANGE) {
		dc->curr_targ++;
		dc->state = DCS_DEV_ADD;
	} else {
		esas2r_bugon();
	}

next_dev_addr:
	esas2r_rq_destroy_request(rq, a);

	/* continue discovery if it's interrupt driven */

	if (!(dc->flags & DCF_POLLED))
		esas2r_disc_continue(a, rq);

	spin_unlock_irqrestore(&a->mem_lock, flags);

	esas2r_trace_exit();
}
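
/*
 * Each pass-through device is interrogated twice: the ATTO_GDA_AT_PORT
 * pass captures the SAS address, then the next_dev_addr short-circuit
 * reissues the request as ATTO_GDA_AT_UNIQUE, and only that second reply
 * adds the target.  The two-phase sequencing in miniature (names
 * hypothetical):
 */
#if 0	/* illustrative sketch, not part of the driver */
enum addr_phase { PHASE_PORT, PHASE_UNIQUE, PHASE_DONE };

static enum addr_phase next_addr_phase(enum addr_phase cur)
{
	switch (cur) {
	case PHASE_PORT:
		return PHASE_UNIQUE;	/* same device, second query */
	case PHASE_UNIQUE:
	default:
		return PHASE_DONE;	/* target added; advance device */
	}
}
#endif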

static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = sgc->adapter;

	if (sgc->length > ESAS2R_DISC_BUF_LEN)
		esas2r_bugon();

	/*
	 * disc_buffer lives inside the adapter's uncached DMA region;
	 * report its bus address by offsetting from the region's base.
	 */
	*addr = a->uncached_phys
		+ (u64)((u8 *)a->disc_buffer - a->uncached);

	return sgc->length;
}
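
/*
 * esas2r_disc_get_phys_addr() finds the bus address of disc_buffer by
 * offsetting from the base of the adapter's single uncached DMA region.
 * The arithmetic in isolation (parameter names hypothetical; valid only
 * while sub points inside the region):
 */
#if 0	/* illustrative sketch, not part of the driver */
static u64 sub_buffer_bus_addr(u64 region_bus, u8 *region_virt, u8 *sub)
{
	return region_bus + (u64)(sub - region_virt);
}
#endif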

static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t;
	struct esas2r_target *t2;

	esas2r_trace_enter();

	/* process removals. */

	for (t = a->targetdb; t < a->targetdb_end; t++) {
		if (t->new_target_state != TS_NOT_PRESENT)
			continue;

		t->new_target_state = TS_INVALID;

		/* remove the right target! */

		t2 = esas2r_targ_db_find_by_virt_id(a,
						    esas2r_targ_get_id(t, a));

		if (t2)
			esas2r_targ_db_remove(a, t2);
	}

	/* removals complete.  process arrivals. */

	dc->state = DCS_DEV_ADD;
	dc->curr_targ = a->targetdb;

	esas2r_trace_exit();

	return false;
}
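
/*
 * Removal is a claim-then-remove sweep: each TS_NOT_PRESENT slot is first
 * flipped to TS_INVALID, so a racing firmware event cannot queue the same
 * removal twice, and the live object is then looked up by its current
 * virtual ID rather than trusted to still sit in that slot.  The sweep
 * shape in general form (types hypothetical):
 */
#if 0	/* illustrative sketch, not part of the driver */
struct slot { int state; };
enum { SLOT_GONE, SLOT_CLAIMED };

static void sweep_removals(struct slot *tbl, struct slot *end)
{
	struct slot *s;

	for (s = tbl; s < end; s++) {
		if (s->state != SLOT_GONE)
			continue;
		s->state = SLOT_CLAIMED;	/* claim before removing */
		/* ... find and remove the matching live object here ... */
	}
}
#endif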

static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	struct esas2r_disc_context *dc =
		(struct esas2r_disc_context *)rq->interrupt_cx;
	struct esas2r_target *t = dc->curr_targ;

	if (t >= a->targetdb_end) {
		/* done processing state changes. */

		dc->state = DCS_DISC_DONE;
	} else if (t->new_target_state == TS_PRESENT) {
		struct atto_vda_ae_lu *luevt = &t->lu_event;

		esas2r_trace_enter();

		/* clear this now in case more events come in. */

		t->new_target_state = TS_INVALID;

		/* set up the discovery context for adding this device. */

		dc->curr_virt_id = esas2r_targ_get_id(t, a);

		if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
		     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
		    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
			dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
			dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
		} else {
			dc->block_size = 0;
			dc->interleave = 0;
		}

		/* determine the device type being added. */

		if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
			if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
				dc->state = DCS_PT_DEV_ADDR;
				dc->dev_addr_type = ATTO_GDA_AT_PORT;
				dc->curr_phys_id = luevt->wphys_target_id;
			} else {
				esas2r_log(ESAS2R_LOG_WARN,
					   "luevt->dwevent does not have the "
					   "VDAAE_LU_PHYS_ID bit set (%s:%d)",
					   __func__, __LINE__);
			}
		} else {
			dc->raid_grp_name[0] = 0;

			esas2r_targ_db_add_raid(a, dc);
		}

		esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
		esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
		esas2r_trace("dwevent: %d", luevt->dwevent);

		esas2r_trace_exit();
	}

	if (dc->state == DCS_DEV_ADD) {
		/* go to the next device. */

		dc->curr_targ++;
	}

	return false;
}
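
/*
 * The bylength guard above is what makes reading id.tgtlun_raid safe: the
 * optional RAID geometry is consumed only when the event is long enough to
 * contain it.  The same offsetof()+sizeof() bounds check on a made-up
 * event struct:
 */
#if 0	/* illustrative sketch, not part of the driver */
struct short_evt {
	u8 bylength;		/* bytes actually supplied */
	u8 flags;
	u32 optional_field;	/* present only in longer events */
};

static bool evt_has_optional(const struct short_evt *e)
{
	return e->bylength >= offsetof(struct short_evt, optional_field)
	       + sizeof(e->optional_field);
}
#endif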

/*
 * When discovery is done, find all requests on the defer queue and
 * test if they need to be modified. If a target is no longer present
 * then complete the request with RS_SEL. Otherwise, update the
 * target_id since after a hibernate it can be a different value.
 * VDA does not make passthrough target IDs persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
	unsigned long flags;
	struct esas2r_target *t;
	struct esas2r_request *rq;
	struct list_head *element;

	/* update virt_targ_id in any outstanding esas2r_requests */

	spin_lock_irqsave(&a->queue_lock, flags);

	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
			t = a->targetdb + rq->target_id;

			if (t->target_state == TS_PRESENT)
				rq->vrq->scsi.target_id = cpu_to_le16(
					t->virt_targ_id);
			else
				rq->req_stat = RS_SEL;
		}
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
}