/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200

#define smu_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

#define smu_max_task_contexts(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

#define smu_max_rncs(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

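/*
 * Illustrative sketch only (not part of the driver build): how the
 * smu_max_*() accessors above are typically used.  The register name
 * "device_context_capacity" is an assumption for this example.
 */
#if 0
	u32 dcc_value = readl(&ihost->smu_registers->device_context_capacity);

	/* each field holds (capacity - 1), hence the "+ 1" in the macros */
	u32 max_ports = smu_max_ports(dcc_value);	/* logical ports */
	u32 max_tcs = smu_max_task_contexts(dcc_value);	/* task contexts */
	u32 max_rncs = smu_max_rncs(dcc_value);		/* remote node contexts */
#endif
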
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100

/*
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
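
/*
 * Worked example for the two cycle-bit macros above (illustrative; the shift
 * value used here is hypothetical).  The hardware writes each completion
 * queue entry with the current cycle bit in bit 31, while the driver keeps
 * its own copy of the cycle bit inside completion_queue_get.
 * NORMALIZE_GET_POINTER_CYCLE_BIT() shifts the driver's copy up to bit 31 so
 * the two can be compared directly: if SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT
 * were 16, a set cycle bit (1 << 16) would be shifted left by (31 - 16) = 15
 * positions, landing on bit 31 to line up with COMPLETION_QUEUE_CYCLE_BIT().
 * When the two bits match, the entry at the get pointer is valid.
 */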

/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
		 const struct sci_base_state *state_table, u32 initial_state)
{
	sci_state_transition_t handler;

	sm->initial_state_id = initial_state;
	sm->previous_state_id = initial_state;
	sm->current_state_id = initial_state;
	sm->state_table = state_table;

	handler = sm->state_table[initial_state].enter_state;
	if (handler)
		handler(sm);
}

/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
	sci_state_transition_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}
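
/*
 * Usage sketch for the state machine API above (illustrative only, not part
 * of the driver).  The state ids, table, and handler below are hypothetical;
 * the real tables in this file (e.g. sci_controller_state_table) follow the
 * same pattern.
 */
#if 0
enum { EX_OFF, EX_ON };

static void ex_on_enter(struct sci_base_state_machine *sm)
{
	/* runs each time the machine enters EX_ON */
}

static const struct sci_base_state ex_state_table[] = {
	[EX_OFF] = { },
	[EX_ON] = { .enter_state = ex_on_enter },
};

	/* in some init path: */
	struct sci_base_state_machine sm;

	sci_init_sm(&sm, ex_state_table, EX_OFF);	/* enters EX_OFF */
	sci_change_state(&sm, EX_ON);			/* exits EX_OFF, enters EX_ON */
#endif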

static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
{
	u32 get_value = ihost->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
		return true;

	return false;
}

static bool sci_controller_isr(struct isci_host *ihost)
{
	if (sci_controller_completion_queue_has_entries(ihost)) {
		return true;
	} else {
		/*
		 * we have a spurious interrupt; it could be that we have already
		 * emptied the completion queue from a previous interrupt */
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to be
		 * notified of an interrupt completion if we do not take this step.
		 * We will mask then unmask the interrupts so that if another
		 * interrupt is pending after the clearing of the interrupt source
		 * we get the next interrupt message. */
		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
		writel(0, &ihost->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool sci_controller_error_isr(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending, so let it through and
		 * handle it in the callback */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be notified
	 * of an interrupt completion if we do not take this step.  We will mask
	 * then unmask the error interrupts so that if there was another interrupt
	 * pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
	writel(0xff, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);

	return false;
}

static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
	u32 index = SCU_GET_COMPLETION_INDEX(ent);
	struct isci_request *ireq = ihost->reqs[index];

	/* Make sure that we really want to process this IO request */
	if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
	    ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
		/* This is a valid io request; pass it along to the
		 * io request handler
		 */
		sci_io_request_tc_completion(ireq, ent);
}

static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
	u32 index;
	struct isci_request *ireq;
	struct isci_remote_device *idev;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_command_request_type(ent)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		ireq = ihost->reqs[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
			 __func__, ent, ireq);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		idev = ihost->device_table[index];
		dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
			 __func__, ent, idev);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;
	default:
		dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
			 __func__, ent);
		break;
	}
}

static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
	u32 index;
	u32 frame_index;

	struct scu_unsolicited_frame_header *frame_header;
	struct isci_phy *iphy;
	struct isci_remote_device *idev;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(ent);

	frame_header = ihost->uf_control.buffers.array[frame_index].header;
	ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(ent)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error will
		 * this cause a problem?  We expect the phy initialization will
		 * fail if there is an error in the frame. */
		sci_controller_release_frame(ihost, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		result = sci_phy_frame_handler(iphy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(ent);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct attached SATA
			 * device that has not yet been created.  In either case forward
			 * the frame to the PE and let it take care of the frame data. */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
			iphy = &ihost->phys[index];
			result = sci_phy_frame_handler(iphy, frame_index);
		} else {
			if (index < ihost->remote_node_entries)
				idev = ihost->device_table[index];
			else
				idev = NULL;

			if (idev != NULL)
				result = sci_remote_device_frame_handler(idev, frame_index);
			else
				sci_controller_release_frame(ihost, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error message
		 * when we get this failure notification? */
	}
}

static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
	struct isci_remote_device *idev;
	struct isci_request *ireq;
	struct isci_phy *iphy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(ent);

	switch (scu_get_event_type(ent)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we want to
		 * reset the controller. */
		dev_err(&ihost->pdev->dev,
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			ihost,
			ent);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		ireq = ihost->reqs[index];
		sci_io_request_event_handler(ireq, ent);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(ent)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			ireq = ihost->reqs[index];
			if (ireq != NULL)
				sci_io_request_event_handler(ireq, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			idev = ihost->device_table[index];
			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
			else
				dev_warn(&ihost->pdev->dev,
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 ihost,
					 ent);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification.  This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
		iphy = &ihost->phys[index];
		sci_phy_event_handler(iphy, ent);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < ihost->remote_node_entries) {
			idev = ihost->device_table[index];

			if (idev != NULL)
				sci_remote_device_event_handler(idev, ent);
		} else
			dev_err(&ihost->pdev->dev,
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				ihost,
				ent,
				index);

		break;

	default:
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 ent);
		break;
	}
}

static void sci_controller_process_completions(struct isci_host *ihost)
{
	u32 completion_count = 0;
	u32 ent;
	u32 get_index;
	u32 get_cycle;
	u32 event_get;
	u32 event_cycle;

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;

	event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;

	while (
		NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
		== COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
		) {
		completion_count++;

		ent = ihost->completion_queue[get_index];

		/* increment the get pointer and check for rollover to toggle the cycle bit */
		get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
			     (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
		get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

		dev_dbg(&ihost->pdev->dev,
			"%s: completion queue entry:0x%08x\n",
			__func__,
			ent);

		switch (SCU_GET_COMPLETION_TYPE(ent)) {
		case SCU_COMPLETION_TYPE_TASK:
			sci_controller_task_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			sci_controller_sdma_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			sci_controller_unsolicited_frame(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			sci_controller_event_completion(ihost, ent);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY: {
			event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
				       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
			event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

			sci_controller_event_completion(ihost, ent);
			break;
		}
		default:
			dev_warn(&ihost->pdev->dev,
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 ent);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		ihost->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(ihost->completion_queue_get,
		       &ihost->smu_registers->completion_queue_get);
	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		ihost->completion_queue_get);
}
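
/*
 * Worked example for the get-pointer arithmetic above (illustrative; a queue
 * depth of 512 is assumed here, the real depth is
 * SCU_MAX_COMPLETION_QUEUE_ENTRIES):
 *
 *   get_index == 511, so (get_index + 1) == 512 == the queue depth, and
 *   (512 & 512) != 0: the XOR flips get_cycle exactly when the pointer wraps
 *   back to slot 0, keeping the driver's cycle bit in step with the one the
 *   hardware will write on its next pass through the queue.  For any
 *   non-wrapping increment, ((get_index + 1) & 512) == 0 and get_cycle is
 *   left unchanged.
 */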

static void sci_controller_error_handler(struct isci_host *ihost)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&ihost->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    sci_controller_completion_queue_has_entries(ihost)) {

		sci_controller_process_completions(ihost);
		writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
	} else {
		dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&ihost->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &ihost->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;

	if (sci_controller_isr(ihost)) {
		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (sci_controller_error_isr(ihost)) {
		spin_lock(&ihost->scic_lock);
		sci_controller_error_handler(ihost);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (sci_controller_error_isr(ihost))
		sci_controller_error_handler(ihost);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}

/**
 * sci_controller_get_suggested_start_timeout() - This method returns the
 *    suggested sci_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @ihost: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
	/* Validate the user supplied parameters. */
	if (!ihost)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 * to the number of phys divided by the number of phys allowed per
	 * interval, minus 1 (once OEM parameters are supported).
	 * Currently we assume only 1 phy per interval. */

	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
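
/*
 * Worked example (SCI_MAX_PHYS == 4 is an assumption for this example;
 * SCIC_SDS_SIGNATURE_FIS_TIMEOUT is defined elsewhere in the driver):
 *
 *   suggested timeout = SCIC_SDS_SIGNATURE_FIS_TIMEOUT
 *                     + 100 ms            (phy start timeout)
 *                     + (4 - 1) * 500 ms  (power control intervals)
 *                     = SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 1600 ms
 */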

static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

void sci_controller_disable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
}

static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&ihost->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &ihost->scu_registers->peg0.ptsg.control);
}

static void sci_controller_assign_task_entries(struct isci_host *ihost)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */

	task_assignment =
		readl(&ihost->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &ihost->smu_registers->task_context_assignment[0]);
}

static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	ihost->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &ihost->smu_registers->completion_queue_control);


	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &ihost->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &ihost->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid */
		ihost->completion_queue[index] = 0x80000000;
	}
}

static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}

static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
	if (ihost->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&ihost->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

static bool is_phy_starting(struct isci_phy *iphy)
{
	enum sci_phy_states state;

	state = iphy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * sci_controller_start_next_phy - start phy
 * @ihost: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (sci_cb_controller_start_complete()).
 */
static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
	struct sci_oem_params *oem = &ihost->oem_parameters;
	struct isci_phy *iphy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (ihost->phy_startup_timer_pending)
		return status;

	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			iphy = &ihost->phys[index];
			state = iphy->sm.current_state_id;

			if (!phy_get_non_dummy_port(iphy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device, or
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state. */
		if (is_controller_start_complete == true) {
			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
			sci_del_timer(&ihost->phy_timer);
			ihost->phy_startup_timer_pending = false;
		}
	} else {
		iphy = &ihost->phys[ihost->next_phy_to_start];

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(iphy) == NULL) {
				ihost->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode,
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return sci_controller_start_next_phy(ihost);
			}
		}

		status = sci_phy_start(iphy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&ihost->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			ihost->phy_startup_timer_pending = true;
		} else {
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
		}

		ihost->next_phy_to_start++;
	}

	return status;
}

static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
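
/*
 * Illustrative note: the TCi pool uses <linux/circ_buf.h> accounting, where
 * CIRC_CNT(head, tail, size) evaluates to ((head - tail) & (size - 1)).
 * E.g., assuming SCI_MAX_IO_REQUESTS == 256 for this example, tci_head == 260
 * and tci_tail == 250 give CIRC_CNT == 10 active task contexts, with
 * wrap-around handled by the masking.
 */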

static enum sci_status sci_controller_start(struct isci_host *ihost,
					    u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/*
	 * Before anything else, let's make sure we will not be
	 * interrupted by the hardware.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task entries to this controller's physical function */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller */
	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		result = sci_port_start(iport);
		if (result)
			return result;
	}

	sci_controller_start_next_phy(ihost);

	sci_mod_timer(&ihost->timer, timeout);

	sci_change_state(&ihost->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_start(ihost, tmo);
	sci_controller_enable_interrupts(ihost);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	sci_controller_disable_interrupts(ihost);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler.  It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;
	u16 active;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&ihost->scic_lock);

	sci_controller_completion_handler(ihost);

	/* Take the list of completed I/Os from the host. */
	list_splice_init(&ihost->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&ihost->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&ihost->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&ihost->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}

		spin_lock_irq(&ihost->scic_lock);
		isci_free_tag(ihost, request->io_tag);
		spin_unlock_irq(&ihost->scic_lock);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&ihost->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&ihost->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			isci_free_tag(ihost, request->io_tag);
			spin_unlock_irq(&ihost->scic_lock);
		}
	}

	/* the coalescence timeout doubles at each encoding step, so
	 * update it based on the ilog2 value of the outstanding requests
	 */
	active = isci_tci_active(ihost);
	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
	       &ihost->smu_registers->interrupt_coalesce_control);
}
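
/*
 * Worked example for the coalescence update above (ISCI_COALESCE_BASE is
 * defined elsewhere in the driver; a value of 9 is assumed here):
 *
 *   active == 20 outstanding requests
 *   ilog2(20) == 4               (index of the highest set bit)
 *   timer encode == 9 + 4 == 13  (roughly 54.6-81.9 us per the encoding
 *                                 table further down in this file)
 *
 * i.e. the more requests in flight, the longer the hardware is allowed to
 * coalesce completions before raising an interrupt.
 */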

/**
 * sci_controller_stop() - This method will stop an individual controller
 *    object.  This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @ihost: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&ihost->timer, timeout);
	sci_change_state(&ihost->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}

/**
 * sci_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @ihost: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPED:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&ihost->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			if (test_bit(IDEV_ALLOCATED, &idev->flags))
				isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	sci_controller_reset(ihost);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];
		del_timer_sync(&iport->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		del_timer_sync(&iphy->sata_timer.timer);
	}

	del_timer_sync(&ihost->port_agent.timer.timer);

	del_timer_sync(&ihost->power_control.timer.timer);

	del_timer_sync(&ihost->timer.timer);

	del_timer_sync(&ihost->phy_timer.timer);
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(struct sci_user_parameters *u)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_change_state(&ihost->sm, SCIC_RESET);
}

static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
#define INTERRUPT_COALESCE_NUMBER_MAX 256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28

/**
 * sci_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @ihost: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceeds
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
					 u32 coalesce_number,
					 u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 * Defined encoding for interrupt coalescing timeout:
	 *	Value	Min	Max	Units
	 *	-----	---	---	-----
	 *	0	-	-	Disabled
	 *	1	13.3	20.0	ns
	 *	2	26.7	40.0
	 *	3	53.3	80.0
	 *	4	106.7	160.0
	 *	5	213.3	320.0
	 *	6	426.7	640.0
	 *	7	853.3	1280.0
	 *	8	1.7	2.6	us
	 *	9	3.4	5.1
	 *	10	6.8	10.2
	 *	11	13.7	20.5
	 *	12	27.3	41.0
	 *	13	54.6	81.9
	 *	14	109.2	163.8
	 *	15	218.5	327.7
	 *	16	436.9	655.4
	 *	17	873.8	1310.7
	 *	18	1.7	2.6	ms
	 *	19	3.5	5.2
	 *	20	7.0	10.5
	 *	21	14.0	21.0
	 *	22	28.0	41.9
	 *	23	55.9	83.9
	 *	24	111.8	167.8
	 *	25	223.7	335.5
	 *	26	447.4	671.1
	 *	27	894.8	1342.2
	 *	28	1.8	2.7	s
	 *	Others	Undefined */

	/*
	 * Use the table above to decide the encoding of the interrupt coalescing
	 * timeout value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in units of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encoding of the timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &ihost->smu_registers->interrupt_coalesce_control);


	ihost->interrupt_coalesce_number = (u16)coalesce_number;
	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
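
/*
 * Worked example for the encoding loop above: coalesce_timeout == 30 us.
 *
 *   in 10 ns units: 30 * 100 == 3000
 *   encode 7 covers [85, 128), encode 8 covers [170, 256), and each step
 *   doubles both bounds, so encode 12 covers [2720, 4096), which contains
 *   3000.
 *
 * timeout_encode == 12, matching the 27.3-41.0 us row of the table above.
 */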

static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* set the default interrupt coalescence number and timeout value. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* disable interrupt coalescence. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = sci_phy_stop(&ihost->phys[index]);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].phy_index, phy_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;

	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		port_status = sci_port_stop(iport);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 iport->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if (ihost->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = sci_remote_device_stop(ihost->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(&ihost->pdev->dev,
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 ihost->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* Stop all of the components for this controller */
	sci_controller_stop_phys(ihost);
	sci_controller_stop_ports(ihost);
	sci_controller_stop_devices(ihost);
}

static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_reset_hardware(ihost);
	sci_change_state(&ihost->sm, SCIC_RESET);
}

static const struct sci_base_state sci_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = sci_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state = sci_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = sci_controller_ready_state_enter,
		.exit_state = sci_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = sci_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = sci_controller_stopping_state_enter,
		.exit_state = sci_controller_stopping_state_exit,
	},
	[SCIC_STOPPED] = {},
	[SCIC_FAILED] = {}
};

static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
{
	/* these defaults are overridden by the platform / firmware */
	u16 index;

	/* Default to APC mode. */
	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to 1 concurrent device spin up. */
	ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	ihost->oem_parameters.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		ihost->oem_parameters.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		ihost->user_parameters.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
		ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
		ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of the SAS address
		 * hold a value greater than the Vitesse company identifier.
		 * Hence, the usage of 0x5FCFFFFF. */
		ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
		ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	ihost->user_parameters.stp_inactivity_timeout = 5;
	ihost->user_parameters.ssp_inactivity_timeout = 5;
	ihost->user_parameters.stp_max_occupancy_timeout = 5;
	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
	ihost->user_parameters.no_outbound_task_timeout = 20;
}

static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
	struct sci_base_state_machine *sm = &ihost->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else /* @todo Now what do we want to do in this case? */
		dev_err(&ihost->pdev->dev,
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static enum sci_status sci_controller_construct(struct isci_host *ihost,
						void __iomem *scu_base,
						void __iomem *smu_base)
{
	u8 i;

	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);

	ihost->scu_registers = scu_base;
	ihost->smu_registers = smu_base;

	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* Construct the ports for this controller; the extra port at index
	 * SCI_MAX_PORTS (i after the loop) is the dummy port that holds phys
	 * not assigned to a real port.
	 */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		sci_port_construct(&ihost->ports[i], i, ihost);
	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		sci_phy_construct(&ihost->phys[i],
				  &ihost->ports[SCI_MAX_PORTS], i);
	}

	ihost->invalid_phy_mask = 0;

	sci_init_timer(&ihost->timer, controller_timeout);

	/* Initialize the User and OEM parameters to default values. */
	sci_controller_set_default_config_parameters(ihost);

	return sci_controller_reset(ihost);
}

int sci_oem_parameters_validate(struct sci_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}
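
/*
 * Example (illustrative only; the values are a hypothetical sketch rather
 * than shipped defaults): a manual-mode OEM configuration that passes the
 * validation above could place phys 0-1 in port 0 and phys 2-3 in port 1:
 *
 *	oem->controller.mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
 *	oem->ports[0].phy_mask = 0x3;	(phys 0 and 1)
 *	oem->ports[1].phy_mask = 0xc;	(phys 2 and 3)
 *
 * In automatic mode every ports[i].phy_mask must instead be zero, since the
 * port configuration agent assigns phys to ports at runtime.
 */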

static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
	u32 state = ihost->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {

		if (sci_oem_parameters_validate(&ihost->oem_parameters))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
	struct isci_phy *iphy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->power_control.phys_granted_power = 0;

	if (ihost->power_control.phys_waiting == 0) {
		ihost->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (ihost->power_control.phys_waiting == 0)
			break;

		iphy = ihost->power_control.requesters[i];
		if (iphy == NULL)
			continue;

		if (ihost->power_control.phys_granted_power >=
		    ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
			break;

		ihost->power_control.requesters[i] = NULL;
		ihost->power_control.phys_waiting--;
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	ihost->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

void sci_controller_power_control_queue_insert(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.phys_granted_power <
	    ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		/*
		 * stop and start the power_control timer. When the timer
		 * fires, the phys_granted_power count will be reset to 0.
		 */
		if (ihost->power_control.timer_started)
			sci_del_timer(&ihost->power_control.timer);

		sci_mod_timer(&ihost->power_control.timer,
			      SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		ihost->power_control.timer_started = true;

	} else {
		/* Add the phy in the waiting list */
		ihost->power_control.requesters[iphy->phy_index] = iphy;
		ihost->power_control.phys_waiting++;
	}
}
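
/*
 * Example (illustrative only): assume max_concurrent_dev_spin_up is 2 and
 * four phys request power at nearly the same time. Phys 0 and 1 are granted
 * power immediately and the timer is armed; phys 2 and 3 land in
 * power_control.requesters[]. When the timer fires after
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL ms, phys_granted_power is reset
 * to 0 and the loop in power_control_timeout() grants phys 2 and 3 their
 * turn, staggering the inrush current of device spin-up.
 */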

void sci_controller_power_control_queue_remove(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.requesters[iphy->phy_index])
		ihost->power_control.phys_waiting--;

	ihost->power_control.requesters[iphy->phy_index] = NULL;
}

#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters
 */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
	const struct sci_oem_params *oem = &ihost->oem_parameters;
	struct pci_dev *pdev = ihost->pdev;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0(pdev)) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer
		 */
		writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a2(pdev))
		writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
	else if (is_b0(pdev) || is_c0(pdev))
		writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0(pdev) || is_c0(pdev))
		writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a2(pdev)) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else if (is_c0(pdev)) {
			/* Configure transmitter SSC parameters */
			writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * All defaults, except the Receive Word Alignment/Comma
			 * Detect Enable....(0xe800)
			 */
			writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma
			 * Detect Enable....(0xe800)
			 */
			writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c)
		 */
		if (is_a2(pdev))
			writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_b0(pdev)) {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c)
			 */
			writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		} else {
			writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c)
			 */
			writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a2(pdev)) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800)
		 */
		writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a2(pdev))
			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_b0(pdev)) {
			writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		} else {
			writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}

		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
		       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
		       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
		       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
		       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}

static void sci_controller_initialize_power_control(struct isci_host *ihost)
{
	sci_init_timer(&ihost->power_control.timer, power_control_timeout);

	memset(ihost->power_control.requesters, 0,
	       sizeof(ihost->power_control.requesters));

	ihost->power_control.phys_waiting = 0;
	ihost->power_control.phys_granted_power = 0;
}

static enum sci_status sci_controller_initialize(struct isci_host *ihost)
{
	struct sci_base_state_machine *sm = &ihost->sm;
	enum sci_status result = SCI_FAILURE;
	unsigned long i, state, val;

	if (ihost->sm.current_state_id != SCIC_RESET) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCIC_INITIALIZING);

	sci_init_timer(&ihost->phy_timer, phy_startup_timeout);

	ihost->next_phy_to_start = 0;
	ihost->phy_startup_timer_pending = false;

	sci_controller_initialize_power_control(ihost);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * @todo The AFE settings are supposed to be correct for the B0 but
	 * presently they seem to be wrong.
	 */
	sci_controller_afe_initialization(ihost);


	/* Take the hardware out of reset */
	writel(0, &ihost->smu_registers->soft_reset_control);

	/*
	 * @todo Provide a meaningful error code for hardware failure, e.g.
	 * result = SCI_FAILURE_CONTROLLER_HARDWARE;
	 */
	for (i = 100; i >= 1; i--) {
		u32 status;

		/* Loop until the hardware reports success */
		udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
		status = readl(&ihost->smu_registers->control_status);

		if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
			break;
	}
	if (i == 0)
		goto out;

	/*
	 * Determine what the actual device capacities are that the
	 * hardware will support.
	 */
	val = readl(&ihost->smu_registers->device_context_capacity);

	/* Record the smaller of the two capacity values */
	ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
	ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
	ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);

	/*
	 * Make all PEs that are unassigned match up with the
	 * logical ports
	 */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct scu_port_task_scheduler_group_registers __iomem
			*ptsg = &ihost->scu_registers->peg0.ptsg;

		writel(i, &ptsg->protocol_engine[i]);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	val = readl(&ihost->scu_registers->sdma.pdma_configuration);
	val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.pdma_configuration);

	val = readl(&ihost->scu_registers->sdma.cdma_configuration);
	val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
	writel(val, &ihost->scu_registers->sdma.cdma_configuration);

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		result = sci_phy_initialize(&ihost->phys[i],
					    &ihost->scu_registers->peg0.pe[i].tl,
					    &ihost->scu_registers->peg0.pe[i].ll);
		if (result != SCI_SUCCESS)
			goto out;
	}

	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];

		iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
		iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
		iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
	}

	result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);

 out:
	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCIC_INITIALIZED;
	else
		state = SCIC_FAILED;
	sci_change_state(sm, state);

	return result;
}

static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
					       struct sci_user_parameters *sci_parms)
{
	u32 state = ihost->sm.current_state_id;

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters. If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &sci_parms->phys[index];

			if (!((user_phy->max_speed_generation <=
			       SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
			       SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency < 3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->notify_enable_spin_up_insertion_frequency == 0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((sci_parms->stp_inactivity_timeout == 0) ||
		    (sci_parms->ssp_inactivity_timeout == 0) ||
		    (sci_parms->stp_max_occupancy_timeout == 0) ||
		    (sci_parms->ssp_max_occupancy_timeout == 0) ||
		    (sci_parms->no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

static int sci_controller_mem_init(struct isci_host *ihost)
{
	struct device *dev = &ihost->pdev->dev;
	dma_addr_t dma;
	size_t size;
	int err;

	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
	ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!ihost->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);

	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
							       GFP_KERNEL);
	if (!ihost->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);

	size = ihost->task_context_entries * sizeof(struct scu_task_context);
	ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!ihost->task_context_table)
		return -ENOMEM;

	ihost->task_context_dma = dma;
	writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);

	err = sci_unsolicited_frame_control_construct(ihost);
	if (err)
		return err;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(ihost->uf_control.headers.physical_address),
	       &ihost->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(ihost->uf_control.headers.physical_address),
	       &ihost->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
	       &ihost->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
	       &ihost->scu_registers->sdma.uf_address_table_upper);

	return 0;
}
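
/*
 * Example (illustrative only; regs->queue_lower/queue_upper are hypothetical
 * names, not registers in this driver): each queue above follows the same
 * pattern of a managed coherent allocation whose bus address is split across
 * a pair of 32-bit registers. A hypothetical additional queue would be wired
 * up the same way:
 *
 *	buf = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	writel(lower_32_bits(dma), &regs->queue_lower);
 *	writel(upper_32_bits(dma), &regs->queue_upper);
 *
 * Because dmam_* allocations are device-managed, none of the error paths
 * above need an explicit free.
 */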

int isci_host_init(struct isci_host *ihost)
{
	int err = 0, i;
	enum sci_status status;
	struct sci_user_parameters sci_user_params;
	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);

	spin_lock_init(&ihost->state_lock);
	spin_lock_init(&ihost->scic_lock);
	init_waitqueue_head(&ihost->eventq);

	isci_host_change_state(ihost, isci_starting);

	status = sci_controller_construct(ihost, scu_base(ihost),
					  smu_base(ihost));

	if (status != SCI_SUCCESS) {
		dev_err(&ihost->pdev->dev,
			"%s: sci_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	ihost->sas_ha.dev = &ihost->pdev->dev;
	ihost->sas_ha.lldd_ha = ihost;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(&sci_user_params);
	status = sci_user_parameters_set(ihost, &sci_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&ihost->oem_parameters,
						   pci_info->orom,
						   ihost->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&ihost->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = sci_oem_parameters_set(ihost);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_oem_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	tasklet_init(&ihost->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)ihost);

	INIT_LIST_HEAD(&ihost->requests_to_complete);
	INIT_LIST_HEAD(&ihost->requests_to_errorback);

	spin_lock_irq(&ihost->scic_lock);
	status = sci_controller_initialize(ihost);
	spin_unlock_irq(&ihost->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = sci_controller_mem_init(ihost);
	if (err)
		return err;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&ihost->ports[i], ihost, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&ihost->phys[i], ihost, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &ihost->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
	}

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq;
		dma_addr_t dma;

		ireq = dmam_alloc_coherent(&ihost->pdev->dev,
					   sizeof(struct isci_request), &dma,
					   GFP_KERNEL);
		if (!ireq)
			return -ENOMEM;

		ireq->tc = &ihost->task_context_table[i];
		ireq->owning_controller = ihost;
		spin_lock_init(&ireq->state_lock);
		ireq->request_daddr = dma;
		ireq->isci_host = ihost;
		ihost->reqs[i] = ireq;
	}

	return 0;
}

void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
			    struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
		sci_del_timer(&ihost->phy_timer);
		ihost->phy_startup_timer_pending = false;
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		sci_controller_start_next_phy(ihost);
		break;
	case SCIC_READY:
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
			      struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
	case SCIC_READY:
		ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
						    iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
	u32 index;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if ((ihost->device_table[index] != NULL) &&
		    (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
			return true;
	}

	return false;
}

void sci_controller_remote_device_stopped(struct isci_host *ihost,
					  struct isci_remote_device *idev)
{
	if (ihost->sm.current_state_id != SCIC_STOPPING) {
		dev_dbg(&ihost->pdev->dev,
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			ihost, idev,
			ihost->sm.current_state_id);
		return;
	}

	if (!sci_controller_has_remote_devices_stopping(ihost))
		sci_change_state(&ihost->sm, SCIC_STOPPED);
}

void sci_controller_post_request(struct isci_host *ihost, u32 request)
{
	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
		__func__, ihost->id, request);

	writel(request, &ihost->smu_registers->post_context_port);
}
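
/*
 * Example (illustrative only): callers build the 32-bit request word from a
 * request's precomputed post_context, optionally OR-ing in a sub-type before
 * posting it to the hardware. The terminate path later in this file does
 * exactly that:
 *
 *	sci_controller_post_request(ihost,
 *		ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
 */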

struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = ISCI_TAG_TCI(io_tag);

	if (task_index < ihost->task_context_entries) {
		struct isci_request *ireq = ihost->reqs[task_index];

		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
			task_sequence = ISCI_TAG_SEQ(io_tag);

			if (task_sequence == ihost->io_request_sequence[task_index])
				return ireq;
		}
	}

	return NULL;
}

/**
 * sci_controller_allocate_remote_node_context() - This method allocates a
 *    remote node index and reserves the remote node context space for use.
 *    This method can fail if there are no more remote node indexes available.
 * @ihost: This is the controller object which contains the set of
 *    free remote node ids
 * @idev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * Returns SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote node
 * indexes available.
 */
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							    struct isci_remote_device *idev,
							    u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = sci_remote_device_node_count(idev);

	node_index = sci_remote_node_table_allocate_remote_node(
		&ihost->available_remote_nodes, remote_node_count
		);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		ihost->device_table[node_index] = idev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

void sci_controller_free_remote_node_context(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     u16 node_id)
{
	u32 remote_node_count = sci_remote_device_node_count(idev);

	if (ihost->device_table[node_id] == idev) {
		ihost->device_table[node_id] = NULL;

		sci_remote_node_table_release_remote_node_index(
			&ihost->available_remote_nodes, remote_node_count, node_id
			);
	}
}

void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer)
{
	/* XXX type safety? */
	/* The first dword of the FIS arrives in the frame header; the
	 * remainder of the dev_to_host_fis lives in the frame buffer.
	 */
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
		writel(ihost->uf_control.get,
		       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

u16 isci_alloc_tag(struct isci_host *ihost)
{
	if (isci_tci_space(ihost)) {
		u16 tci = isci_tci_alloc(ihost);
		u8 seq = ihost->io_request_sequence[tci];

		return ISCI_TAG(seq, tci);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}

enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq == ihost->io_request_sequence[tci]) {
		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);

		isci_tci_free(ihost, tci);

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INVALID_IO_TAG;
}
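
/*
 * Example (illustrative only): an io_tag packs a small per-TCI sequence
 * number (modulo SCI_MAX_SEQ) together with the task context index, so a
 * stale tag can be detected after its TCI has been recycled:
 *
 *	u16 tag = isci_alloc_tag(ihost);
 *	u16 tci = ISCI_TAG_TCI(tag);
 *	u16 seq = ISCI_TAG_SEQ(tag);
 *
 * isci_free_tag() advances io_request_sequence[tci], so a later lookup or
 * free with the old tag sees a sequence mismatch and fails instead of
 * touching the reused task context.
 */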

enum sci_status sci_controller_start_io(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_io(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
						 struct isci_remote_device *idev,
						 struct isci_request *ireq)
{
	/* terminate an ongoing (i.e. started) core IO request. This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_io_request_terminate(ireq);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and OR in the
	 * POST_TC_ABORT request sub-type.
	 */
	sci_controller_post_request(ihost,
				    ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

/**
 * sci_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request. After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @ihost: The handle to the controller object for which to complete the
 *    IO request.
 * @idev: The handle to the remote device object for which to complete
 *    the IO request.
 * @ireq: the handle to the io request object to complete.
 */
enum sci_status sci_controller_complete_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	enum sci_status status;
	u16 index;

	switch (ihost->sm.current_state_id) {
	case SCIC_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCIC_READY:
		status = sci_remote_device_complete_io(ihost, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		index = ISCI_TAG_TCI(ireq->io_tag);
		clear_bit(IREQ_ACTIVE, &ireq->flags);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
		return SCI_FAILURE_INVALID_STATE;
	}
}
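
/*
 * Example (illustrative only): the normal life cycle of a request against
 * these entry points is
 *
 *	tag = isci_alloc_tag(ihost);
 *	... construct the request ...
 *	sci_controller_start_io(ihost, idev, ireq);    (sets IREQ_ACTIVE)
 *	... hardware completion arrives ...
 *	sci_controller_complete_io(ihost, idev, ireq); (clears IREQ_ACTIVE)
 *	isci_free_tag(ihost, ireq->io_tag);
 *
 * A request torn down early additionally passes through
 * sci_controller_terminate_request() before it is completed.
 */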

enum sci_status sci_controller_continue_io(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

/**
 * sci_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @ihost: the handle to the controller object for which to start the task
 *    management request.
 * @idev: the handle to the remote device object for which to start
 *    the task management request.
 * @ireq: the handle to the task request object to start.
 */
enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
					       struct isci_remote_device *idev,
					       struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_task(ihost, idev, ireq);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on starting
		 * the request (it will post the TC when the RNC is resumed).
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);
		sci_controller_post_request(ihost, ireq->post_context);
		break;
	default:
		break;
	}

	return status;
}
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55#include <linux/circ_buf.h>
56#include <linux/device.h>
57#include <scsi/sas.h>
58#include "host.h"
59#include "isci.h"
60#include "port.h"
61#include "probe_roms.h"
62#include "remote_device.h"
63#include "request.h"
64#include "scu_completion_codes.h"
65#include "scu_event_codes.h"
66#include "registers.h"
67#include "scu_remote_node_context.h"
68#include "scu_task_context.h"
69
70#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
71
72#define smu_max_ports(dcc_value) \
73 (\
74 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
75 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
76 )
77
78#define smu_max_task_contexts(dcc_value) \
79 (\
80 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
81 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
82 )
83
84#define smu_max_rncs(dcc_value) \
85 (\
86 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
87 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
88 )
89
90#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
91
92/**
93 *
94 *
95 * The number of milliseconds to wait while a given phy is consuming power
96 * before allowing another set of phys to consume power. Ultimately, this will
97 * be specified by OEM parameter.
98 */
99#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
100
101/**
102 * NORMALIZE_PUT_POINTER() -
103 *
104 * This macro will normalize the completion queue put pointer so its value can
105 * be used as an array inde
106 */
107#define NORMALIZE_PUT_POINTER(x) \
108 ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
109
110
111/**
112 * NORMALIZE_EVENT_POINTER() -
113 *
114 * This macro will normalize the completion queue event entry so its value can
115 * be used as an index.
116 */
117#define NORMALIZE_EVENT_POINTER(x) \
118 (\
119 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
120 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
121 )
122
123/**
124 * NORMALIZE_GET_POINTER() -
125 *
126 * This macro will normalize the completion queue get pointer so its value can
127 * be used as an index into an array
128 */
129#define NORMALIZE_GET_POINTER(x) \
130 ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
131
132/**
133 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
134 *
135 * This macro will normalize the completion queue cycle pointer so it matches
136 * the completion queue cycle bit
137 */
138#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
139 ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
140
141/**
142 * COMPLETION_QUEUE_CYCLE_BIT() -
143 *
144 * This macro will return the cycle bit of the completion queue entry
145 */
146#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
147
148/* Init the state machine and call the state entry function (if any) */
149void sci_init_sm(struct sci_base_state_machine *sm,
150 const struct sci_base_state *state_table, u32 initial_state)
151{
152 sci_state_transition_t handler;
153
154 sm->initial_state_id = initial_state;
155 sm->previous_state_id = initial_state;
156 sm->current_state_id = initial_state;
157 sm->state_table = state_table;
158
159 handler = sm->state_table[initial_state].enter_state;
160 if (handler)
161 handler(sm);
162}
163
164/* Call the state exit fn, update the current state, call the state entry fn */
165void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
166{
167 sci_state_transition_t handler;
168
169 handler = sm->state_table[sm->current_state_id].exit_state;
170 if (handler)
171 handler(sm);
172
173 sm->previous_state_id = sm->current_state_id;
174 sm->current_state_id = next_state;
175
176 handler = sm->state_table[sm->current_state_id].enter_state;
177 if (handler)
178 handler(sm);
179}
180
181static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
182{
183 u32 get_value = ihost->completion_queue_get;
184 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
185
186 if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
187 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
188 return true;
189
190 return false;
191}
192
193static bool sci_controller_isr(struct isci_host *ihost)
194{
195 if (sci_controller_completion_queue_has_entries(ihost))
196 return true;
197
198 /* we have a spurious interrupt it could be that we have already
199 * emptied the completion queue from a previous interrupt
200 * FIXME: really!?
201 */
202 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
203
204 /* There is a race in the hardware that could cause us not to be
205 * notified of an interrupt completion if we do not take this
206 * step. We will mask then unmask the interrupts so if there is
207 * another interrupt pending the clearing of the interrupt
208 * source we get the next interrupt message.
209 */
210 spin_lock(&ihost->scic_lock);
211 if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
212 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
213 writel(0, &ihost->smu_registers->interrupt_mask);
214 }
215 spin_unlock(&ihost->scic_lock);
216
217 return false;
218}
219
220irqreturn_t isci_msix_isr(int vec, void *data)
221{
222 struct isci_host *ihost = data;
223
224 if (sci_controller_isr(ihost))
225 tasklet_schedule(&ihost->completion_tasklet);
226
227 return IRQ_HANDLED;
228}
229
230static bool sci_controller_error_isr(struct isci_host *ihost)
231{
232 u32 interrupt_status;
233
234 interrupt_status =
235 readl(&ihost->smu_registers->interrupt_status);
236 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
237
238 if (interrupt_status != 0) {
239 /*
240 * There is an error interrupt pending so let it through and handle
241 * in the callback */
242 return true;
243 }
244
245 /*
246 * There is a race in the hardware that could cause us not to be notified
247 * of an interrupt completion if we do not take this step. We will mask
248 * then unmask the error interrupts so if there was another interrupt
249 * pending we will be notified.
250 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
251 writel(0xff, &ihost->smu_registers->interrupt_mask);
252 writel(0, &ihost->smu_registers->interrupt_mask);
253
254 return false;
255}
256
257static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
258{
259 u32 index = SCU_GET_COMPLETION_INDEX(ent);
260 struct isci_request *ireq = ihost->reqs[index];
261
262 /* Make sure that we really want to process this IO request */
263 if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
264 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
265 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
266 /* Yep this is a valid io request pass it along to the
267 * io request handler
268 */
269 sci_io_request_tc_completion(ireq, ent);
270}
271
272static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
273{
274 u32 index;
275 struct isci_request *ireq;
276 struct isci_remote_device *idev;
277
278 index = SCU_GET_COMPLETION_INDEX(ent);
279
280 switch (scu_get_command_request_type(ent)) {
281 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
282 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
283 ireq = ihost->reqs[index];
284 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
285 __func__, ent, ireq);
286 /* @todo For a post TC operation we need to fail the IO
287 * request
288 */
289 break;
290 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
291 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
292 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
293 idev = ihost->device_table[index];
294 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
295 __func__, ent, idev);
296 /* @todo For a port RNC operation we need to fail the
297 * device
298 */
299 break;
300 default:
301 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
302 __func__, ent);
303 break;
304 }
305}
306
307static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
308{
309 u32 index;
310 u32 frame_index;
311
312 struct scu_unsolicited_frame_header *frame_header;
313 struct isci_phy *iphy;
314 struct isci_remote_device *idev;
315
316 enum sci_status result = SCI_FAILURE;
317
318 frame_index = SCU_GET_FRAME_INDEX(ent);
319
320 frame_header = ihost->uf_control.buffers.array[frame_index].header;
321 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
322
323 if (SCU_GET_FRAME_ERROR(ent)) {
324 /*
325 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
326 * / this cause a problem? We expect the phy initialization will
327 * / fail if there is an error in the frame. */
328 sci_controller_release_frame(ihost, frame_index);
329 return;
330 }
331
332 if (frame_header->is_address_frame) {
333 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
334 iphy = &ihost->phys[index];
335 result = sci_phy_frame_handler(iphy, frame_index);
336 } else {
337
338 index = SCU_GET_COMPLETION_INDEX(ent);
339
340 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
341 /*
342 * This is a signature fis or a frame from a direct attached SATA
343 * device that has not yet been created. In either case forwared
344 * the frame to the PE and let it take care of the frame data. */
345 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
346 iphy = &ihost->phys[index];
347 result = sci_phy_frame_handler(iphy, frame_index);
348 } else {
349 if (index < ihost->remote_node_entries)
350 idev = ihost->device_table[index];
351 else
352 idev = NULL;
353
354 if (idev != NULL)
355 result = sci_remote_device_frame_handler(idev, frame_index);
356 else
357 sci_controller_release_frame(ihost, frame_index);
358 }
359 }
360
361 if (result != SCI_SUCCESS) {
362 /*
363 * / @todo Is there any reason to report some additional error message
364 * / when we get this failure notifiction? */
365 }
366}
367
368static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
369{
370 struct isci_remote_device *idev;
371 struct isci_request *ireq;
372 struct isci_phy *iphy;
373 u32 index;
374
375 index = SCU_GET_COMPLETION_INDEX(ent);
376
377 switch (scu_get_event_type(ent)) {
378 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
379 /* / @todo The driver did something wrong and we need to fix the condtion. */
380 dev_err(&ihost->pdev->dev,
381 "%s: SCIC Controller 0x%p received SMU command error "
382 "0x%x\n",
383 __func__,
384 ihost,
385 ent);
386 break;
387
388 case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
389 case SCU_EVENT_TYPE_SMU_ERROR:
390 case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
391 /*
392 * / @todo This is a hardware failure and its likely that we want to
393 * / reset the controller. */
394 dev_err(&ihost->pdev->dev,
395 "%s: SCIC Controller 0x%p received fatal controller "
396 "event 0x%x\n",
397 __func__,
398 ihost,
399 ent);
400 break;
401
402 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
403 ireq = ihost->reqs[index];
404 sci_io_request_event_handler(ireq, ent);
405 break;
406
407 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
408 switch (scu_get_event_specifier(ent)) {
409 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
410 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
411 ireq = ihost->reqs[index];
412 if (ireq != NULL)
413 sci_io_request_event_handler(ireq, ent);
414 else
415 dev_warn(&ihost->pdev->dev,
416 "%s: SCIC Controller 0x%p received "
417 "event 0x%x for io request object "
418 "that doesnt exist.\n",
419 __func__,
420 ihost,
421 ent);
422
423 break;
424
425 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
426 idev = ihost->device_table[index];
427 if (idev != NULL)
428 sci_remote_device_event_handler(idev, ent);
429 else
430 dev_warn(&ihost->pdev->dev,
431 "%s: SCIC Controller 0x%p received "
432 "event 0x%x for remote device object "
433 "that doesnt exist.\n",
434 __func__,
435 ihost,
436 ent);
437
438 break;
439 }
440 break;
441
442 case SCU_EVENT_TYPE_BROADCAST_CHANGE:
443 /*
444 * direct the broadcast change event to the phy first and then let
445 * the phy redirect the broadcast change to the port object */
446 case SCU_EVENT_TYPE_ERR_CNT_EVENT:
447 /*
448 * direct error counter event to the phy object since that is where
449 * we get the event notification. This is a type 4 event. */
450 case SCU_EVENT_TYPE_OSSP_EVENT:
451 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
452 iphy = &ihost->phys[index];
453 sci_phy_event_handler(iphy, ent);
454 break;
455
456 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
457 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
458 case SCU_EVENT_TYPE_RNC_OPS_MISC:
459 if (index < ihost->remote_node_entries) {
460 idev = ihost->device_table[index];
461
462 if (idev != NULL)
463 sci_remote_device_event_handler(idev, ent);
464 } else
465 dev_err(&ihost->pdev->dev,
466 "%s: SCIC Controller 0x%p received event 0x%x "
467 "for remote device object 0x%0x that doesnt "
468 "exist.\n",
469 __func__,
470 ihost,
471 ent,
472 index);
473
474 break;
475
476 default:
477 dev_warn(&ihost->pdev->dev,
478 "%s: SCIC Controller received unknown event code %x\n",
479 __func__,
480 ent);
481 break;
482 }
483}
484
485static void sci_controller_process_completions(struct isci_host *ihost)
486{
487 u32 completion_count = 0;
488 u32 ent;
489 u32 get_index;
490 u32 get_cycle;
491 u32 event_get;
492 u32 event_cycle;
493
494 dev_dbg(&ihost->pdev->dev,
495 "%s: completion queue beginning get:0x%08x\n",
496 __func__,
497 ihost->completion_queue_get);
498
499 /* Get the component parts of the completion queue */
500 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
501 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
502
503 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
504 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
505
506 while (
507 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
508 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
509 ) {
510 completion_count++;
511
512 ent = ihost->completion_queue[get_index];
513
514 /* increment the get pointer and check for rollover to toggle the cycle bit */
515 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
516 (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
517 get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
518
519 dev_dbg(&ihost->pdev->dev,
520 "%s: completion queue entry:0x%08x\n",
521 __func__,
522 ent);
523
524 switch (SCU_GET_COMPLETION_TYPE(ent)) {
525 case SCU_COMPLETION_TYPE_TASK:
526 sci_controller_task_completion(ihost, ent);
527 break;
528
529 case SCU_COMPLETION_TYPE_SDMA:
530 sci_controller_sdma_completion(ihost, ent);
531 break;
532
533 case SCU_COMPLETION_TYPE_UFI:
534 sci_controller_unsolicited_frame(ihost, ent);
535 break;
536
537 case SCU_COMPLETION_TYPE_EVENT:
538 sci_controller_event_completion(ihost, ent);
539 break;
540
541 case SCU_COMPLETION_TYPE_NOTIFY: {
542 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
543 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
544 event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
545
546 sci_controller_event_completion(ihost, ent);
547 break;
548 }
549 default:
550 dev_warn(&ihost->pdev->dev,
551 "%s: SCIC Controller received unknown "
552 "completion type %x\n",
553 __func__,
554 ent);
555 break;
556 }
557 }
558
559 /* Update the get register if we completed one or more entries */
560 if (completion_count > 0) {
561 ihost->completion_queue_get =
562 SMU_CQGR_GEN_BIT(ENABLE) |
563 SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
564 event_cycle |
565 SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
566 get_cycle |
567 SMU_CQGR_GEN_VAL(POINTER, get_index);
568
569 writel(ihost->completion_queue_get,
570 &ihost->smu_registers->completion_queue_get);
571
572 }
573
574 dev_dbg(&ihost->pdev->dev,
575 "%s: completion queue ending get:0x%08x\n",
576 __func__,
577 ihost->completion_queue_get);
578
579}
580
581static void sci_controller_error_handler(struct isci_host *ihost)
582{
583 u32 interrupt_status;
584
585 interrupt_status =
586 readl(&ihost->smu_registers->interrupt_status);
587
588 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
589 sci_controller_completion_queue_has_entries(ihost)) {
590
591 sci_controller_process_completions(ihost);
592 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
593 } else {
594 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
595 interrupt_status);
596
597 sci_change_state(&ihost->sm, SCIC_FAILED);
598
599 return;
600 }
601
602 /* If we dont process any completions I am not sure that we want to do this.
603 * We are in the middle of a hardware fault and should probably be reset.
604 */
605 writel(0, &ihost->smu_registers->interrupt_mask);
606}
607
608irqreturn_t isci_intx_isr(int vec, void *data)
609{
610 irqreturn_t ret = IRQ_NONE;
611 struct isci_host *ihost = data;
612
613 if (sci_controller_isr(ihost)) {
614 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
615 tasklet_schedule(&ihost->completion_tasklet);
616 ret = IRQ_HANDLED;
617 } else if (sci_controller_error_isr(ihost)) {
618 spin_lock(&ihost->scic_lock);
619 sci_controller_error_handler(ihost);
620 spin_unlock(&ihost->scic_lock);
621 ret = IRQ_HANDLED;
622 }
623
624 return ret;
625}
626
627irqreturn_t isci_error_isr(int vec, void *data)
628{
629 struct isci_host *ihost = data;
630
631 if (sci_controller_error_isr(ihost))
632 sci_controller_error_handler(ihost);
633
634 return IRQ_HANDLED;
635}
636
637/**
638 * isci_host_start_complete() - This function is called by the core library,
639 * through the ISCI Module, to indicate controller start status.
640 * @isci_host: This parameter specifies the ISCI host object
641 * @completion_status: This parameter specifies the completion status from the
642 * core library.
643 *
644 */
645static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
646{
647 if (completion_status != SCI_SUCCESS)
648 dev_info(&ihost->pdev->dev,
649 "controller start timed out, continuing...\n");
650 clear_bit(IHOST_START_PENDING, &ihost->flags);
651 wake_up(&ihost->eventq);
652}
653
654int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
655{
656 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
657 struct isci_host *ihost = ha->lldd_ha;
658
659 if (test_bit(IHOST_START_PENDING, &ihost->flags))
660 return 0;
661
662 sas_drain_work(ha);
663
664 return 1;
665}
666
667/**
668 * sci_controller_get_suggested_start_timeout() - This method returns the
669 * suggested sci_controller_start() timeout amount. The user is free to
670 * use any timeout value, but this method provides the suggested minimum
671 * start timeout value. The returned value is based upon empirical
672 * information determined as a result of interoperability testing.
673 * @controller: the handle to the controller object for which to return the
674 * suggested start timeout.
675 *
676 * This method returns the number of milliseconds for the suggested start
677 * operation timeout.
678 */
679static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
680{
681 /* Validate the user supplied parameters. */
682 if (!ihost)
683 return 0;
684
685 /*
686 * The suggested minimum timeout value for a controller start operation:
687 *
688 * Signature FIS Timeout
689 * + Phy Start Timeout
690 * + Number of Phy Spin Up Intervals
691 * ---------------------------------
692 * Number of milliseconds for the controller start operation.
693 *
694 * NOTE: The number of phy spin up intervals will be equivalent
695 * to the number of phys divided by the number phys allowed
696 * per interval - 1 (once OEM parameters are supported).
697 * Currently we assume only 1 phy per interval. */
698
699 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
700 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
701 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
702}
703
704static void sci_controller_enable_interrupts(struct isci_host *ihost)
705{
706 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
707 writel(0, &ihost->smu_registers->interrupt_mask);
708}
709
710void sci_controller_disable_interrupts(struct isci_host *ihost)
711{
712 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
713 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714 readl(&ihost->smu_registers->interrupt_mask); /* flush */
715}
716
717static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
718{
719 u32 port_task_scheduler_value;
720
721 port_task_scheduler_value =
722 readl(&ihost->scu_registers->peg0.ptsg.control);
723 port_task_scheduler_value |=
724 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
725 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
726 writel(port_task_scheduler_value,
727 &ihost->scu_registers->peg0.ptsg.control);
728}
729
730static void sci_controller_assign_task_entries(struct isci_host *ihost)
731{
732 u32 task_assignment;
733
734 /*
735 * Assign all the TCs to function 0
736 * TODO: Do we actually need to read this register to write it back?
737 */
738
739 task_assignment =
740 readl(&ihost->smu_registers->task_context_assignment[0]);
741
742 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
744 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
745
746 writel(task_assignment,
747 &ihost->smu_registers->task_context_assignment[0]);
748
749}
750
751static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
752{
753 u32 index;
754 u32 completion_queue_control_value;
755 u32 completion_queue_get_value;
756 u32 completion_queue_put_value;
757
758 ihost->completion_queue_get = 0;
759
760 completion_queue_control_value =
761 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
762 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
763
764 writel(completion_queue_control_value,
765 &ihost->smu_registers->completion_queue_control);
766
767
768 /* Set the completion queue get pointer and enable the queue */
769 completion_queue_get_value = (
770 (SMU_CQGR_GEN_VAL(POINTER, 0))
771 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
772 | (SMU_CQGR_GEN_BIT(ENABLE))
773 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
774 );
775
776 writel(completion_queue_get_value,
777 &ihost->smu_registers->completion_queue_get);
778
779 /* Set the completion queue put pointer */
780 completion_queue_put_value = (
781 (SMU_CQPR_GEN_VAL(POINTER, 0))
782 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
783 );
784
785 writel(completion_queue_put_value,
786 &ihost->smu_registers->completion_queue_put);
787
788 /* Initialize the cycle bit of the completion queue entries */
789 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
790 /*
791 * If get.cycle_bit != completion_queue.cycle_bit
792		 * it's not a valid completion queue entry,
793 * so at system start all entries are invalid */
794 ihost->completion_queue[index] = 0x80000000;
795 }
796}
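/*
 * A sketch (illustrative only; the bit layout is assumed here) of how the
 * cycle bit seeded above is consumed: an entry is valid only when its cycle
 * bit matches the cycle the get side currently expects, e.g.
 *
 *	u32 ent = ihost->completion_queue[get_index];
 *	bool valid = ((ent >> 31) & 1) == expected_cycle;
 *
 * Writing 0x80000000 everywhere sets every entry's cycle bit to 1 while the
 * get side starts out expecting 0, so the whole queue reads as empty at
 * start.
 */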
797
798static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
799{
800 u32 frame_queue_control_value;
801 u32 frame_queue_get_value;
802 u32 frame_queue_put_value;
803
804 /* Write the queue size */
805 frame_queue_control_value =
806 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
807
808 writel(frame_queue_control_value,
809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
810
811 /* Setup the get pointer for the unsolicited frame queue */
812 frame_queue_get_value = (
813 SCU_UFQGP_GEN_VAL(POINTER, 0)
814 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
815 );
816
817 writel(frame_queue_get_value,
818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
819 /* Setup the put pointer for the unsolicited frame queue */
820 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
821 writel(frame_queue_put_value,
822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823}
824
825void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826{
827 if (ihost->sm.current_state_id == SCIC_STARTING) {
828 /*
829 * We move into the ready state, because some of the phys/ports
830 * may be up and operational.
831 */
832 sci_change_state(&ihost->sm, SCIC_READY);
833
834 isci_host_start_complete(ihost, status);
835 }
836}
837
838static bool is_phy_starting(struct isci_phy *iphy)
839{
840 enum sci_phy_states state;
841
842 state = iphy->sm.current_state_id;
843 switch (state) {
844 case SCI_PHY_STARTING:
845 case SCI_PHY_SUB_INITIAL:
846 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
847 case SCI_PHY_SUB_AWAIT_IAF_UF:
848 case SCI_PHY_SUB_AWAIT_SAS_POWER:
849 case SCI_PHY_SUB_AWAIT_SATA_POWER:
850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852 case SCI_PHY_SUB_AWAIT_OSSP_EN:
853 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
854 case SCI_PHY_SUB_FINAL:
855 return true;
856 default:
857 return false;
858 }
859}
860
861bool is_controller_start_complete(struct isci_host *ihost)
862{
863 int i;
864
865 for (i = 0; i < SCI_MAX_PHYS; i++) {
866 struct isci_phy *iphy = &ihost->phys[i];
867 u32 state = iphy->sm.current_state_id;
868
869 /* in apc mode we need to check every phy, in
870 * mpc mode we only need to check phys that have
871 * been configured into a port
872 */
873 if (is_port_config_apc(ihost))
874 /* pass */;
875 else if (!phy_get_non_dummy_port(iphy))
876 continue;
877
878		/* The controller start operation is complete iff:
879		 * - all links have been given an opportunity to start, and
880		 * - each link either has no indication of a connected device, or
881		 *   has an indication of a connected device and that device has
882		 *   finished the link training process.
883		 */
884 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
885 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
886 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
887 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
888 return false;
889 }
890
891 return true;
892}
893
894/**
895 * sci_controller_start_next_phy - start phy
896 * @ihost: controller
897 *
898 * If all the phys have been started, then attempt to transition the
899 * controller to the READY state and inform the user
900 * (sci_cb_controller_start_complete()).
901 */
902static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
903{
904 struct sci_oem_params *oem = &ihost->oem_parameters;
905 struct isci_phy *iphy;
906 enum sci_status status;
907
908 status = SCI_SUCCESS;
909
910 if (ihost->phy_startup_timer_pending)
911 return status;
912
913 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
914 if (is_controller_start_complete(ihost)) {
915 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
916 sci_del_timer(&ihost->phy_timer);
917 ihost->phy_startup_timer_pending = false;
918 }
919 } else {
920 iphy = &ihost->phys[ihost->next_phy_to_start];
921
922 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
923 if (phy_get_non_dummy_port(iphy) == NULL) {
924 ihost->next_phy_to_start++;
925
926				/* Caution: recursion ahead, be forewarned.
927				 *
928				 * The PHY was never added to a PORT in MPC mode,
929				 * so start the next phy in sequence. This phy
930				 * will never go link up and will not draw power;
931				 * the OEM parameters either configured the phy
932				 * incorrectly for the PORT or it was never
933				 * assigned to a PORT.
934				 */
935 return sci_controller_start_next_phy(ihost);
936 }
937 }
938
939 status = sci_phy_start(iphy);
940
941 if (status == SCI_SUCCESS) {
942 sci_mod_timer(&ihost->phy_timer,
943 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
944 ihost->phy_startup_timer_pending = true;
945 } else {
946 dev_warn(&ihost->pdev->dev,
947 "%s: Controller stop operation failed "
948 "to stop phy %d because of status "
949 "%d.\n",
950 __func__,
951 ihost->phys[ihost->next_phy_to_start].phy_index,
952 status);
953 }
954
955 ihost->next_phy_to_start++;
956 }
957
958 return status;
959}
960
961static void phy_startup_timeout(struct timer_list *t)
962{
963 struct sci_timer *tmr = from_timer(tmr, t, timer);
964 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
965 unsigned long flags;
966 enum sci_status status;
967
968 spin_lock_irqsave(&ihost->scic_lock, flags);
969
970 if (tmr->cancel)
971 goto done;
972
973 ihost->phy_startup_timer_pending = false;
974
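	/*
	 * sci_controller_start_next_phy() returns a non-success status only
	 * after advancing next_phy_to_start, so the loop below terminates once
	 * a phy starts cleanly or every phy has been attempted (at which point
	 * it returns SCI_SUCCESS).
	 */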
975 do {
976 status = sci_controller_start_next_phy(ihost);
977 } while (status != SCI_SUCCESS);
978
979done:
980 spin_unlock_irqrestore(&ihost->scic_lock, flags);
981}
982
983static u16 isci_tci_active(struct isci_host *ihost)
984{
985 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
986}
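/*
 * Example of the circular accounting above (sizes assumed for illustration):
 * with SCI_MAX_IO_REQUESTS = 256, tci_head = 260 and tci_tail = 10,
 * CIRC_CNT() masks the difference to the ring size and reports
 * (260 - 10) & 255 = 250 task context indices currently in use
 * (allocations advance tci_head, frees advance tci_tail).
 */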
987
988static enum sci_status sci_controller_start(struct isci_host *ihost,
989 u32 timeout)
990{
991 enum sci_status result;
992 u16 index;
993
994 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
995 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
996 __func__, ihost->sm.current_state_id);
997 return SCI_FAILURE_INVALID_STATE;
998 }
999
1000 /* Build the TCi free pool */
1001 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
1002 ihost->tci_head = 0;
1003 ihost->tci_tail = 0;
1004 for (index = 0; index < ihost->task_context_entries; index++)
1005 isci_tci_free(ihost, index);
1006
1007 /* Build the RNi free pool */
1008 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1009 ihost->remote_node_entries);
1010
1011 /*
1012	 * Before anything else let's make sure we will not be
1013 * interrupted by the hardware.
1014 */
1015 sci_controller_disable_interrupts(ihost);
1016
1017 /* Enable the port task scheduler */
1018 sci_controller_enable_port_task_scheduler(ihost);
1019
1020 /* Assign all the task entries to ihost physical function */
1021 sci_controller_assign_task_entries(ihost);
1022
1023 /* Now initialize the completion queue */
1024 sci_controller_initialize_completion_queue(ihost);
1025
1026 /* Initialize the unsolicited frame queue for use */
1027 sci_controller_initialize_unsolicited_frame_queue(ihost);
1028
1029 /* Start all of the ports on this controller */
1030 for (index = 0; index < ihost->logical_port_entries; index++) {
1031 struct isci_port *iport = &ihost->ports[index];
1032
1033 result = sci_port_start(iport);
1034 if (result)
1035 return result;
1036 }
1037
1038 sci_controller_start_next_phy(ihost);
1039
1040 sci_mod_timer(&ihost->timer, timeout);
1041
1042 sci_change_state(&ihost->sm, SCIC_STARTING);
1043
1044 return SCI_SUCCESS;
1045}
1046
1047void isci_host_start(struct Scsi_Host *shost)
1048{
1049 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1051
1052 set_bit(IHOST_START_PENDING, &ihost->flags);
1053
1054 spin_lock_irq(&ihost->scic_lock);
1055 sci_controller_start(ihost, tmo);
1056 sci_controller_enable_interrupts(ihost);
1057 spin_unlock_irq(&ihost->scic_lock);
1058}
1059
1060static void isci_host_stop_complete(struct isci_host *ihost)
1061{
1062 sci_controller_disable_interrupts(ihost);
1063 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1064 wake_up(&ihost->eventq);
1065}
1066
1067static void sci_controller_completion_handler(struct isci_host *ihost)
1068{
1069 /* Empty out the completion queue */
1070 if (sci_controller_completion_queue_has_entries(ihost))
1071 sci_controller_process_completions(ihost);
1072
1073 /* Clear the interrupt and enable all interrupts again */
1074 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1075 /* Could we write the value of SMU_ISR_COMPLETION? */
1076 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1077 writel(0, &ihost->smu_registers->interrupt_mask);
1078}
1079
1080void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1081{
1082 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1083 !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1084 if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
1085 /* Normal notification (task_done) */
1086 dev_dbg(&ihost->pdev->dev,
1087 "%s: Normal - ireq/task = %p/%p\n",
1088 __func__, ireq, task);
1089 task->lldd_task = NULL;
1090 task->task_done(task);
1091 } else {
1092 dev_dbg(&ihost->pdev->dev,
1093 "%s: Error - ireq/task = %p/%p\n",
1094 __func__, ireq, task);
1095 if (sas_protocol_ata(task->task_proto))
1096 task->lldd_task = NULL;
1097 sas_task_abort(task);
1098 }
1099 } else
1100 task->lldd_task = NULL;
1101
1102 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1103 wake_up_all(&ihost->eventq);
1104
1105 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
1106 isci_free_tag(ihost, ireq->io_tag);
1107}
1108/**
1109 * isci_host_completion_routine() - This function is the delayed service
1110 * routine that calls the sci core library's completion handler. It's
1111 * scheduled as a tasklet from the interrupt service routine when interrupts
1112 *    are in use, or set as the timeout function in polled mode.
1113 * @data: This parameter specifies the ISCI host object
1114 *
1115 */
1116void isci_host_completion_routine(unsigned long data)
1117{
1118 struct isci_host *ihost = (struct isci_host *)data;
1119 u16 active;
1120
1121 spin_lock_irq(&ihost->scic_lock);
1122 sci_controller_completion_handler(ihost);
1123 spin_unlock_irq(&ihost->scic_lock);
1124
1125 /*
1126 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
1127	 * issued as a workaround for a hardware issue
1128 */
1129 active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1130
1131 /*
1132	 * the coalescence timeout doubles at each encoding step, so
1133 * update it based on the ilog2 value of the outstanding requests
1134 */
1135 writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1136 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1137 &ihost->smu_registers->interrupt_coalesce_control);
1138}
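/*
 * Worked example of the update above (ISCI_COALESCE_BASE assumed to be 9 for
 * illustration): with 32 requests outstanding the timer encoding is
 * 9 + ilog2(32) = 14, so each doubling of the backlog moves the timeout one
 * step up the encoding table used by
 * sci_controller_set_interrupt_coalescence() below.
 */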
1139
1140/**
1141 * sci_controller_stop() - This method will stop an individual controller
1142 *    object. This method will invoke the associated user callback upon
1143 *    completion. The completion callback is called when the following
1144 *    conditions are met: the method return status is SCI_SUCCESS, and the
1145 *    controller has been quiesced. This method will ensure that all IO
1146 *    requests are quiesced, phys are stopped, and all additional operation by
1147 *    the hardware is halted.
1148 * @ihost: the handle to the controller object to stop.
1149 * @timeout: This parameter specifies the number of milliseconds in which the
1150 * stop operation should complete.
1151 *
1152 * The controller must be in the STARTED or STOPPED state. Indicate if the
1153 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
1154 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
1155 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1156 * controller is not either in the STARTED or STOPPED states.
1157 */
1158static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1159{
1160 if (ihost->sm.current_state_id != SCIC_READY) {
1161 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1162 __func__, ihost->sm.current_state_id);
1163 return SCI_FAILURE_INVALID_STATE;
1164 }
1165
1166 sci_mod_timer(&ihost->timer, timeout);
1167 sci_change_state(&ihost->sm, SCIC_STOPPING);
1168 return SCI_SUCCESS;
1169}
1170
1171/**
1172 * sci_controller_reset() - This method will reset the supplied core
1173 * controller regardless of the state of said controller. This operation is
1174 * considered destructive. In other words, all current operations are wiped
1175 * out. No IO completions for outstanding devices occur. Outstanding IO
1176 * requests are not aborted or completed at the actual remote device.
1177 * @ihost: the handle to the controller object to reset.
1178 *
1179 * Indicate if the controller reset method succeeded or failed in some way.
1180 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1181 * the controller reset operation is unable to complete.
1182 */
1183static enum sci_status sci_controller_reset(struct isci_host *ihost)
1184{
1185 switch (ihost->sm.current_state_id) {
1186 case SCIC_RESET:
1187 case SCIC_READY:
1188 case SCIC_STOPPING:
1189 case SCIC_FAILED:
1190 /*
1191 * The reset operation is not a graceful cleanup, just
1192 * perform the state transition.
1193 */
1194 sci_change_state(&ihost->sm, SCIC_RESETTING);
1195 return SCI_SUCCESS;
1196 default:
1197 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1198 __func__, ihost->sm.current_state_id);
1199 return SCI_FAILURE_INVALID_STATE;
1200 }
1201}
1202
1203static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1204{
1205 u32 index;
1206 enum sci_status status;
1207 enum sci_status phy_status;
1208
1209 status = SCI_SUCCESS;
1210
1211 for (index = 0; index < SCI_MAX_PHYS; index++) {
1212 phy_status = sci_phy_stop(&ihost->phys[index]);
1213
1214 if (phy_status != SCI_SUCCESS &&
1215 phy_status != SCI_FAILURE_INVALID_STATE) {
1216 status = SCI_FAILURE;
1217
1218 dev_warn(&ihost->pdev->dev,
1219 "%s: Controller stop operation failed to stop "
1220 "phy %d because of status %d.\n",
1221 __func__,
1222 ihost->phys[index].phy_index, phy_status);
1223 }
1224 }
1225
1226 return status;
1227}
1228
1229
1230/**
1231 * isci_host_deinit - shutdown frame reception and dma
1232 * @ihost: host to take down
1233 *
1234 * This is called in either the driver shutdown or the suspend path. In
1235 * the shutdown case libsas went through port teardown and normal device
1236 * removal (i.e. physical links stayed up to service scsi_device removal
1237 * commands). In the suspend case we disable the hardware without
1238 * notifying libsas of the link down events since we want libsas to
1239 * remember the domain across the suspend/resume cycle
1240 */
1241void isci_host_deinit(struct isci_host *ihost)
1242{
1243 int i;
1244
1245 /* disable output data selects */
1246 for (i = 0; i < isci_gpio_count(ihost); i++)
1247 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1248
1249 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1250
1251 spin_lock_irq(&ihost->scic_lock);
1252 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1253 spin_unlock_irq(&ihost->scic_lock);
1254
1255 wait_for_stop(ihost);
1256
1257 /* phy stop is after controller stop to allow port and device to
1258 * go idle before shutting down the phys, but the expectation is
1259 * that i/o has been shut off well before we reach this
1260 * function.
1261 */
1262 sci_controller_stop_phys(ihost);
1263
1264 /* disable sgpio: where the above wait should give time for the
1265 * enclosure to sample the gpios going inactive
1266 */
1267 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1268
1269 spin_lock_irq(&ihost->scic_lock);
1270 sci_controller_reset(ihost);
1271 spin_unlock_irq(&ihost->scic_lock);
1272
1273 /* Cancel any/all outstanding port timers */
1274 for (i = 0; i < ihost->logical_port_entries; i++) {
1275 struct isci_port *iport = &ihost->ports[i];
1276 del_timer_sync(&iport->timer.timer);
1277 }
1278
1279 /* Cancel any/all outstanding phy timers */
1280 for (i = 0; i < SCI_MAX_PHYS; i++) {
1281 struct isci_phy *iphy = &ihost->phys[i];
1282 del_timer_sync(&iphy->sata_timer.timer);
1283 }
1284
1285 del_timer_sync(&ihost->port_agent.timer.timer);
1286
1287 del_timer_sync(&ihost->power_control.timer.timer);
1288
1289 del_timer_sync(&ihost->timer.timer);
1290
1291 del_timer_sync(&ihost->phy_timer.timer);
1292}
1293
1294static void __iomem *scu_base(struct isci_host *isci_host)
1295{
1296 struct pci_dev *pdev = isci_host->pdev;
1297 int id = isci_host->id;
1298
1299 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1300}
1301
1302static void __iomem *smu_base(struct isci_host *isci_host)
1303{
1304 struct pci_dev *pdev = isci_host->pdev;
1305 int id = isci_host->id;
1306
1307 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1308}
1309
1310static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1311{
1312 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1313
1314 sci_change_state(&ihost->sm, SCIC_RESET);
1315}
1316
1317static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1318{
1319 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1320
1321 sci_del_timer(&ihost->timer);
1322}
1323
1324#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1325#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1326#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
1327#define INTERRUPT_COALESCE_NUMBER_MAX 256
1328#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
1329#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1330
1331/**
1332 * sci_controller_set_interrupt_coalescence() - This method allows the user to
1333 * configure the interrupt coalescence.
1334 * @ihost: This parameter represents the handle to the controller object
1335 *    whose interrupt coalesce register is overridden.
1336 * @coalesce_number: Used to control the number of entries in the Completion
1337 *    Queue before an interrupt is generated. If the number of entries exceeds
1338 * this number, an interrupt will be generated. The valid range of the input
1339 * is [0, 256]. A setting of 0 results in coalescing being disabled.
1340 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1341 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
1342 * interrupt coalescing timeout.
1343 *
1344 * Indicate if the user successfully set the interrupt coalesce parameters.
1345 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
1346 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1347 */
1348static enum sci_status
1349sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1350 u32 coalesce_number,
1351 u32 coalesce_timeout)
1352{
1353 u8 timeout_encode = 0;
1354 u32 min = 0;
1355 u32 max = 0;
1356
1357 /* Check if the input parameters fall in the range. */
1358 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1359 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1360
1361 /*
1362 * Defined encoding for interrupt coalescing timeout:
1363 * Value Min Max Units
1364 * ----- --- --- -----
1365 * 0 - - Disabled
1366 * 1 13.3 20.0 ns
1367 * 2 26.7 40.0
1368 * 3 53.3 80.0
1369 * 4 106.7 160.0
1370 * 5 213.3 320.0
1371 * 6 426.7 640.0
1372 * 7 853.3 1280.0
1373 * 8 1.7 2.6 us
1374 * 9 3.4 5.1
1375 * 10 6.8 10.2
1376 * 11 13.7 20.5
1377 * 12 27.3 41.0
1378 * 13 54.6 81.9
1379 * 14 109.2 163.8
1380 * 15 218.5 327.7
1381 * 16 436.9 655.4
1382 * 17 873.8 1310.7
1383 * 18 1.7 2.6 ms
1384 * 19 3.5 5.2
1385 * 20 7.0 10.5
1386 * 21 14.0 21.0
1387 * 22 28.0 41.9
1388 * 23 55.9 83.9
1389 * 24 111.8 167.8
1390 * 25 223.7 335.5
1391 * 26 447.4 671.1
1392 * 27 894.8 1342.2
1393 * 28 1.8 2.7 s
1394 * Others Undefined */
1395
1396 /*
1397 * Use the table above to decide the encode of interrupt coalescing timeout
1398 * value for register writing. */
1399 if (coalesce_timeout == 0)
1400 timeout_encode = 0;
1401	else {
1402 /* make the timeout value in unit of (10 ns). */
1403 coalesce_timeout = coalesce_timeout * 100;
1404 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1405 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1406
1407 /* get the encode of timeout for register writing. */
1408 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1409 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1410 timeout_encode++) {
1411 if (min <= coalesce_timeout && max > coalesce_timeout)
1412 break;
1413 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1414 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1415 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1416 break;
1417				else {
1418 timeout_encode++;
1419 break;
1420 }
1421 } else {
1422 max = max * 2;
1423 min = min * 2;
1424 }
1425 }
1426
1427 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1428 /* the value is out of range. */
1429 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1430 }
1431
1432 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1433 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1434 &ihost->smu_registers->interrupt_coalesce_control);
1435
1436
1437 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1438 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1439
1440 return SCI_SUCCESS;
1441}
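/*
 * Worked example of the search above: a requested timeout of 5 us becomes
 * 500 in 10 ns units.  The [min, max) window starts at [85, 128) for
 * encode 7 and doubles each iteration: [170, 256) at 8, [340, 512) at 9.
 * 500 falls inside the encode-9 window, matching the 3.4-5.1 us row of the
 * table.
 */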
1442
1443
1444static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1445{
1446 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1447 u32 val;
1448
1449 /* enable clock gating for power control of the scu unit */
1450 val = readl(&ihost->smu_registers->clock_gating_control);
1451 val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
1452 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
1453 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
1454 val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
1455 writel(val, &ihost->smu_registers->clock_gating_control);
1456
1457 /* set the default interrupt coalescence number and timeout value. */
1458 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1459}
1460
1461static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1462{
1463 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1464
1465 /* disable interrupt coalescence. */
1466 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1467}
1468
1469static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1470{
1471 u32 index;
1472 enum sci_status port_status;
1473 enum sci_status status = SCI_SUCCESS;
1474
1475 for (index = 0; index < ihost->logical_port_entries; index++) {
1476 struct isci_port *iport = &ihost->ports[index];
1477
1478 port_status = sci_port_stop(iport);
1479
1480 if ((port_status != SCI_SUCCESS) &&
1481 (port_status != SCI_FAILURE_INVALID_STATE)) {
1482 status = SCI_FAILURE;
1483
1484 dev_warn(&ihost->pdev->dev,
1485 "%s: Controller stop operation failed to "
1486 "stop port %d because of status %d.\n",
1487 __func__,
1488 iport->logical_port_index,
1489 port_status);
1490 }
1491 }
1492
1493 return status;
1494}
1495
1496static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1497{
1498 u32 index;
1499 enum sci_status status;
1500 enum sci_status device_status;
1501
1502 status = SCI_SUCCESS;
1503
1504 for (index = 0; index < ihost->remote_node_entries; index++) {
1505 if (ihost->device_table[index] != NULL) {
1506			/* TODO: what timeout value do we want to provide to this request? */
1507 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1508
1509 if ((device_status != SCI_SUCCESS) &&
1510 (device_status != SCI_FAILURE_INVALID_STATE)) {
1511 dev_warn(&ihost->pdev->dev,
1512 "%s: Controller stop operation failed "
1513 "to stop device 0x%p because of "
1514 "status %d.\n",
1515 __func__,
1516 ihost->device_table[index], device_status);
1517 }
1518 }
1519 }
1520
1521 return status;
1522}
1523
1524static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1525{
1526 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1527
1528 sci_controller_stop_devices(ihost);
1529 sci_controller_stop_ports(ihost);
1530
1531 if (!sci_controller_has_remote_devices_stopping(ihost))
1532 isci_host_stop_complete(ihost);
1533}
1534
1535static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1536{
1537 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1538
1539 sci_del_timer(&ihost->timer);
1540}
1541
1542static void sci_controller_reset_hardware(struct isci_host *ihost)
1543{
1544	/* Disable interrupts so we don't take any spurious interrupts */
1545 sci_controller_disable_interrupts(ihost);
1546
1547 /* Reset the SCU */
1548 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1549
1550	/* Delay for 1ms before clearing the CQP and UFQPR. */
1551 udelay(1000);
1552
1553 /* The write to the CQGR clears the CQP */
1554 writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1555
1556 /* The write to the UFQGP clears the UFQPR */
1557 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1558
1559 /* clear all interrupts */
1560 writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1561}
1562
1563static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1564{
1565 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1566
1567 sci_controller_reset_hardware(ihost);
1568 sci_change_state(&ihost->sm, SCIC_RESET);
1569}
1570
1571static const struct sci_base_state sci_controller_state_table[] = {
1572 [SCIC_INITIAL] = {
1573 .enter_state = sci_controller_initial_state_enter,
1574 },
1575 [SCIC_RESET] = {},
1576 [SCIC_INITIALIZING] = {},
1577 [SCIC_INITIALIZED] = {},
1578 [SCIC_STARTING] = {
1579 .exit_state = sci_controller_starting_state_exit,
1580 },
1581 [SCIC_READY] = {
1582 .enter_state = sci_controller_ready_state_enter,
1583 .exit_state = sci_controller_ready_state_exit,
1584 },
1585 [SCIC_RESETTING] = {
1586 .enter_state = sci_controller_resetting_state_enter,
1587 },
1588 [SCIC_STOPPING] = {
1589 .enter_state = sci_controller_stopping_state_enter,
1590 .exit_state = sci_controller_stopping_state_exit,
1591 },
1592 [SCIC_FAILED] = {}
1593};
1594
1595static void controller_timeout(struct timer_list *t)
1596{
1597 struct sci_timer *tmr = from_timer(tmr, t, timer);
1598 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1599 struct sci_base_state_machine *sm = &ihost->sm;
1600 unsigned long flags;
1601
1602 spin_lock_irqsave(&ihost->scic_lock, flags);
1603
1604 if (tmr->cancel)
1605 goto done;
1606
1607 if (sm->current_state_id == SCIC_STARTING)
1608 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1609 else if (sm->current_state_id == SCIC_STOPPING) {
1610 sci_change_state(sm, SCIC_FAILED);
1611 isci_host_stop_complete(ihost);
1612	} else	/* TODO: now what do we want to do in this case? */
1613 dev_err(&ihost->pdev->dev,
1614 "%s: Controller timer fired when controller was not "
1615 "in a state being timed.\n",
1616 __func__);
1617
1618done:
1619 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620}
1621
1622static enum sci_status sci_controller_construct(struct isci_host *ihost,
1623 void __iomem *scu_base,
1624 void __iomem *smu_base)
1625{
1626 u8 i;
1627
1628 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1629
1630 ihost->scu_registers = scu_base;
1631 ihost->smu_registers = smu_base;
1632
1633 sci_port_configuration_agent_construct(&ihost->port_agent);
1634
1635 /* Construct the ports for this controller */
1636 for (i = 0; i < SCI_MAX_PORTS; i++)
1637 sci_port_construct(&ihost->ports[i], i, ihost);
1638 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1639
1640 /* Construct the phys for this controller */
1641 for (i = 0; i < SCI_MAX_PHYS; i++) {
1642 /* Add all the PHYs to the dummy port */
1643 sci_phy_construct(&ihost->phys[i],
1644 &ihost->ports[SCI_MAX_PORTS], i);
1645 }
1646
1647 ihost->invalid_phy_mask = 0;
1648
1649 sci_init_timer(&ihost->timer, controller_timeout);
1650
1651 return sci_controller_reset(ihost);
1652}
1653
1654int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1655{
1656 int i;
1657
1658 for (i = 0; i < SCI_MAX_PORTS; i++)
1659 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1660 return -EINVAL;
1661
1662 for (i = 0; i < SCI_MAX_PHYS; i++)
1663 if (oem->phys[i].sas_address.high == 0 &&
1664 oem->phys[i].sas_address.low == 0)
1665 return -EINVAL;
1666
1667 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1668 for (i = 0; i < SCI_MAX_PHYS; i++)
1669 if (oem->ports[i].phy_mask != 0)
1670 return -EINVAL;
1671 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1672 u8 phy_mask = 0;
1673
1674 for (i = 0; i < SCI_MAX_PHYS; i++)
1675 phy_mask |= oem->ports[i].phy_mask;
1676
1677 if (phy_mask == 0)
1678 return -EINVAL;
1679 } else
1680 return -EINVAL;
1681
1682 if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
1683 oem->controller.max_concurr_spin_up < 1)
1684 return -EINVAL;
1685
1686 if (oem->controller.do_enable_ssc) {
1687 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1688 return -EINVAL;
1689
1690 if (version >= ISCI_ROM_VER_1_1) {
1691 u8 test = oem->controller.ssc_sata_tx_spread_level;
1692
1693 switch (test) {
1694 case 0:
1695 case 2:
1696 case 3:
1697 case 6:
1698 case 7:
1699 break;
1700 default:
1701 return -EINVAL;
1702 }
1703
1704 test = oem->controller.ssc_sas_tx_spread_level;
1705 if (oem->controller.ssc_sas_tx_type == 0) {
1706 switch (test) {
1707 case 0:
1708 case 2:
1709 case 3:
1710 break;
1711 default:
1712 return -EINVAL;
1713 }
1714 } else if (oem->controller.ssc_sas_tx_type == 1) {
1715 switch (test) {
1716 case 0:
1717 case 3:
1718 case 6:
1719 break;
1720 default:
1721 return -EINVAL;
1722 }
1723 }
1724 }
1725 }
1726
1727 return 0;
1728}
1729
1730static u8 max_spin_up(struct isci_host *ihost)
1731{
1732 if (ihost->user_parameters.max_concurr_spinup)
1733 return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1734 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1735 else
1736 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1737 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1738}
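/*
 * For example (values assumed for illustration): if the module-supplied
 * user_parameters.max_concurr_spinup is 0 the OEM setting wins, so an OEM
 * value of 4 allows at most min(4, MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
 * phys to be granted power per power control interval.
 */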
1739
1740static void power_control_timeout(struct timer_list *t)
1741{
1742 struct sci_timer *tmr = from_timer(tmr, t, timer);
1743 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1744 struct isci_phy *iphy;
1745 unsigned long flags;
1746 u8 i;
1747
1748 spin_lock_irqsave(&ihost->scic_lock, flags);
1749
1750 if (tmr->cancel)
1751 goto done;
1752
1753 ihost->power_control.phys_granted_power = 0;
1754
1755 if (ihost->power_control.phys_waiting == 0) {
1756 ihost->power_control.timer_started = false;
1757 goto done;
1758 }
1759
1760 for (i = 0; i < SCI_MAX_PHYS; i++) {
1761
1762 if (ihost->power_control.phys_waiting == 0)
1763 break;
1764
1765 iphy = ihost->power_control.requesters[i];
1766 if (iphy == NULL)
1767 continue;
1768
1769 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1770 break;
1771
1772 ihost->power_control.requesters[i] = NULL;
1773 ihost->power_control.phys_waiting--;
1774 ihost->power_control.phys_granted_power++;
1775 sci_phy_consume_power_handler(iphy);
1776
1777 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1778 u8 j;
1779
1780 for (j = 0; j < SCI_MAX_PHYS; j++) {
1781 struct isci_phy *requester = ihost->power_control.requesters[j];
1782
1783 /*
1784 * Search the power_control queue to see if there are other phys
1785 * attached to the same remote device. If found, take all of
1786 * them out of await_sas_power state.
1787 */
1788 if (requester != NULL && requester != iphy) {
1789 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1790 iphy->frame_rcvd.iaf.sas_addr,
1791 sizeof(requester->frame_rcvd.iaf.sas_addr));
1792
1793 if (other == 0) {
1794 ihost->power_control.requesters[j] = NULL;
1795 ihost->power_control.phys_waiting--;
1796 sci_phy_consume_power_handler(requester);
1797 }
1798 }
1799 }
1800 }
1801 }
1802
1803 /*
1804 * It doesn't matter if the power list is empty, we need to start the
1805 * timer in case another phy becomes ready.
1806 */
1807 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1808 ihost->power_control.timer_started = true;
1809
1810done:
1811 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1812}
1813
1814void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1815 struct isci_phy *iphy)
1816{
1817 BUG_ON(iphy == NULL);
1818
1819 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1820 ihost->power_control.phys_granted_power++;
1821 sci_phy_consume_power_handler(iphy);
1822
1823 /*
1824 * stop and start the power_control timer. When the timer fires, the
1825		 * phys_granted_power count will be reset to 0
1826 */
1827 if (ihost->power_control.timer_started)
1828 sci_del_timer(&ihost->power_control.timer);
1829
1830 sci_mod_timer(&ihost->power_control.timer,
1831 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1832 ihost->power_control.timer_started = true;
1833
1834 } else {
1835 /*
1836		 * If a phy attached to the same SAS address as this phy is
1837		 * already in the READY state, this phy does not need to wait.
1838 */
1839 u8 i;
1840 struct isci_phy *current_phy;
1841
1842 for (i = 0; i < SCI_MAX_PHYS; i++) {
1843 u8 other;
1844 current_phy = &ihost->phys[i];
1845
1846 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1847 iphy->frame_rcvd.iaf.sas_addr,
1848 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1849
1850 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1851 current_phy->protocol == SAS_PROTOCOL_SSP &&
1852 other == 0) {
1853 sci_phy_consume_power_handler(iphy);
1854 break;
1855 }
1856 }
1857
1858 if (i == SCI_MAX_PHYS) {
1859 /* Add the phy in the waiting list */
1860 ihost->power_control.requesters[iphy->phy_index] = iphy;
1861 ihost->power_control.phys_waiting++;
1862 }
1863 }
1864}
1865
1866void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1867 struct isci_phy *iphy)
1868{
1869 BUG_ON(iphy == NULL);
1870
1871 if (ihost->power_control.requesters[iphy->phy_index])
1872 ihost->power_control.phys_waiting--;
1873
1874 ihost->power_control.requesters[iphy->phy_index] = NULL;
1875}
1876
1877static int is_long_cable(int phy, unsigned char selection_byte)
1878{
1879 return !!(selection_byte & (1 << phy));
1880}
1881
1882static int is_medium_cable(int phy, unsigned char selection_byte)
1883{
1884 return !!(selection_byte & (1 << (phy + 4)));
1885}
1886
1887static enum cable_selections decode_selection_byte(
1888 int phy,
1889 unsigned char selection_byte)
1890{
1891 return ((selection_byte & (1 << phy)) ? 1 : 0)
1892 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
1893}
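/*
 * Decoding example for phy 1: bit 1 contributes 1 (long) and bit 5
 * contributes 2 (medium), so a selection byte of 0x02 decodes to
 * long_cable, 0x20 to medium_cable, and 0x22 to 3, i.e. undefined_cable
 * (treated as long, per lookup_cable_names() below).
 */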
1894
1895static unsigned char *to_cable_select(struct isci_host *ihost)
1896{
1897 if (is_cable_select_overridden())
1898 return ((unsigned char *)&cable_selection_override)
1899 + ihost->id;
1900 else
1901 return &ihost->oem_parameters.controller.cable_selection_mask;
1902}
1903
1904enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1905{
1906 return decode_selection_byte(phy, *to_cable_select(ihost));
1907}
1908
1909char *lookup_cable_names(enum cable_selections selection)
1910{
1911 static char *cable_names[] = {
1912 [short_cable] = "short",
1913 [long_cable] = "long",
1914 [medium_cable] = "medium",
1915 [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
1916 };
1917 return (selection <= undefined_cable) ? cable_names[selection]
1918 : cable_names[undefined_cable];
1919}
1920
1921#define AFE_REGISTER_WRITE_DELAY 10
1922
1923static void sci_controller_afe_initialization(struct isci_host *ihost)
1924{
1925 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1926 const struct sci_oem_params *oem = &ihost->oem_parameters;
1927 struct pci_dev *pdev = ihost->pdev;
1928 u32 afe_status;
1929 u32 phy_id;
1930 unsigned char cable_selection_mask = *to_cable_select(ihost);
1931
1932 /* Clear DFX Status registers */
1933 writel(0x0081000f, &afe->afe_dfx_master_control0);
1934 udelay(AFE_REGISTER_WRITE_DELAY);
1935
1936 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1937 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1938 * Timer, PM Stagger Timer
1939 */
1940 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1941 udelay(AFE_REGISTER_WRITE_DELAY);
1942 }
1943
1944 /* Configure bias currents to normal */
1945 if (is_a2(pdev))
1946 writel(0x00005A00, &afe->afe_bias_control);
1947 else if (is_b0(pdev) || is_c0(pdev))
1948 writel(0x00005F00, &afe->afe_bias_control);
1949 else if (is_c1(pdev))
1950 writel(0x00005500, &afe->afe_bias_control);
1951
1952 udelay(AFE_REGISTER_WRITE_DELAY);
1953
1954 /* Enable PLL */
1955 if (is_a2(pdev))
1956 writel(0x80040908, &afe->afe_pll_control0);
1957 else if (is_b0(pdev) || is_c0(pdev))
1958 writel(0x80040A08, &afe->afe_pll_control0);
1959 else if (is_c1(pdev)) {
1960 writel(0x80000B08, &afe->afe_pll_control0);
1961 udelay(AFE_REGISTER_WRITE_DELAY);
1962 writel(0x00000B08, &afe->afe_pll_control0);
1963 udelay(AFE_REGISTER_WRITE_DELAY);
1964 writel(0x80000B08, &afe->afe_pll_control0);
1965 }
1966
1967 udelay(AFE_REGISTER_WRITE_DELAY);
1968
1969 /* Wait for the PLL to lock */
1970 do {
1971 afe_status = readl(&afe->afe_common_block_status);
1972 udelay(AFE_REGISTER_WRITE_DELAY);
1973 } while ((afe_status & 0x00001000) == 0);
1974
1975 if (is_a2(pdev)) {
1976 /* Shorten SAS SNW lock time (RxLock timer value from 76
1977 * us to 50 us)
1978 */
1979 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1980 udelay(AFE_REGISTER_WRITE_DELAY);
1981 }
1982
1983 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1984 struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
1985 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1986 int cable_length_long =
1987 is_long_cable(phy_id, cable_selection_mask);
1988 int cable_length_medium =
1989 is_medium_cable(phy_id, cable_selection_mask);
1990
1991 if (is_a2(pdev)) {
1992 /* All defaults, except the Receive Word
1993			 * Alignment/Comma Detect Enable....(0xe800)
1994 */
1995 writel(0x00004512, &xcvr->afe_xcvr_control0);
1996 udelay(AFE_REGISTER_WRITE_DELAY);
1997
1998 writel(0x0050100F, &xcvr->afe_xcvr_control1);
1999 udelay(AFE_REGISTER_WRITE_DELAY);
2000 } else if (is_b0(pdev)) {
2001 /* Configure transmitter SSC parameters */
2002 writel(0x00030000, &xcvr->afe_tx_ssc_control);
2003 udelay(AFE_REGISTER_WRITE_DELAY);
2004 } else if (is_c0(pdev)) {
2005 /* Configure transmitter SSC parameters */
2006 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2007 udelay(AFE_REGISTER_WRITE_DELAY);
2008
2009 /* All defaults, except the Receive Word
2010			 * Alignment/Comma Detect Enable....(0xe800)
2011 */
2012 writel(0x00014500, &xcvr->afe_xcvr_control0);
2013 udelay(AFE_REGISTER_WRITE_DELAY);
2014 } else if (is_c1(pdev)) {
2015 /* Configure transmitter SSC parameters */
2016 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2017 udelay(AFE_REGISTER_WRITE_DELAY);
2018
2019 /* All defaults, except the Receive Word
2020			 * Alignment/Comma Detect Enable....(0xe800)
2021 */
2022 writel(0x0001C500, &xcvr->afe_xcvr_control0);
2023 udelay(AFE_REGISTER_WRITE_DELAY);
2024 }
2025
2026 /* Power up TX and RX out from power down (PWRDNTX and
2027 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
2028 */
2029 if (is_a2(pdev))
2030 writel(0x000003F0, &xcvr->afe_channel_control);
2031 else if (is_b0(pdev)) {
2032 writel(0x000003D7, &xcvr->afe_channel_control);
2033 udelay(AFE_REGISTER_WRITE_DELAY);
2034
2035 writel(0x000003D4, &xcvr->afe_channel_control);
2036 } else if (is_c0(pdev)) {
2037 writel(0x000001E7, &xcvr->afe_channel_control);
2038 udelay(AFE_REGISTER_WRITE_DELAY);
2039
2040 writel(0x000001E4, &xcvr->afe_channel_control);
2041 } else if (is_c1(pdev)) {
2042 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2043 &xcvr->afe_channel_control);
2044 udelay(AFE_REGISTER_WRITE_DELAY);
2045
2046 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2047 &xcvr->afe_channel_control);
2048 }
2049 udelay(AFE_REGISTER_WRITE_DELAY);
2050
2051 if (is_a2(pdev)) {
2052 /* Enable TX equalization (0xe824) */
2053 writel(0x00040000, &xcvr->afe_tx_control);
2054 udelay(AFE_REGISTER_WRITE_DELAY);
2055 }
2056
2057 if (is_a2(pdev) || is_b0(pdev))
2058 /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2059 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2060 * Enabled) ....(0xe800)
2061 */
2062 writel(0x00004100, &xcvr->afe_xcvr_control0);
2063 else if (is_c0(pdev))
2064 writel(0x00014100, &xcvr->afe_xcvr_control0);
2065 else if (is_c1(pdev))
2066 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2067 udelay(AFE_REGISTER_WRITE_DELAY);
2068
2069 /* Leave DFE/FFE on */
2070 if (is_a2(pdev))
2071 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2072 else if (is_b0(pdev)) {
2073 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2074 udelay(AFE_REGISTER_WRITE_DELAY);
2075 /* Enable TX equalization (0xe824) */
2076 writel(0x00040000, &xcvr->afe_tx_control);
2077 } else if (is_c0(pdev)) {
2078 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2079 udelay(AFE_REGISTER_WRITE_DELAY);
2080
2081 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2082 udelay(AFE_REGISTER_WRITE_DELAY);
2083
2084 /* Enable TX equalization (0xe824) */
2085 writel(0x00040000, &xcvr->afe_tx_control);
2086 } else if (is_c1(pdev)) {
2087 writel(cable_length_long ? 0x01500C0C :
2088 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2089 &xcvr->afe_xcvr_control1);
2090 udelay(AFE_REGISTER_WRITE_DELAY);
2091
2092 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2093 udelay(AFE_REGISTER_WRITE_DELAY);
2094
2095 writel(cable_length_long ? 0x33091C1F :
2096 cable_length_medium ? 0x3315181F : 0x2B17161F,
2097 &xcvr->afe_rx_ssc_control0);
2098 udelay(AFE_REGISTER_WRITE_DELAY);
2099
2100 /* Enable TX equalization (0xe824) */
2101 writel(0x00040000, &xcvr->afe_tx_control);
2102 }
2103
2104 udelay(AFE_REGISTER_WRITE_DELAY);
2105
2106 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2107 udelay(AFE_REGISTER_WRITE_DELAY);
2108
2109 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2110 udelay(AFE_REGISTER_WRITE_DELAY);
2111
2112 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2113 udelay(AFE_REGISTER_WRITE_DELAY);
2114
2115 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2116 udelay(AFE_REGISTER_WRITE_DELAY);
2117 }
2118
2119 /* Transfer control to the PEs */
2120 writel(0x00010f00, &afe->afe_dfx_master_control0);
2121 udelay(AFE_REGISTER_WRITE_DELAY);
2122}
2123
2124static void sci_controller_initialize_power_control(struct isci_host *ihost)
2125{
2126 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2127
2128 memset(ihost->power_control.requesters, 0,
2129 sizeof(ihost->power_control.requesters));
2130
2131 ihost->power_control.phys_waiting = 0;
2132 ihost->power_control.phys_granted_power = 0;
2133}
2134
2135static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2136{
2137 struct sci_base_state_machine *sm = &ihost->sm;
2138 enum sci_status result = SCI_FAILURE;
2139 unsigned long i, state, val;
2140
2141 if (ihost->sm.current_state_id != SCIC_RESET) {
2142 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2143 __func__, ihost->sm.current_state_id);
2144 return SCI_FAILURE_INVALID_STATE;
2145 }
2146
2147 sci_change_state(sm, SCIC_INITIALIZING);
2148
2149 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2150
2151 ihost->next_phy_to_start = 0;
2152 ihost->phy_startup_timer_pending = false;
2153
2154 sci_controller_initialize_power_control(ihost);
2155
2156 /*
2157 * There is nothing to do here for B0 since we do not have to
2158 * program the AFE registers.
2159	 * TODO: the AFE settings are supposed to be correct for the B0 but
2160	 * presently they seem to be wrong. */
2161 sci_controller_afe_initialization(ihost);
2162
2163
2164 /* Take the hardware out of reset */
2165 writel(0, &ihost->smu_registers->soft_reset_control);
2166
2167 /*
2168	 * TODO: provide a meaningful error code for hardware failure, e.g.
2169	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2170 for (i = 100; i >= 1; i--) {
2171 u32 status;
2172
2173 /* Loop until the hardware reports success */
2174 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2175 status = readl(&ihost->smu_registers->control_status);
2176
2177 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2178 break;
2179 }
2180 if (i == 0)
2181 goto out;
2182
2183 /*
2184	 * Determine the actual device capacities that the
2185 * hardware will support */
2186 val = readl(&ihost->smu_registers->device_context_capacity);
2187
2188 /* Record the smaller of the two capacity values */
2189 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2190 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2191 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2192
2193 /*
2194 * Make all PEs that are unassigned match up with the
2195 * logical ports
2196 */
2197 for (i = 0; i < ihost->logical_port_entries; i++) {
2198 struct scu_port_task_scheduler_group_registers __iomem
2199 *ptsg = &ihost->scu_registers->peg0.ptsg;
2200
2201 writel(i, &ptsg->protocol_engine[i]);
2202 }
2203
2204 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2205 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2206 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2207 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2208
2209 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2210 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2211 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2212
2213 /*
2214 * Initialize the PHYs before the PORTs because the PHY registers
2215 * are accessed during the port initialization.
2216 */
2217 for (i = 0; i < SCI_MAX_PHYS; i++) {
2218 result = sci_phy_initialize(&ihost->phys[i],
2219 &ihost->scu_registers->peg0.pe[i].tl,
2220 &ihost->scu_registers->peg0.pe[i].ll);
2221 if (result != SCI_SUCCESS)
2222 goto out;
2223 }
2224
2225 for (i = 0; i < ihost->logical_port_entries; i++) {
2226 struct isci_port *iport = &ihost->ports[i];
2227
2228 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2229 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2230 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2231 }
2232
2233 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2234
2235 out:
2236 /* Advance the controller state machine */
2237 if (result == SCI_SUCCESS)
2238 state = SCIC_INITIALIZED;
2239 else
2240 state = SCIC_FAILED;
2241 sci_change_state(sm, state);
2242
2243 return result;
2244}
2245
2246static int sci_controller_dma_alloc(struct isci_host *ihost)
2247{
2248 struct device *dev = &ihost->pdev->dev;
2249 size_t size;
2250 int i;
2251
2252 /* detect re-initialization */
2253 if (ihost->completion_queue)
2254 return 0;
2255
2256 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2257 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2258 GFP_KERNEL);
2259 if (!ihost->completion_queue)
2260 return -ENOMEM;
2261
2262 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2263 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2264 GFP_KERNEL);
2265
2266 if (!ihost->remote_node_context_table)
2267 return -ENOMEM;
2268
2269	size = ihost->task_context_entries * sizeof(struct scu_task_context);
2270 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2271 GFP_KERNEL);
2272 if (!ihost->task_context_table)
2273 return -ENOMEM;
2274
2275 size = SCI_UFI_TOTAL_SIZE;
2276 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2277 if (!ihost->ufi_buf)
2278 return -ENOMEM;
2279
2280 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2281 struct isci_request *ireq;
2282 dma_addr_t dma;
2283
2284 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2285 if (!ireq)
2286 return -ENOMEM;
2287
2288 ireq->tc = &ihost->task_context_table[i];
2289 ireq->owning_controller = ihost;
2290 ireq->request_daddr = dma;
2291 ireq->isci_host = ihost;
2292 ihost->reqs[i] = ireq;
2293 }
2294
2295 return 0;
2296}
2297
2298static int sci_controller_mem_init(struct isci_host *ihost)
2299{
2300 int err = sci_controller_dma_alloc(ihost);
2301
2302 if (err)
2303 return err;
2304
2305 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2306 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2307
2308 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2309 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2310
2311 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2312 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2313
2314 sci_unsolicited_frame_control_construct(ihost);
2315
2316 /*
2317 * Inform the silicon as to the location of the UF headers and
2318 * address table.
2319 */
2320 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2321 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2322 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2323 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2324
2325 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2326 &ihost->scu_registers->sdma.uf_address_table_lower);
2327 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2328 &ihost->scu_registers->sdma.uf_address_table_upper);
2329
2330 return 0;
2331}
2332
2333/**
2334 * isci_host_init - (re-)initialize hardware and internal (private) state
2335 * @ihost: host to init
2336 *
2337 * Any public facing objects (like asd_sas_port and asd_sas_phys), or
2338 * one-time initialization objects like locks and waitqueues, are
2339 * not touched (they are initialized in isci_host_alloc)
2340 */
2341int isci_host_init(struct isci_host *ihost)
2342{
2343 int i, err;
2344 enum sci_status status;
2345
2346 spin_lock_irq(&ihost->scic_lock);
2347 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2348 spin_unlock_irq(&ihost->scic_lock);
2349 if (status != SCI_SUCCESS) {
2350 dev_err(&ihost->pdev->dev,
2351 "%s: sci_controller_construct failed - status = %x\n",
2352 __func__,
2353 status);
2354 return -ENODEV;
2355 }
2356
2357 spin_lock_irq(&ihost->scic_lock);
2358 status = sci_controller_initialize(ihost);
2359 spin_unlock_irq(&ihost->scic_lock);
2360 if (status != SCI_SUCCESS) {
2361 dev_warn(&ihost->pdev->dev,
2362 "%s: sci_controller_initialize failed -"
2363 " status = 0x%x\n",
2364 __func__, status);
2365 return -ENODEV;
2366 }
2367
2368 err = sci_controller_mem_init(ihost);
2369 if (err)
2370 return err;
2371
2372 /* enable sgpio */
2373 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2374 for (i = 0; i < isci_gpio_count(ihost); i++)
2375 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2376 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2377
2378 return 0;
2379}
2380
2381void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2382 struct isci_phy *iphy)
2383{
2384 switch (ihost->sm.current_state_id) {
2385 case SCIC_STARTING:
2386 sci_del_timer(&ihost->phy_timer);
2387 ihost->phy_startup_timer_pending = false;
2388 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2389 iport, iphy);
2390 sci_controller_start_next_phy(ihost);
2391 break;
2392 case SCIC_READY:
2393 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2394 iport, iphy);
2395 break;
2396 default:
2397 dev_dbg(&ihost->pdev->dev,
2398 "%s: SCIC Controller linkup event from phy %d in "
2399 "unexpected state %d\n", __func__, iphy->phy_index,
2400 ihost->sm.current_state_id);
2401 }
2402}
2403
2404void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2405 struct isci_phy *iphy)
2406{
2407 switch (ihost->sm.current_state_id) {
2408 case SCIC_STARTING:
2409 case SCIC_READY:
2410 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2411 iport, iphy);
2412 break;
2413 default:
2414 dev_dbg(&ihost->pdev->dev,
2415 "%s: SCIC Controller linkdown event from phy %d in "
2416 "unexpected state %d\n",
2417 __func__,
2418 iphy->phy_index,
2419 ihost->sm.current_state_id);
2420 }
2421}
2422
2423bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2424{
2425 u32 index;
2426
2427 for (index = 0; index < ihost->remote_node_entries; index++) {
2428 if ((ihost->device_table[index] != NULL) &&
2429 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2430 return true;
2431 }
2432
2433 return false;
2434}
2435
2436void sci_controller_remote_device_stopped(struct isci_host *ihost,
2437 struct isci_remote_device *idev)
2438{
2439 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2440 dev_dbg(&ihost->pdev->dev,
2441 "SCIC Controller 0x%p remote device stopped event "
2442 "from device 0x%p in unexpected state %d\n",
2443 ihost, idev,
2444 ihost->sm.current_state_id);
2445 return;
2446 }
2447
2448 if (!sci_controller_has_remote_devices_stopping(ihost))
2449 isci_host_stop_complete(ihost);
2450}
2451
2452void sci_controller_post_request(struct isci_host *ihost, u32 request)
2453{
2454 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2455 __func__, ihost->id, request);
2456
2457 writel(request, &ihost->smu_registers->post_context_port);
2458}
2459
2460struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2461{
2462 u16 task_index;
2463 u16 task_sequence;
2464
2465 task_index = ISCI_TAG_TCI(io_tag);
2466
2467 if (task_index < ihost->task_context_entries) {
2468 struct isci_request *ireq = ihost->reqs[task_index];
2469
2470 if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2471 task_sequence = ISCI_TAG_SEQ(io_tag);
2472
2473 if (task_sequence == ihost->io_request_sequence[task_index])
2474 return ireq;
2475 }
2476 }
2477
2478 return NULL;
2479}
2480
2481/**
2482 * sci_controller_allocate_remote_node_context() - This method allocates a
2483 *    remote node index and reserves the remote node context space for use.
2484 *    This method can fail if no more remote node indexes are available.
2485 * @ihost: This is the controller object which contains the set of
2486 *    free remote node ids
2487 * @idev: This is the device object which is requesting a remote node
2488 *    id
2489 * @node_id: This is the remote node id that is assigned to the device if one
2490 *    is available
2491 *
2492 * Returns SCI_SUCCESS, or SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no
2493 * remote node index available.
2494 */
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							    struct isci_remote_device *idev,
							    u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = sci_remote_device_node_count(idev);

	node_index = sci_remote_node_table_allocate_remote_node(
		&ihost->available_remote_nodes, remote_node_count
		);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		ihost->device_table[node_index] = idev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

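/**
 * sci_controller_free_remote_node_context() - Return a remote node index
 *    to the free pool. The index is only released if @node_id still maps
 *    to @idev in the device table, so a double free (or a free with a
 *    stale id) is silently ignored.
 * @ihost: the controller which owns the remote node table.
 * @idev: the device that is releasing its remote node id.
 * @node_id: the remote node id previously returned by
 *    sci_controller_allocate_remote_node_context().
 */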
void sci_controller_free_remote_node_context(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     u16 node_id)
{
	u32 remote_node_count = sci_remote_device_node_count(idev);

	if (ihost->device_table[node_id] == idev) {
		ihost->device_table[node_id] = NULL;

		sci_remote_node_table_release_remote_node_index(
			&ihost->available_remote_nodes, remote_node_count, node_id
			);
	}
}

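/*
 * Reassemble a SATA D2H FIS for the upper layers: the first dword of the
 * FIS arrives in the frame header and the remainder in the frame buffer,
 * so the response is stitched back together from the two pieces.
 */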
void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer)
{
	/* XXX type safety? */
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

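/*
 * Release an unsolicited frame back to the hardware. If returning this
 * frame advanced the get pointer, publish the new get value to the SCU's
 * unsolicited frame get-pointer register so the silicon can reuse the
 * buffer.
 */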
void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
		writel(ihost->uf_control.get,
		       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

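/*
 * The task context index (TCI) pool is a circular buffer indexed by
 * tci_head/tci_tail. Both counters free-run and are masked with
 * SCI_MAX_IO_REQUESTS - 1 on access (which requires SCI_MAX_IO_REQUESTS
 * to be a power of two), the standard linux/circ_buf.h idiom, so
 * CIRC_SPACE() can distinguish full from empty without a separate count.
 */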
void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

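/**
 * isci_alloc_tag() - Allocate an IO tag. A tag pairs a task context index
 *    from the TCI pool with that slot's current sequence number, so a tag
 *    becomes stale once the slot is freed and its sequence advances.
 * @ihost: the controller to allocate the tag from.
 *
 * Return: a tag built via ISCI_TAG(seq, tci), or
 * SCI_CONTROLLER_INVALID_IO_TAG if the pool is exhausted.
 *
 * Illustrative caller pattern (a sketch only, not a real call site):
 *
 *	u16 tag = isci_alloc_tag(ihost);
 *	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *		return -EBUSY;	(or retry/queue, depending on the caller)
 *	... build and start the request, then on completion ...
 *	isci_free_tag(ihost, tag);
 */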
u16 isci_alloc_tag(struct isci_host *ihost)
{
	if (isci_tci_space(ihost)) {
		u16 tci = isci_tci_alloc(ihost);
		u8 seq = ihost->io_request_sequence[tci];

		return ISCI_TAG(seq, tci);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}

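/**
 * isci_free_tag() - Release an IO tag back to the pool. Frees the TCI and
 *    bumps the slot's sequence number (modulo SCI_MAX_SEQ) so any stale
 *    copy of the old tag is invalidated.
 * @ihost: the controller that owns the tag.
 * @io_tag: the tag to free.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_IO_TAG if no tags are
 * outstanding or the tag's sequence number is stale (double free).
 */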
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq == ihost->io_request_sequence[tci]) {
		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);

		isci_tci_free(ihost, tci);

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INVALID_IO_TAG;
}

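/**
 * sci_controller_start_io() - Start a previously constructed IO request.
 *    Only legal in SCIC_READY; on success the request is marked active
 *    and its task context is posted to the hardware.
 * @ihost: the controller that will execute the IO.
 * @idev: the remote device targeted by the IO.
 * @ireq: the constructed request to start.
 *
 * Return: SCI_SUCCESS, SCI_FAILURE_INVALID_STATE if the controller is not
 * ready, or the failure code from sci_remote_device_start_io().
 */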
enum sci_status sci_controller_start_io(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_io(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
						 struct isci_remote_device *idev,
						 struct isci_request *ireq)
{
	/* Terminate an ongoing (i.e. started) core IO request. This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}
	status = sci_io_request_terminate(ireq);

	dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
		__func__, status, ireq, ireq->flags);

	if ((status == SCI_SUCCESS) &&
	    !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
		/* Utilize the original post context command and OR in the
		 * POST_TC_ABORT request sub-type.
		 */
		sci_controller_post_request(
			ihost, ireq->post_context |
			SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	}
	return status;
}

2658/**
2659 * sci_controller_complete_io() - This method will perform core specific
2660 * completion operations for an IO request. After this method is invoked,
2661 * the user should consider the IO request as invalid until it is properly
2662 * reused (i.e. re-constructed).
2663 * @ihost: The handle to the controller object for which to complete the
2664 * IO request.
2665 * @idev: The handle to the remote device object for which to complete
2666 * the IO request.
2667 * @ireq: the handle to the io request object to complete.
2668 */
2669enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2670 struct isci_remote_device *idev,
2671 struct isci_request *ireq)
2672{
2673 enum sci_status status;
2674 u16 index;
2675
2676 switch (ihost->sm.current_state_id) {
2677 case SCIC_STOPPING:
2678 /* XXX: Implement this function */
2679 return SCI_FAILURE;
2680 case SCIC_READY:
2681 status = sci_remote_device_complete_io(ihost, idev, ireq);
2682 if (status != SCI_SUCCESS)
2683 return status;
2684
2685 index = ISCI_TAG_TCI(ireq->io_tag);
2686 clear_bit(IREQ_ACTIVE, &ireq->flags);
2687 return SCI_SUCCESS;
2688 default:
2689 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2690 __func__, ihost->sm.current_state_id);
2691 return SCI_FAILURE_INVALID_STATE;
2692 }
2693
2694}
2695
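/**
 * sci_controller_continue_io() - Re-post a request that was started
 *    earlier but whose task context could not be posted at start time.
 *    The request is marked active again and its original post context is
 *    handed back to the hardware; the caller is responsible for ensuring
 *    the request is in a continuable state.
 * @ireq: the request to continue; its owning controller must be ready.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE if the controller is
 * not in SCIC_READY.
 */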
enum sci_status sci_controller_continue_io(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

2711/**
2712 * sci_controller_start_task() - This method is called by the SCIC user to
2713 * send/start a framework task management request.
2714 * @controller: the handle to the controller object for which to start the task
2715 * management request.
2716 * @remote_device: the handle to the remote device object for which to start
2717 * the task management request.
2718 * @task_request: the handle to the task request object to start.
2719 */
enum sci_status sci_controller_start_task(struct isci_host *ihost,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_task(ihost, idev, ireq);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on
		 * starting the request (it will post the TC when the RNC
		 * is resumed).
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);
		sci_controller_post_request(ihost, ireq->post_context);
		break;
	default:
		break;
	}

	return status;
}

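/*
 * Write the SGPIO TX_GP general-purpose stream. Each output_data_select
 * register holds three 4-bit ODx.n fields; 0x444 sets bit 2 (per the
 * comment below, the 'invert' bit) of every field, and the loop clears
 * that bit, via the (i << 2) + 2 shift, for each OD whose general-purpose
 * bit is set in the caller's write_data. The field layout is inferred
 * from the shift math here; the SCU SGPIO documentation is authoritative
 * for the encoding.
 */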
static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
{
	int d;

	/* no support for TX_GP_CFG */
	if (reg_index == 0)
		return -EINVAL;

	for (d = 0; d < isci_gpio_count(ihost); d++) {
		u32 val = 0x444; /* all ODx.n clear */
		int i;

		for (i = 0; i < 3; i++) {
			int bit;

			bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
						       write_data, reg_index,
						       reg_count);
			if (bit < 0)
				break;

			/* if od is set, clear the 'invert' bit */
			val &= ~(bit << ((i << 2) + 2));
		}

		if (i < 3)
			break;
		writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
	}

	/* unless reg_index is > 1, we should always be able to write at
	 * least one register
	 */
	return d > 0;
}

int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
		    u8 reg_count, u8 *write_data)
{
	struct isci_host *ihost = sas_ha->lldd_ha;
	int written;

	switch (reg_type) {
	case SAS_GPIO_REG_TX_GP:
		written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
		break;
	default:
		written = -EINVAL;
	}

	return written;
}