// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>

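/*
 * PM support for the legacy IDE layer.
 *
 * generic_ide_suspend() and generic_ide_resume() are the bus-level
 * suspend/resume hooks (registered via ide_bus_type in ide.c).  Each
 * allocates a private request carrying a struct ide_pm_state and walks
 * the IDE_PM_* state machine in ide_start_power_step() /
 * ide_complete_power_step():
 *
 *	suspend: IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *		 (a PM_EVENT_FREEZE suspend stops after the cache flush)
 *	resume:  IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA
 *		 -> IDE_PM_COMPLETED
 */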
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}

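/*
 * The resume request cannot simply go through blk_execute_rq(): the queue
 * was stopped by ide_complete_pm_rq() at suspend time and is only started
 * again from ide_check_pm_state() once the resume request itself begins.
 * ide_pm_execute_rq() therefore queues the request at the head, runs the
 * (still stopped) queue unconditionally and waits for completion.
 */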
static void ide_end_sync_rq(struct request *rq, blk_status_t error)
{
	complete(rq->end_io_data);
}

static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}

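/*
 * The resume request is allocated with BLK_MQ_REQ_PREEMPT, which marks it
 * RQF_PREEMPT so the dispatch path in ide-io.c still lets it through while
 * the drive is flagged IDE_DFLAG_BLOCKED for power management.
 */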
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request_flags(drive->queue, REQ_OP_DRV_IN,
				   BLK_MQ_REQ_PREEMPT);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}

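/*
 * Sketch of how a media driver hooks into the resume path above; the names
 * below are illustrative only.  A driver provides an optional resume
 * callback in its struct ide_driver, and generic_ide_resume() calls it once
 * the PM resume request has completed successfully:
 *
 *	static void mydrv_resume(ide_drive_t *drive)
 *	{
 *		// revalidate the medium, restart driver-level state, ...
 *	}
 *
 *	static struct ide_driver mydrv_driver = {
 *		...
 *		.resume	= mydrv_resume,
 *	};
 */

/*
 * Advance the PM state machine: map the step that just finished onto the
 * next step for this request (the suspend and resume transitions are
 * summarized at the top of this file).
 */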
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
	       drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

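/*
 * Issue the ATA command for the current PM step as a no-data taskfile, or,
 * when nothing needs to be sent to the device for this step, complete the
 * step immediately and return ide_stopped.
 */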
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}

/**
 * ide_complete_pm_rq - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, BLK_STS_OK, 0))
		BUG();
}

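/*
 * Called from the request start path (start_request() in ide-io.c) before a
 * PM request is handed to the state machine: block the drive when a suspend
 * sequence begins, and on resume wait for the port to report not-busy and
 * restart the stopped queue before any normal I/O is allowed through.
 */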
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}