v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2016 CNEX Labs
  4 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
  5 *                  Matias Bjorling <matias@cnexlabs.com>
  6 *
  7 * This program is free software; you can redistribute it and/or
  8 * modify it under the terms of the GNU General Public License version
  9 * 2 as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful, but
 12 * WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 14 * General Public License for more details.
 15 *
 16 * pblk-write.c - pblk's write path from write buffer to media
 17 */
 18
 19#include "pblk.h"
 20#include "pblk-trace.h"
 21
 22static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
 23				    struct pblk_c_ctx *c_ctx)
 24{
 25	struct bio *original_bio;
 26	struct pblk_rb *rwb = &pblk->rwb;
 27	unsigned long ret;
 28	int i;
 29
 30	for (i = 0; i < c_ctx->nr_valid; i++) {
 31		struct pblk_w_ctx *w_ctx;
 32		int pos = c_ctx->sentry + i;
 33		int flags;
 34
 35		w_ctx = pblk_rb_w_ctx(rwb, pos);
 36		flags = READ_ONCE(w_ctx->flags);
 37
 38		if (flags & PBLK_FLUSH_ENTRY) {
 39			flags &= ~PBLK_FLUSH_ENTRY;
 40			/* Release flags on context. Protect from writes */
 41			smp_store_release(&w_ctx->flags, flags);
 42
 43#ifdef CONFIG_NVM_PBLK_DEBUG
 44			atomic_dec(&rwb->inflight_flush_point);
 45#endif
 46		}
 47
 48		while ((original_bio = bio_list_pop(&w_ctx->bios)))
 49			bio_endio(original_bio);
 50	}
 51
 52	if (c_ctx->nr_padded)
 53		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
 54							c_ctx->nr_padded);
 55
 56#ifdef CONFIG_NVM_PBLK_DEBUG
 57	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
 58#endif
 59
 60	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
 61
 62	bio_put(rqd->bio);
 63	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 64
 65	return ret;
 66}
 67
 68static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
 69					   struct nvm_rq *rqd,
 70					   struct pblk_c_ctx *c_ctx)
 71{
 72	list_del(&c_ctx->list);
 73	return pblk_end_w_bio(pblk, rqd, c_ctx);
 74}
 75
 76static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
 77				struct pblk_c_ctx *c_ctx)
 78{
 79	struct pblk_c_ctx *c, *r;
 80	unsigned long flags;
 81	unsigned long pos;
 82
 83#ifdef CONFIG_NVM_PBLK_DEBUG
 84	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
 85#endif
 86	pblk_up_rq(pblk, c_ctx->lun_bitmap);
 87
 88	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
 89	if (pos == c_ctx->sentry) {
 90		pos = pblk_end_w_bio(pblk, rqd, c_ctx);
 91
 92retry:
 93		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
 94			rqd = nvm_rq_from_c_ctx(c);
 95			if (c->sentry == pos) {
 96				pos = pblk_end_queued_w_bio(pblk, rqd, c);
 97				goto retry;
 98			}
 99		}
100	} else {
101		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
102		list_add_tail(&c_ctx->list, &pblk->compl_list);
103	}
104	pblk_rb_sync_end(&pblk->rwb, &flags);
105}
106
107/* Map remaining sectors in chunk, starting from ppa */
108static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
109		int rqd_ppas)
110{
111	struct pblk_line *line;
112	struct ppa_addr map_ppa = *ppa;
113	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
114	__le64 *lba_list;
115	u64 paddr;
116	int done = 0;
117	int n = 0;
118
119	line = pblk_ppa_to_line(pblk, *ppa);
120	lba_list = emeta_to_lbas(pblk, line->emeta->buf);
121
122	spin_lock(&line->lock);
123
124	while (!done)  {
125		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);
126
127		if (!test_and_set_bit(paddr, line->map_bitmap))
128			line->left_msecs--;
129
130		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
131			line->nr_valid_lbas--;
132
133		lba_list[paddr] = addr_empty;
134
135		if (!test_and_set_bit(paddr, line->invalid_bitmap))
136			le32_add_cpu(line->vsc, -1);
137
138		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
139
140		n++;
141	}
142
143	line->w_err_gc->has_write_err = 1;
144	spin_unlock(&line->lock);
145}
146
147static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
148				  unsigned int nr_entries)
149{
150	struct pblk_rb *rb = &pblk->rwb;
151	struct pblk_rb_entry *entry;
152	struct pblk_line *line;
153	struct pblk_w_ctx *w_ctx;
154	struct ppa_addr ppa_l2p;
155	int flags;
156	unsigned int i;
157
158	spin_lock(&pblk->trans_lock);
159	for (i = 0; i < nr_entries; i++) {
160		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
161		w_ctx = &entry->w_ctx;
162
163		/* Check if the lba has been overwritten */
164		if (w_ctx->lba != ADDR_EMPTY) {
165			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
166			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
167				w_ctx->lba = ADDR_EMPTY;
168		}
169
170		/* Mark up the entry as submittable again */
171		flags = READ_ONCE(w_ctx->flags);
172		flags |= PBLK_WRITTEN_DATA;
173		/* Release flags on write context. Protect from writes */
174		smp_store_release(&w_ctx->flags, flags);
175
176		/* Decrease the reference count to the line as we will
177		 * re-map these entries
178		 */
179		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
180		atomic_dec(&line->sec_to_update);
181		kref_put(&line->ref, pblk_line_put);
182	}
183	spin_unlock(&pblk->trans_lock);
184}
185
186static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
187{
188	struct pblk_c_ctx *r_ctx;
189
190	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
191	if (!r_ctx)
192		return;
193
194	r_ctx->lun_bitmap = NULL;
195	r_ctx->sentry = c_ctx->sentry;
196	r_ctx->nr_valid = c_ctx->nr_valid;
197	r_ctx->nr_padded = c_ctx->nr_padded;
198
199	spin_lock(&pblk->resubmit_lock);
200	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
201	spin_unlock(&pblk->resubmit_lock);
202
203#ifdef CONFIG_NVM_PBLK_DEBUG
204	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
205#endif
206}
207
208static void pblk_submit_rec(struct work_struct *work)
209{
210	struct pblk_rec_ctx *recovery =
211			container_of(work, struct pblk_rec_ctx, ws_rec);
212	struct pblk *pblk = recovery->pblk;
213	struct nvm_rq *rqd = recovery->rqd;
214	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
215	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
216
217	pblk_log_write_err(pblk, rqd);
218
219	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
220	pblk_queue_resubmit(pblk, c_ctx);
221
222	pblk_up_rq(pblk, c_ctx->lun_bitmap);
223	if (c_ctx->nr_padded)
224		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
225							c_ctx->nr_padded);
226	bio_put(rqd->bio);
227	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
228	mempool_free(recovery, &pblk->rec_pool);
229
230	atomic_dec(&pblk->inflight_io);
231	pblk_write_kick(pblk);
232}
233
234
235static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
236{
237	struct pblk_rec_ctx *recovery;
238
239	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
240	if (!recovery) {
241		pblk_err(pblk, "could not allocate recovery work\n");
242		return;
243	}
244
245	recovery->pblk = pblk;
246	recovery->rqd = rqd;
247
248	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
249	queue_work(pblk->close_wq, &recovery->ws_rec);
250}
251
252static void pblk_end_io_write(struct nvm_rq *rqd)
253{
254	struct pblk *pblk = rqd->private;
255	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
256
257	if (rqd->error) {
258		pblk_end_w_fail(pblk, rqd);
259		return;
260	} else {
261		if (trace_pblk_chunk_state_enabled())
262			pblk_check_chunk_state_update(pblk, rqd);
263#ifdef CONFIG_NVM_PBLK_DEBUG
264		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
265#endif
266	}
267
268	pblk_complete_write(pblk, rqd, c_ctx);
269	atomic_dec(&pblk->inflight_io);
270}
271
272static void pblk_end_io_write_meta(struct nvm_rq *rqd)
273{
274	struct pblk *pblk = rqd->private;
275	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
276	struct pblk_line *line = m_ctx->private;
277	struct pblk_emeta *emeta = line->emeta;
278	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
279	int sync;
280
281	pblk_up_chunk(pblk, ppa_list[0]);
282
283	if (rqd->error) {
284		pblk_log_write_err(pblk, rqd);
285		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
286		line->w_err_gc->has_write_err = 1;
287	} else {
288		if (trace_pblk_chunk_state_enabled())
289			pblk_check_chunk_state_update(pblk, rqd);
290	}
291
292	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
293	if (sync == emeta->nr_entries)
294		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
295						GFP_ATOMIC, pblk->close_wq);
296
297	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
298
299	atomic_dec(&pblk->inflight_io);
300}
301
302static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
303			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
304{
305	/* Setup write request */
306	rqd->opcode = NVM_OP_PWRITE;
307	rqd->nr_ppas = nr_secs;
308	rqd->is_seq = 1;
309	rqd->private = pblk;
310	rqd->end_io = end_io;
311
312	return pblk_alloc_rqd_meta(pblk, rqd);
313}
314
315static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
316			   struct ppa_addr *erase_ppa)
317{
318	struct pblk_line_meta *lm = &pblk->lm;
319	struct pblk_line *e_line = pblk_line_get_erase(pblk);
320	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
321	unsigned int valid = c_ctx->nr_valid;
322	unsigned int padded = c_ctx->nr_padded;
323	unsigned int nr_secs = valid + padded;
324	unsigned long *lun_bitmap;
325	int ret;
326
327	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
328	if (!lun_bitmap)
329		return -ENOMEM;
330	c_ctx->lun_bitmap = lun_bitmap;
331
332	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
333	if (ret) {
334		kfree(lun_bitmap);
335		return ret;
336	}
337
338	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
339		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
340							valid, 0);
341	else
342		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
343							valid, erase_ppa);
344
345	return ret;
346}
347
348static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
349				  unsigned int secs_to_flush)
350{
351	int secs_to_sync;
352
353	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);
354
355#ifdef CONFIG_NVM_PBLK_DEBUG
356	if ((!secs_to_sync && secs_to_flush)
357			|| (secs_to_sync < 0)
358			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
359		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
360				secs_avail, secs_to_sync, secs_to_flush);
361	}
362#endif
363
364	return secs_to_sync;
365}
366
367int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
368{
369	struct nvm_tgt_dev *dev = pblk->dev;
370	struct nvm_geo *geo = &dev->geo;
371	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
372	struct pblk_line_meta *lm = &pblk->lm;
373	struct pblk_emeta *emeta = meta_line->emeta;
374	struct ppa_addr *ppa_list;
375	struct pblk_g_ctx *m_ctx;
376	struct nvm_rq *rqd;
377	void *data;
378	u64 paddr;
379	int rq_ppas = pblk->min_write_pgs;
380	int id = meta_line->id;
381	int rq_len;
382	int i, j;
383	int ret;
384
385	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
386
387	m_ctx = nvm_rq_to_pdu(rqd);
388	m_ctx->private = meta_line;
389
390	rq_len = rq_ppas * geo->csecs;
391	data = ((void *)emeta->buf) + emeta->mem;
392
393	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
394	if (ret)
395		goto fail_free_rqd;
396
397	ppa_list = nvm_rq_to_ppa_list(rqd);
398	for (i = 0; i < rqd->nr_ppas; ) {
399		spin_lock(&meta_line->lock);
400		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
401		spin_unlock(&meta_line->lock);
402		for (j = 0; j < rq_ppas; j++, i++, paddr++)
403			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
404	}
405
406	spin_lock(&l_mg->close_lock);
407	emeta->mem += rq_len;
408	if (emeta->mem >= lm->emeta_len[0])
409		list_del(&meta_line->list);
410	spin_unlock(&l_mg->close_lock);
411
412	pblk_down_chunk(pblk, ppa_list[0]);
413
414	ret = pblk_submit_io(pblk, rqd, data);
415	if (ret) {
416		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
417		goto fail_rollback;
418	}
419
420	return NVM_IO_OK;
421
422fail_rollback:
423	pblk_up_chunk(pblk, ppa_list[0]);
424	spin_lock(&l_mg->close_lock);
425	pblk_dealloc_page(pblk, meta_line, rq_ppas);
426	list_add(&meta_line->list, &meta_line->list);
427	spin_unlock(&l_mg->close_lock);
428fail_free_rqd:
429	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
430	return ret;
431}
432
433static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
434				       struct pblk_line *meta_line,
435				       struct nvm_rq *data_rqd)
436{
437	struct nvm_tgt_dev *dev = pblk->dev;
438	struct nvm_geo *geo = &dev->geo;
439	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
440	struct pblk_line *data_line = pblk_line_get_data(pblk);
441	struct ppa_addr ppa, ppa_opt;
442	u64 paddr;
443	int pos_opt;
444
445	/* Schedule a metadata I/O that is half the distance from the data I/O
446	 * with regards to the number of LUNs forming the pblk instance. This
447	 * balances LUN conflicts across every I/O.
448	 *
449	 * When the LUN configuration changes (e.g., due to GC), this distance
450	 * can align, which would result on metadata and data I/Os colliding. In
451	 * this case, modify the distance to not be optimal, but move the
452	 * optimal in the right direction.
453	 */
454	paddr = pblk_lookup_page(pblk, meta_line);
455	ppa = addr_to_gen_ppa(pblk, paddr, 0);
456	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
457	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
458
459	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
460				test_bit(pos_opt, data_line->blk_bitmap))
461		return true;
462
463	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
464		data_line->meta_distance--;
465
466	return false;
467}
468
469static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
470						    struct nvm_rq *data_rqd)
471{
472	struct pblk_line_meta *lm = &pblk->lm;
473	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
474	struct pblk_line *meta_line;
475
476	spin_lock(&l_mg->close_lock);
477	if (list_empty(&l_mg->emeta_list)) {
478		spin_unlock(&l_mg->close_lock);
479		return NULL;
480	}
481	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
482	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
483		spin_unlock(&l_mg->close_lock);
484		return NULL;
485	}
486	spin_unlock(&l_mg->close_lock);
487
488	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
489		return NULL;
490
491	return meta_line;
492}
493
494static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
495{
496	struct ppa_addr erase_ppa;
497	struct pblk_line *meta_line;
498	int err;
499
500	pblk_ppa_set_empty(&erase_ppa);
501
502	/* Assign lbas to ppas and populate request structure */
503	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
504	if (err) {
505		pblk_err(pblk, "could not setup write request: %d\n", err);
506		return NVM_IO_ERR;
507	}
508
509	meta_line = pblk_should_submit_meta_io(pblk, rqd);
510
511	/* Submit data write for current data line */
512	err = pblk_submit_io(pblk, rqd, NULL);
513	if (err) {
514		pblk_err(pblk, "data I/O submission failed: %d\n", err);
515		return NVM_IO_ERR;
516	}
517
518	if (!pblk_ppa_empty(erase_ppa)) {
519		/* Submit erase for next data line */
520		if (pblk_blk_erase_async(pblk, erase_ppa)) {
521			struct pblk_line *e_line = pblk_line_get_erase(pblk);
522			struct nvm_tgt_dev *dev = pblk->dev;
523			struct nvm_geo *geo = &dev->geo;
524			int bit;
525
526			atomic_inc(&e_line->left_eblks);
527			bit = pblk_ppa_to_pos(geo, erase_ppa);
528			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
529		}
530	}
531
532	if (meta_line) {
533		/* Submit metadata write for previous data line */
534		err = pblk_submit_meta_io(pblk, meta_line);
535		if (err) {
536			pblk_err(pblk, "metadata I/O submission failed: %d",
537					err);
538			return NVM_IO_ERR;
539		}
540	}
541
542	return NVM_IO_OK;
543}
544
545static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
546{
547	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
548	struct bio *bio = rqd->bio;
549
550	if (c_ctx->nr_padded)
551		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
552							c_ctx->nr_padded);
553}
554
555static int pblk_submit_write(struct pblk *pblk, int *secs_left)
556{
557	struct bio *bio;
558	struct nvm_rq *rqd;
559	unsigned int secs_avail, secs_to_sync, secs_to_com;
560	unsigned int secs_to_flush, packed_meta_pgs;
561	unsigned long pos;
562	unsigned int resubmit;
563
564	*secs_left = 0;
565
566	spin_lock(&pblk->resubmit_lock);
567	resubmit = !list_empty(&pblk->resubmit_list);
568	spin_unlock(&pblk->resubmit_lock);
569
570	/* Resubmit failed writes first */
571	if (resubmit) {
572		struct pblk_c_ctx *r_ctx;
573
574		spin_lock(&pblk->resubmit_lock);
575		r_ctx = list_first_entry(&pblk->resubmit_list,
576					struct pblk_c_ctx, list);
577		list_del(&r_ctx->list);
578		spin_unlock(&pblk->resubmit_lock);
579
580		secs_avail = r_ctx->nr_valid;
581		pos = r_ctx->sentry;
582
583		pblk_prepare_resubmit(pblk, pos, secs_avail);
584		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
585				secs_avail);
586
587		kfree(r_ctx);
588	} else {
589		/* If there are no sectors in the cache,
590		 * flushes (bios without data) will be cleared on
591		 * the cache threads
592		 */
593		secs_avail = pblk_rb_read_count(&pblk->rwb);
594		if (!secs_avail)
595			return 0;
596
597		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
598		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
599			return 0;
600
601		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
602					secs_to_flush);
603		if (secs_to_sync > pblk->max_write_pgs) {
604			pblk_err(pblk, "bad buffer sync calculation\n");
605			return 0;
606		}
607
608		secs_to_com = (secs_to_sync > secs_avail) ?
609			secs_avail : secs_to_sync;
610		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
611	}
612
613	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
614	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
615
616	bio->bi_iter.bi_sector = 0; /* internal bio */
617	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
618
619	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
620	rqd->bio = bio;
621
622	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
623								secs_avail)) {
624		pblk_err(pblk, "corrupted write bio\n");
625		goto fail_put_bio;
626	}
627
628	if (pblk_submit_io_set(pblk, rqd))
629		goto fail_free_bio;
630
631#ifdef CONFIG_NVM_PBLK_DEBUG
632	atomic_long_add(secs_to_sync, &pblk->sub_writes);
633#endif
634
635	*secs_left = 1;
636	return 0;
637
638fail_free_bio:
639	pblk_free_write_rqd(pblk, rqd);
640fail_put_bio:
641	bio_put(bio);
642	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
643
644	return -EINTR;
645}
646
647int pblk_write_ts(void *data)
648{
649	struct pblk *pblk = data;
650	int secs_left;
651	int write_failure = 0;
652
653	while (!kthread_should_stop()) {
654		if (!write_failure) {
655			write_failure = pblk_submit_write(pblk, &secs_left);
656
657			if (secs_left)
658				continue;
659		}
660		set_current_state(TASK_INTERRUPTIBLE);
661		io_schedule();
662	}
663
664	return 0;
665}
v4.17
 
  1/*
  2 * Copyright (C) 2016 CNEX Labs
  3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
  4 *                  Matias Bjorling <matias@cnexlabs.com>
  5 *
  6 * This program is free software; you can redistribute it and/or
  7 * modify it under the terms of the GNU General Public License version
  8 * 2 as published by the Free Software Foundation.
  9 *
 10 * This program is distributed in the hope that it will be useful, but
 11 * WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 13 * General Public License for more details.
 14 *
 15 * pblk-write.c - pblk's write path from write buffer to media
 16 */
 17
 18#include "pblk.h"
 19
 20static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
 21				    struct pblk_c_ctx *c_ctx)
 22{
 23	struct bio *original_bio;
 24	struct pblk_rb *rwb = &pblk->rwb;
 25	unsigned long ret;
 26	int i;
 27
 28	for (i = 0; i < c_ctx->nr_valid; i++) {
 29		struct pblk_w_ctx *w_ctx;
 30		int pos = c_ctx->sentry + i;
 31		int flags;
 32
 33		w_ctx = pblk_rb_w_ctx(rwb, pos);
 34		flags = READ_ONCE(w_ctx->flags);
 35
 36		if (flags & PBLK_FLUSH_ENTRY) {
 37			flags &= ~PBLK_FLUSH_ENTRY;
 38			/* Release flags on context. Protect from writes */
 39			smp_store_release(&w_ctx->flags, flags);
 40
 41#ifdef CONFIG_NVM_DEBUG
 42			atomic_dec(&rwb->inflight_flush_point);
 43#endif
 44		}
 45
 46		while ((original_bio = bio_list_pop(&w_ctx->bios)))
 47			bio_endio(original_bio);
 48	}
 49
 50	if (c_ctx->nr_padded)
 51		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
 52							c_ctx->nr_padded);
 53
 54#ifdef CONFIG_NVM_DEBUG
 55	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
 56#endif
 57
 58	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
 59
 60	bio_put(rqd->bio);
 61	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 62
 63	return ret;
 64}
 65
 66static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
 67					   struct nvm_rq *rqd,
 68					   struct pblk_c_ctx *c_ctx)
 69{
 70	list_del(&c_ctx->list);
 71	return pblk_end_w_bio(pblk, rqd, c_ctx);
 72}
 73
 74static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
 75				struct pblk_c_ctx *c_ctx)
 76{
 77	struct pblk_c_ctx *c, *r;
 78	unsigned long flags;
 79	unsigned long pos;
 80
 81#ifdef CONFIG_NVM_DEBUG
 82	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
 83#endif
 84
 85	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
 86
 87	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
 88	if (pos == c_ctx->sentry) {
 89		pos = pblk_end_w_bio(pblk, rqd, c_ctx);
 90
 91retry:
 92		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
 93			rqd = nvm_rq_from_c_ctx(c);
 94			if (c->sentry == pos) {
 95				pos = pblk_end_queued_w_bio(pblk, rqd, c);
 96				goto retry;
 97			}
 98		}
 99	} else {
100		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
101		list_add_tail(&c_ctx->list, &pblk->compl_list);
102	}
103	pblk_rb_sync_end(&pblk->rwb, &flags);
104}
105
106/* When a write fails, we are not sure whether the block has grown bad or a page
107 * range is more susceptible to write errors. If a high number of pages fail, we
108 * assume that the block is bad and we mark it accordingly. In all cases, we
109 * remap and resubmit the failed entries as fast as possible; if a flush is
110 * waiting on a completion, the whole stack would stall otherwise.
111 */
112static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
113{
114	void *comp_bits = &rqd->ppa_status;
115	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
116	struct pblk_rec_ctx *recovery;
117	struct ppa_addr *ppa_list = rqd->ppa_list;
118	int nr_ppas = rqd->nr_ppas;
119	unsigned int c_entries;
120	int bit, ret;
121
122	if (unlikely(nr_ppas == 1))
123		ppa_list = &rqd->ppa_addr;
124
125	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
126
127	INIT_LIST_HEAD(&recovery->failed);
128
129	bit = -1;
130	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
131		struct pblk_rb_entry *entry;
132		struct ppa_addr ppa;
133
134		/* Logic error */
135		if (bit > c_ctx->nr_valid) {
136			WARN_ONCE(1, "pblk: corrupted write request\n");
137			mempool_free(recovery, pblk->rec_pool);
138			goto out;
139		}
140
141		ppa = ppa_list[bit];
142		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
143		if (!entry) {
144			pr_err("pblk: could not scan entry on write failure\n");
145			mempool_free(recovery, pblk->rec_pool);
146			goto out;
147		}
148
149		/* The list is filled first and emptied afterwards. No need for
150		 * protecting it with a lock
151		 */
152		list_add_tail(&entry->index, &recovery->failed);
153	}
154
155	c_entries = find_first_bit(comp_bits, nr_ppas);
156	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
157	if (ret) {
158		pr_err("pblk: could not recover from write failure\n");
159		mempool_free(recovery, pblk->rec_pool);
160		goto out;
161	}
162
163	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
164	queue_work(pblk->close_wq, &recovery->ws_rec);
165
166out:
167	pblk_complete_write(pblk, rqd, c_ctx);
168}
169
170static void pblk_end_io_write(struct nvm_rq *rqd)
171{
172	struct pblk *pblk = rqd->private;
173	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
174
175	if (rqd->error) {
176		pblk_log_write_err(pblk, rqd);
177		return pblk_end_w_fail(pblk, rqd);
178	}
179#ifdef CONFIG_NVM_DEBUG
180	else
181		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
182#endif
183
184	pblk_complete_write(pblk, rqd, c_ctx);
185	atomic_dec(&pblk->inflight_io);
186}
187
188static void pblk_end_io_write_meta(struct nvm_rq *rqd)
189{
190	struct pblk *pblk = rqd->private;
191	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
192	struct pblk_line *line = m_ctx->private;
193	struct pblk_emeta *emeta = line->emeta;
194	int sync;
195
196	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
197
198	if (rqd->error) {
199		pblk_log_write_err(pblk, rqd);
200		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
201	}
202
203	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
204	if (sync == emeta->nr_entries)
205		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
206						GFP_ATOMIC, pblk->close_wq);
207
208	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
209
210	atomic_dec(&pblk->inflight_io);
211}
212
213static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
214			   unsigned int nr_secs,
215			   nvm_end_io_fn(*end_io))
216{
217	struct nvm_tgt_dev *dev = pblk->dev;
218
219	/* Setup write request */
220	rqd->opcode = NVM_OP_PWRITE;
221	rqd->nr_ppas = nr_secs;
222	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
223	rqd->private = pblk;
224	rqd->end_io = end_io;
225
226	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
227							&rqd->dma_meta_list);
228	if (!rqd->meta_list)
229		return -ENOMEM;
230
231	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
232	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
233
234	return 0;
235}
236
237static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
238			   struct ppa_addr *erase_ppa)
239{
240	struct pblk_line_meta *lm = &pblk->lm;
241	struct pblk_line *e_line = pblk_line_get_erase(pblk);
242	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
243	unsigned int valid = c_ctx->nr_valid;
244	unsigned int padded = c_ctx->nr_padded;
245	unsigned int nr_secs = valid + padded;
246	unsigned long *lun_bitmap;
247	int ret;
248
249	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
250	if (!lun_bitmap)
251		return -ENOMEM;
252	c_ctx->lun_bitmap = lun_bitmap;
253
254	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
255	if (ret) {
256		kfree(lun_bitmap);
257		return ret;
258	}
259
260	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
261		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
262	else
263		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
264							valid, erase_ppa);
265
266	return 0;
267}
268
269int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
270			struct pblk_c_ctx *c_ctx)
271{
272	struct pblk_line_meta *lm = &pblk->lm;
273	unsigned long *lun_bitmap;
274	int ret;
275
276	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
277	if (!lun_bitmap)
278		return -ENOMEM;
279
280	c_ctx->lun_bitmap = lun_bitmap;
281
282	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
283	if (ret)
284		return ret;
285
286	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
287
288	rqd->ppa_status = (u64)0;
289	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
290
291	return ret;
292}
293
294static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
295				  unsigned int secs_to_flush)
296{
297	int secs_to_sync;
298
299	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);
300
301#ifdef CONFIG_NVM_DEBUG
302	if ((!secs_to_sync && secs_to_flush)
303			|| (secs_to_sync < 0)
304			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
305		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
306				secs_avail, secs_to_sync, secs_to_flush);
307	}
308#endif
309
310	return secs_to_sync;
311}
312
313int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
314{
315	struct nvm_tgt_dev *dev = pblk->dev;
316	struct nvm_geo *geo = &dev->geo;
317	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
318	struct pblk_line_meta *lm = &pblk->lm;
319	struct pblk_emeta *emeta = meta_line->emeta;
320	struct pblk_g_ctx *m_ctx;
321	struct bio *bio;
322	struct nvm_rq *rqd;
323	void *data;
324	u64 paddr;
325	int rq_ppas = pblk->min_write_pgs;
326	int id = meta_line->id;
327	int rq_len;
328	int i, j;
329	int ret;
330
331	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
332
333	m_ctx = nvm_rq_to_pdu(rqd);
334	m_ctx->private = meta_line;
335
336	rq_len = rq_ppas * geo->csecs;
337	data = ((void *)emeta->buf) + emeta->mem;
338
339	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
340					l_mg->emeta_alloc_type, GFP_KERNEL);
341	if (IS_ERR(bio)) {
342		ret = PTR_ERR(bio);
343		goto fail_free_rqd;
344	}
345	bio->bi_iter.bi_sector = 0; /* internal bio */
346	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
347	rqd->bio = bio;
348
349	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
350	if (ret)
351		goto fail_free_bio;
352
353	for (i = 0; i < rqd->nr_ppas; ) {
354		spin_lock(&meta_line->lock);
355		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
356		spin_unlock(&meta_line->lock);
357		for (j = 0; j < rq_ppas; j++, i++, paddr++)
358			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
359	}
360
361	emeta->mem += rq_len;
362	if (emeta->mem >= lm->emeta_len[0]) {
363		spin_lock(&l_mg->close_lock);
364		list_del(&meta_line->list);
365		spin_unlock(&l_mg->close_lock);
366	}
367
368	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
369
370	ret = pblk_submit_io(pblk, rqd);
371	if (ret) {
372		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
373		goto fail_rollback;
374	}
375
376	return NVM_IO_OK;
377
378fail_rollback:
379	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
380	spin_lock(&l_mg->close_lock);
381	pblk_dealloc_page(pblk, meta_line, rq_ppas);
382	list_add(&meta_line->list, &meta_line->list);
383	spin_unlock(&l_mg->close_lock);
384fail_free_bio:
385	bio_put(bio);
386fail_free_rqd:
387	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
388	return ret;
389}
390
391static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
392				       struct pblk_line *meta_line,
393				       struct nvm_rq *data_rqd)
394{
395	struct nvm_tgt_dev *dev = pblk->dev;
396	struct nvm_geo *geo = &dev->geo;
397	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
398	struct pblk_line *data_line = pblk_line_get_data(pblk);
399	struct ppa_addr ppa, ppa_opt;
400	u64 paddr;
401	int pos_opt;
402
403	/* Schedule a metadata I/O that is half the distance from the data I/O
404	 * with regards to the number of LUNs forming the pblk instance. This
405	 * balances LUN conflicts across every I/O.
406	 *
407	 * When the LUN configuration changes (e.g., due to GC), this distance
408	 * can align, which would result on metadata and data I/Os colliding. In
409	 * this case, modify the distance to not be optimal, but move the
410	 * optimal in the right direction.
411	 */
412	paddr = pblk_lookup_page(pblk, meta_line);
413	ppa = addr_to_gen_ppa(pblk, paddr, 0);
414	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
415	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
416
417	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
418				test_bit(pos_opt, data_line->blk_bitmap))
419		return true;
420
421	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
422		data_line->meta_distance--;
423
424	return false;
425}
426
427static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
428						    struct nvm_rq *data_rqd)
429{
430	struct pblk_line_meta *lm = &pblk->lm;
431	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
432	struct pblk_line *meta_line;
433
434	spin_lock(&l_mg->close_lock);
435retry:
436	if (list_empty(&l_mg->emeta_list)) {
437		spin_unlock(&l_mg->close_lock);
438		return NULL;
439	}
440	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
441	if (meta_line->emeta->mem >= lm->emeta_len[0])
442		goto retry;
443	spin_unlock(&l_mg->close_lock);
444
445	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
446		return NULL;
447
448	return meta_line;
449}
450
451static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
452{
453	struct ppa_addr erase_ppa;
454	struct pblk_line *meta_line;
455	int err;
456
457	pblk_ppa_set_empty(&erase_ppa);
458
459	/* Assign lbas to ppas and populate request structure */
460	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
461	if (err) {
462		pr_err("pblk: could not setup write request: %d\n", err);
463		return NVM_IO_ERR;
464	}
465
466	meta_line = pblk_should_submit_meta_io(pblk, rqd);
467
468	/* Submit data write for current data line */
469	err = pblk_submit_io(pblk, rqd);
470	if (err) {
471		pr_err("pblk: data I/O submission failed: %d\n", err);
472		return NVM_IO_ERR;
473	}
474
475	if (!pblk_ppa_empty(erase_ppa)) {
476		/* Submit erase for next data line */
477		if (pblk_blk_erase_async(pblk, erase_ppa)) {
478			struct pblk_line *e_line = pblk_line_get_erase(pblk);
479			struct nvm_tgt_dev *dev = pblk->dev;
480			struct nvm_geo *geo = &dev->geo;
481			int bit;
482
483			atomic_inc(&e_line->left_eblks);
484			bit = pblk_ppa_to_pos(geo, erase_ppa);
485			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
486		}
487	}
488
489	if (meta_line) {
490		/* Submit metadata write for previous data line */
491		err = pblk_submit_meta_io(pblk, meta_line);
492		if (err) {
493			pr_err("pblk: metadata I/O submission failed: %d", err);
494			return NVM_IO_ERR;
495		}
496	}
497
498	return NVM_IO_OK;
499}
500
501static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
502{
503	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
504	struct bio *bio = rqd->bio;
505
506	if (c_ctx->nr_padded)
507		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
508							c_ctx->nr_padded);
509}
510
511static int pblk_submit_write(struct pblk *pblk)
512{
513	struct bio *bio;
514	struct nvm_rq *rqd;
515	unsigned int secs_avail, secs_to_sync, secs_to_com;
516	unsigned int secs_to_flush;
517	unsigned long pos;
518
519	/* If there are no sectors in the cache, flushes (bios without data)
520	 * will be cleared on the cache threads
521	 */
522	secs_avail = pblk_rb_read_count(&pblk->rwb);
523	if (!secs_avail)
524		return 1;
525
526	secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
527	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
528		return 1;
529
530	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
531	if (secs_to_sync > pblk->max_write_pgs) {
532		pr_err("pblk: bad buffer sync calculation\n");
533		return 1;
534	}
535
536	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
537	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
538
539	bio = bio_alloc(GFP_KERNEL, secs_to_sync);
540
541	bio->bi_iter.bi_sector = 0; /* internal bio */
542	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
543
544	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
545	rqd->bio = bio;
546
547	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
548								secs_avail)) {
549		pr_err("pblk: corrupted write bio\n");
550		goto fail_put_bio;
551	}
552
553	if (pblk_submit_io_set(pblk, rqd))
554		goto fail_free_bio;
555
556#ifdef CONFIG_NVM_DEBUG
557	atomic_long_add(secs_to_sync, &pblk->sub_writes);
558#endif
559
560	return 0;
561
562fail_free_bio:
563	pblk_free_write_rqd(pblk, rqd);
564fail_put_bio:
565	bio_put(bio);
566	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
567
568	return 1;
569}
570
571int pblk_write_ts(void *data)
572{
573	struct pblk *pblk = data;
574
575	while (!kthread_should_stop()) {
576		if (!pblk_submit_write(pblk))
577			continue;
578		set_current_state(TASK_INTERRUPTIBLE);
579		io_schedule();
580	}
581
582	return 0;
583}