author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 10:34:51 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 10:34:51 -0700 |
commit | c6b1e36c8fa04a6680c44fe0321d0370400e90b6 (patch) | |
tree | 5110f0639bfa803baa8d213cb21efe37beeaf742 /drivers/lightnvm/pblk-gc.c | |
parent | 81e3e044897b0875a52953b3fb6241a33428e4f9 (diff) | |
parent | a84ebb837b419787c2ece74efa566c998929cead (diff) | |
Merge branch 'for-4.13/block' of git://git.kernel.dk/linux-block
Pull core block/IO updates from Jens Axboe:
"This is the main pull request for the block layer for 4.13. Not a huge
round in terms of features, but there's a lot of churn related to some
core cleanups.
Note this depends on the UUID tree pull request, that Christoph
already sent out.
This pull request contains:
- A series from Christoph, unifying the error/stats codes in the
block layer. We now use blk_status_t everywhere, instead of using
different schemes for different places.
- Also from Christoph, some cleanups around request allocation and IO
scheduler interactions in blk-mq.
- And yet another series from Christoph, cleaning up how we handle
and do bounce buffering in the block layer.
- A blk-mq debugfs series from Bart, further improving on the support
we have for exporting internal information to aid debugging IO
hangs or stalls.
- Also from Bart, a series that cleans up the request initialization
differences across types of devices.
- A series from Goldwyn Rodrigues, allowing the block layer to return
failure immediately if an operation would block and the caller asked for
non-blocking I/O.
- Patch from Hannes for supporting setting loop devices block size to
that of the underlying device.
- Two series of patches from Javier, fixing various issues with
lightnvm, particularly around pblk.
- A series from me, adding support for write hints. This comes with
NVMe support as well, so applications can help guide data placement
on flash to improve performance and reduce latency and write
amplification.
- A series from Ming, improving and hardening blk-mq support for
stopping/starting and quiescing hardware queues.
- Two pull requests for NVMe updates. Nothing major on the feature
side, but lots of cleanups and bug fixes. From the usual crew.
- A series from Neil Brown, greatly improving the bio rescue set
support. Most notably, this kills the bio rescue work queues when we
don't really need them.
- Lots of other little bug fixes that are all over the place"
* 'for-4.13/block' of git://git.kernel.dk/linux-block: (217 commits)
lightnvm: pblk: set line bitmap check under debug
lightnvm: pblk: verify that cache read is still valid
lightnvm: pblk: add initialization check
lightnvm: pblk: remove target using async. I/Os
lightnvm: pblk: use vmalloc for GC data buffer
lightnvm: pblk: use right metadata buffer for recovery
lightnvm: pblk: schedule if data is not ready
lightnvm: pblk: remove unused return variable
lightnvm: pblk: fix double-free on pblk init
lightnvm: pblk: fix bad le64 assignations
nvme: Makefile: remove dead build rule
blk-mq: map all HWQ also in hyperthreaded system
nvmet-rdma: register ib_client to not deadlock in device removal
nvme_fc: fix error recovery on link down.
nvmet_fc: fix crashes on bad opcodes
nvme_fc: Fix crash when nvme controller connection fails.
nvme_fc: replace ioabort msleep loop with completion
nvme_fc: fix double calls to nvme_cleanup_cmd()
nvme-fabrics: verify that a controller returns the correct NQN
nvme: simplify nvme_dev_attrs_are_visible
...
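
A couple of the interfaces referenced in the log above can be illustrated briefly. The blk_status_t work replaces the old mix of errno values and driver-private codes with a single completion status type that blk-mq drivers return from ->queue_rq() and pass to blk_mq_end_request(). The sketch below only shows the shape of a completion path under the new scheme; the driver itself is hypothetical (sketch_queue_rq and sketch_handle_cmd are invented names), while the BLK_STS_* codes and the blk-mq calls are the interfaces introduced or converted by this series.

```c
/* Minimal sketch, not a complete driver: error reporting via blk_status_t. */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper standing in for real hardware submission. */
static int sketch_handle_cmd(struct request *rq)
{
    return 0;       /* pretend the command always succeeds */
}

static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
    struct request *rq = bd->rq;
    int err;

    blk_mq_start_request(rq);

    err = sketch_handle_cmd(rq);
    if (err == -ENOMEM)
        return BLK_STS_RESOURCE;    /* ask the core to retry later */

    /* Complete with a block-layer status code, not an errno. */
    blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
    return BLK_STS_OK;
}
```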
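
The write-hint series exposes a per-file lifetime hint that the block layer and the NVMe driver can map onto streams. A hedged userspace example follows, assuming the fcntl interface added with this work (F_SET_RW_HINT and the RWH_WRITE_LIFE_* values); the file name is made up and the fallback defines are only for illustration — verify the constants against your kernel's <linux/fcntl.h>.

```c
/* Tag a (hypothetical) scratch file as short-lived before writing it. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#ifndef F_SET_RW_HINT               /* assumed values, check your headers */
#define F_LINUX_SPECIFIC_BASE   1024
#define F_SET_RW_HINT           (F_LINUX_SPECIFIC_BASE + 12)
#define RWH_WRITE_LIFE_SHORT    2
#endif

int main(void)
{
    uint64_t hint = RWH_WRITE_LIFE_SHORT;
    int fd = open("scratch.dat", O_CREAT | O_WRONLY | O_TRUNC, 0644);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Older kernels reject the command with EINVAL; that is non-fatal here. */
    if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
        perror("F_SET_RW_HINT");

    if (write(fd, "scratch data\n", 13) < 0)
        perror("write");

    close(fd);
    return 0;
}
```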
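
Ming's quiesce hardening gives drivers a symmetric pair for pausing dispatch without failing I/O: blk_mq_quiesce_queue() stops calls into ->queue_rq() while submitters keep queuing, and blk_mq_unquiesce_queue() (added in this cycle) restarts dispatch. A rough in-kernel sketch, where the reconfiguration helper and its contents are invented for illustration:

```c
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical: change state that ->queue_rq() must never observe half-done. */
static void sketch_reconfigure(struct request_queue *q)
{
    blk_mq_quiesce_queue(q);    /* no new ->queue_rq() calls after this */

    /* ... update driver/controller state here ... */

    blk_mq_unquiesce_queue(q);  /* dispatch resumes, queued requests are run */
}
```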
Diffstat (limited to 'drivers/lightnvm/pblk-gc.c')
-rw-r--r-- | drivers/lightnvm/pblk-gc.c | 475 |
1 file changed, 284 insertions, 191 deletions
```diff
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index eaf479c6b63c..6090d28f7995 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -20,8 +20,7 @@
 
 static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
 {
-    kfree(gc_rq->data);
-    kfree(gc_rq->lba_list);
+    vfree(gc_rq->data);
     kfree(gc_rq);
 }
 
@@ -37,10 +36,8 @@ static int pblk_gc_write(struct pblk *pblk)
         return 1;
     }
 
-    list_for_each_entry_safe(gc_rq, tgc_rq, &gc->w_list, list) {
-        list_move_tail(&gc_rq->list, &w_list);
-        gc->w_entries--;
-    }
+    list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
+    gc->w_entries = 0;
     spin_unlock(&gc->w_lock);
 
     list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
@@ -48,9 +45,8 @@ static int pblk_gc_write(struct pblk *pblk)
                 gc_rq->nr_secs, gc_rq->secs_to_gc,
                 gc_rq->line, PBLK_IOTYPE_GC);
 
-        kref_put(&gc_rq->line->ref, pblk_line_put);
-
         list_del(&gc_rq->list);
+        kref_put(&gc_rq->line->ref, pblk_line_put);
         pblk_gc_free_gc_rq(gc_rq);
     }
 
@@ -66,52 +62,41 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
  * Responsible for managing all memory related to a gc request. Also in case of
  * failure
  */
-static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_line *line,
-                   u64 *lba_list, unsigned int nr_secs)
+static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 {
     struct nvm_tgt_dev *dev = pblk->dev;
     struct nvm_geo *geo = &dev->geo;
     struct pblk_gc *gc = &pblk->gc;
-    struct pblk_gc_rq *gc_rq;
+    struct pblk_line *line = gc_rq->line;
     void *data;
     unsigned int secs_to_gc;
-    int ret = NVM_IO_OK;
+    int ret = 0;
 
-    data = kmalloc(nr_secs * geo->sec_size, GFP_KERNEL);
+    data = vmalloc(gc_rq->nr_secs * geo->sec_size);
     if (!data) {
-        ret = NVM_IO_ERR;
-        goto free_lba_list;
+        ret = -ENOMEM;
+        goto out;
     }
 
     /* Read from GC victim block */
-    if (pblk_submit_read_gc(pblk, lba_list, data, nr_secs,
+    if (pblk_submit_read_gc(pblk, gc_rq->lba_list, data, gc_rq->nr_secs,
                             &secs_to_gc, line)) {
-        ret = NVM_IO_ERR;
+        ret = -EFAULT;
         goto free_data;
     }
 
     if (!secs_to_gc)
-        goto free_data;
-
-    gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
-    if (!gc_rq) {
-        ret = NVM_IO_ERR;
-        goto free_data;
-    }
+        goto free_rq;
 
-    gc_rq->line = line;
     gc_rq->data = data;
-    gc_rq->lba_list = lba_list;
-    gc_rq->nr_secs = nr_secs;
     gc_rq->secs_to_gc = secs_to_gc;
 
-    kref_get(&line->ref);
-
 retry:
     spin_lock(&gc->w_lock);
-    if (gc->w_entries > 256) {
+    if (gc->w_entries >= PBLK_GC_W_QD) {
         spin_unlock(&gc->w_lock);
-        usleep_range(256, 1024);
+        pblk_gc_writer_kick(&pblk->gc);
+        usleep_range(128, 256);
         goto retry;
     }
     gc->w_entries++;
@@ -120,13 +105,14 @@ retry:
 
     pblk_gc_writer_kick(&pblk->gc);
 
-    return NVM_IO_OK;
+    return 0;
 
+free_rq:
+    kfree(gc_rq);
 free_data:
-    kfree(data);
-free_lba_list:
-    kfree(lba_list);
-
+    vfree(data);
+out:
+    kref_put(&line->ref, pblk_line_put);
     return ret;
 }
 
@@ -150,140 +136,206 @@ static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
 
 static void pblk_gc_line_ws(struct work_struct *work)
 {
+    struct pblk_line_ws *line_rq_ws = container_of(work,
+                            struct pblk_line_ws, ws);
+    struct pblk *pblk = line_rq_ws->pblk;
+    struct pblk_gc *gc = &pblk->gc;
+    struct pblk_line *line = line_rq_ws->line;
+    struct pblk_gc_rq *gc_rq = line_rq_ws->priv;
+
+    up(&gc->gc_sem);
+
+    if (pblk_gc_move_valid_secs(pblk, gc_rq)) {
+        pr_err("pblk: could not GC all sectors: line:%d (%d/%d)\n",
+                        line->id, *line->vsc,
+                        gc_rq->nr_secs);
+    }
+
+    mempool_free(line_rq_ws, pblk->line_ws_pool);
+}
+
+static void pblk_gc_line_prepare_ws(struct work_struct *work)
+{
     struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                             ws);
     struct pblk *pblk = line_ws->pblk;
-    struct pblk_line_mgmt *l_mg = &pblk->l_mg;
     struct pblk_line *line = line_ws->line;
+    struct pblk_line_mgmt *l_mg = &pblk->l_mg;
     struct pblk_line_meta *lm = &pblk->lm;
-    __le64 *lba_list = line_ws->priv;
-    u64 *gc_list;
-    int sec_left;
-    int nr_ppas, bit;
-    int put_line = 1;
+    struct pblk_gc *gc = &pblk->gc;
+    struct line_emeta *emeta_buf;
+    struct pblk_line_ws *line_rq_ws;
+    struct pblk_gc_rq *gc_rq;
+    __le64 *lba_list;
+    int sec_left, nr_secs, bit;
+    int ret;
 
-    pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
+    emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
+                            GFP_KERNEL);
+    if (!emeta_buf) {
+        pr_err("pblk: cannot use GC emeta\n");
+        return;
+    }
 
-    spin_lock(&line->lock);
-    sec_left = line->vsc;
-    if (!sec_left) {
-        /* Lines are erased before being used (l_mg->data_/log_next) */
-        spin_unlock(&line->lock);
-        goto out;
+    ret = pblk_line_read_emeta(pblk, line, emeta_buf);
+    if (ret) {
+        pr_err("pblk: line %d read emeta failed (%d)\n", line->id, ret);
+        goto fail_free_emeta;
+    }
+
+    /* If this read fails, it means that emeta is corrupted. For now, leave
+     * the line untouched. TODO: Implement a recovery routine that scans and
+     * moves all sectors on the line.
+     */
+    lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+    if (!lba_list) {
+        pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
+        goto fail_free_emeta;
     }
-    spin_unlock(&line->lock);
 
+    sec_left = pblk_line_vsc(line);
     if (sec_left < 0) {
         pr_err("pblk: corrupted GC line (%d)\n", line->id);
-        put_line = 0;
-        pblk_put_line_back(pblk, line);
-        goto out;
+        goto fail_free_emeta;
     }
 
     bit = -1;
 next_rq:
-    gc_list = kmalloc_array(pblk->max_write_pgs, sizeof(u64), GFP_KERNEL);
-    if (!gc_list) {
-        put_line = 0;
-        pblk_put_line_back(pblk, line);
-        goto out;
-    }
+    gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
+    if (!gc_rq)
+        goto fail_free_emeta;
 
-    nr_ppas = 0;
+    nr_secs = 0;
     do {
         bit = find_next_zero_bit(line->invalid_bitmap, lm->sec_per_line,
                                 bit + 1);
         if (bit > line->emeta_ssec)
             break;
 
-        gc_list[nr_ppas++] = le64_to_cpu(lba_list[bit]);
-    } while (nr_ppas < pblk->max_write_pgs);
+        gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
+    } while (nr_secs < pblk->max_write_pgs);
 
-    if (unlikely(!nr_ppas)) {
-        kfree(gc_list);
+    if (unlikely(!nr_secs)) {
+        kfree(gc_rq);
         goto out;
     }
 
-    if (pblk_gc_move_valid_secs(pblk, line, gc_list, nr_ppas)) {
-        pr_err("pblk: could not GC all sectors: line:%d (%d/%d/%d)\n",
-                        line->id, line->vsc,
-                        nr_ppas, nr_ppas);
-        put_line = 0;
-        pblk_put_line_back(pblk, line);
-        goto out;
-    }
+    gc_rq->nr_secs = nr_secs;
+    gc_rq->line = line;
+
+    line_rq_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
+    if (!line_rq_ws)
+        goto fail_free_gc_rq;
 
-    sec_left -= nr_ppas;
+    line_rq_ws->pblk = pblk;
+    line_rq_ws->line = line;
+    line_rq_ws->priv = gc_rq;
+
+    down(&gc->gc_sem);
+    kref_get(&line->ref);
+
+    INIT_WORK(&line_rq_ws->ws, pblk_gc_line_ws);
+    queue_work(gc->gc_line_reader_wq, &line_rq_ws->ws);
+
+    sec_left -= nr_secs;
     if (sec_left > 0)
         goto next_rq;
 
 out:
-    pblk_mfree(line->emeta, l_mg->emeta_alloc_type);
     mempool_free(line_ws, pblk->line_ws_pool);
-    atomic_dec(&pblk->gc.inflight_gc);
-    if (put_line)
-        kref_put(&line->ref, pblk_line_put);
+
+    kref_put(&line->ref, pblk_line_put);
+    atomic_dec(&gc->inflight_gc);
+
+    return;
+
+fail_free_gc_rq:
+    kfree(gc_rq);
+fail_free_emeta:
+    pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+    pblk_put_line_back(pblk, line);
+    kref_put(&line->ref, pblk_line_put);
+    mempool_free(line_ws, pblk->line_ws_pool);
+    atomic_dec(&gc->inflight_gc);
+
+    pr_err("pblk: Failed to GC line %d\n", line->id);
 }
 
 static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
 {
-    struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-    struct pblk_line_meta *lm = &pblk->lm;
+    struct pblk_gc *gc = &pblk->gc;
     struct pblk_line_ws *line_ws;
-    __le64 *lba_list;
-    int ret;
 
-    line_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
-    line->emeta = pblk_malloc(lm->emeta_len, l_mg->emeta_alloc_type,
-                            GFP_KERNEL);
-    if (!line->emeta) {
-        pr_err("pblk: cannot use GC emeta\n");
-        goto fail_free_ws;
-    }
-
-    ret = pblk_line_read_emeta(pblk, line);
-    if (ret) {
-        pr_err("pblk: line %d read emeta failed (%d)\n", line->id, ret);
-        goto fail_free_emeta;
-    }
+    pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
 
-    /* If this read fails, it means that emeta is corrupted. For now, leave
-     * the line untouched. TODO: Implement a recovery routine that scans and
-     * moves all sectors on the line.
-     */
-    lba_list = pblk_recov_get_lba_list(pblk, line->emeta);
-    if (!lba_list) {
-        pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
-        goto fail_free_emeta;
-    }
+    line_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
+    if (!line_ws)
+        return -ENOMEM;
 
     line_ws->pblk = pblk;
     line_ws->line = line;
-    line_ws->priv = lba_list;
 
-    INIT_WORK(&line_ws->ws, pblk_gc_line_ws);
-    queue_work(pblk->gc.gc_reader_wq, &line_ws->ws);
+    INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
+    queue_work(gc->gc_reader_wq, &line_ws->ws);
 
     return 0;
+}
 
-fail_free_emeta:
-    pblk_mfree(line->emeta, l_mg->emeta_alloc_type);
-fail_free_ws:
-    mempool_free(line_ws, pblk->line_ws_pool);
-    pblk_put_line_back(pblk, line);
+static int pblk_gc_read(struct pblk *pblk)
+{
+    struct pblk_gc *gc = &pblk->gc;
+    struct pblk_line *line;
+
+    spin_lock(&gc->r_lock);
+    if (list_empty(&gc->r_list)) {
+        spin_unlock(&gc->r_lock);
+        return 1;
+    }
+
+    line = list_first_entry(&gc->r_list, struct pblk_line, list);
+    list_del(&line->list);
+    spin_unlock(&gc->r_lock);
+
+    pblk_gc_kick(pblk);
 
-    return 1;
+    if (pblk_gc_line(pblk, line))
+        pr_err("pblk: failed to GC line %d\n", line->id);
+
+    return 0;
 }
 
-static void pblk_gc_lines(struct pblk *pblk, struct list_head *gc_list)
+static void pblk_gc_reader_kick(struct pblk_gc *gc)
 {
-    struct pblk_line *line, *tline;
+    wake_up_process(gc->gc_reader_ts);
+}
 
-    list_for_each_entry_safe(line, tline, gc_list, list) {
-        if (pblk_gc_line(pblk, line))
-            pr_err("pblk: failed to GC line %d\n", line->id);
-        list_del(&line->list);
+static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
+                         struct list_head *group_list)
+{
+    struct pblk_line *line, *victim;
+    int line_vsc, victim_vsc;
+
+    victim = list_first_entry(group_list, struct pblk_line, list);
+    list_for_each_entry(line, group_list, list) {
+        line_vsc = le32_to_cpu(*line->vsc);
+        victim_vsc = le32_to_cpu(*victim->vsc);
+        if (line_vsc < victim_vsc)
+            victim = line;
     }
+
+    return victim;
+}
+
+static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
+{
+    unsigned int nr_blocks_free, nr_blocks_need;
+
+    nr_blocks_need = pblk_rl_high_thrs(rl);
+    nr_blocks_free = pblk_rl_nr_free_blks(rl);
+
+    /* This is not critical, no need to take lock here */
+    return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
 }
 
 /*
@@ -296,71 +348,83 @@ static void pblk_gc_run(struct pblk *pblk)
 {
     struct pblk_line_mgmt *l_mg = &pblk->l_mg;
     struct pblk_gc *gc = &pblk->gc;
-    struct pblk_line *line, *tline;
-    unsigned int nr_blocks_free, nr_blocks_need;
+    struct pblk_line *line;
     struct list_head *group_list;
-    int run_gc, gc_group = 0;
-    int prev_gc = 0;
-    int inflight_gc = atomic_read(&gc->inflight_gc);
-    LIST_HEAD(gc_list);
+    bool run_gc;
+    int inflight_gc, gc_group = 0, prev_group = 0;
+
+    do {
+        spin_lock(&l_mg->gc_lock);
+        if (list_empty(&l_mg->gc_full_list)) {
+            spin_unlock(&l_mg->gc_lock);
+            break;
+        }
+
+        line = list_first_entry(&l_mg->gc_full_list,
+                            struct pblk_line, list);
 
-    spin_lock(&l_mg->gc_lock);
-    list_for_each_entry_safe(line, tline, &l_mg->gc_full_list, list) {
         spin_lock(&line->lock);
         WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
         line->state = PBLK_LINESTATE_GC;
         spin_unlock(&line->lock);
 
         list_del(&line->list);
+        spin_unlock(&l_mg->gc_lock);
+
         kref_put(&line->ref, pblk_line_put);
-    }
-    spin_unlock(&l_mg->gc_lock);
+    } while (1);
 
-    nr_blocks_need = pblk_rl_gc_thrs(&pblk->rl);
-    nr_blocks_free = pblk_rl_nr_free_blks(&pblk->rl);
-    run_gc = (nr_blocks_need > nr_blocks_free || gc->gc_forced);
+    run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
+    if (!run_gc || (atomic_read(&gc->inflight_gc) >= PBLK_GC_L_QD))
+        return;
 
 next_gc_group:
     group_list = l_mg->gc_lists[gc_group++];
-    spin_lock(&l_mg->gc_lock);
-    while (run_gc && !list_empty(group_list)) {
-        /* No need to queue up more GC lines than we can handle */
-        if (!run_gc || inflight_gc > gc->gc_jobs_active) {
+
+    do {
+        spin_lock(&l_mg->gc_lock);
+        if (list_empty(group_list)) {
             spin_unlock(&l_mg->gc_lock);
-            pblk_gc_lines(pblk, &gc_list);
-            return;
+            break;
         }
 
-        line = list_first_entry(group_list, struct pblk_line, list);
-        nr_blocks_free += atomic_read(&line->blk_in_line);
+        line = pblk_gc_get_victim_line(pblk, group_list);
 
         spin_lock(&line->lock);
         WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
         line->state = PBLK_LINESTATE_GC;
-        list_move_tail(&line->list, &gc_list);
-        atomic_inc(&gc->inflight_gc);
-        inflight_gc++;
         spin_unlock(&line->lock);
 
-        prev_gc = 1;
-        run_gc = (nr_blocks_need > nr_blocks_free || gc->gc_forced);
-    }
-    spin_unlock(&l_mg->gc_lock);
+        list_del(&line->list);
+        spin_unlock(&l_mg->gc_lock);
+
+        spin_lock(&gc->r_lock);
+        list_add_tail(&line->list, &gc->r_list);
+        spin_unlock(&gc->r_lock);
 
-    pblk_gc_lines(pblk, &gc_list);
+        inflight_gc = atomic_inc_return(&gc->inflight_gc);
+        pblk_gc_reader_kick(gc);
 
-    if (!prev_gc && pblk->rl.rb_state > gc_group &&
-                        gc_group < PBLK_NR_GC_LISTS)
+        prev_group = 1;
+
+        /* No need to queue up more GC lines than we can handle */
+        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
+        if (!run_gc || inflight_gc >= PBLK_GC_L_QD)
+            break;
+    } while (1);
+
+    if (!prev_group && pblk->rl.rb_state > gc_group &&
+                        gc_group < PBLK_GC_NR_LISTS)
         goto next_gc_group;
 }
 
-
-static void pblk_gc_kick(struct pblk *pblk)
+void pblk_gc_kick(struct pblk *pblk)
 {
     struct pblk_gc *gc = &pblk->gc;
 
     wake_up_process(gc->gc_ts);
     pblk_gc_writer_kick(gc);
+    pblk_gc_reader_kick(gc);
     mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
 }
 
@@ -398,42 +462,34 @@ static int pblk_gc_writer_ts(void *data)
     return 0;
 }
 
-static void pblk_gc_start(struct pblk *pblk)
+static int pblk_gc_reader_ts(void *data)
 {
-    pblk->gc.gc_active = 1;
+    struct pblk *pblk = data;
 
-    pr_debug("pblk: gc start\n");
+    while (!kthread_should_stop()) {
+        if (!pblk_gc_read(pblk))
+            continue;
+        set_current_state(TASK_INTERRUPTIBLE);
+        io_schedule();
+    }
+
+    return 0;
 }
 
-int pblk_gc_status(struct pblk *pblk)
+static void pblk_gc_start(struct pblk *pblk)
 {
-    struct pblk_gc *gc = &pblk->gc;
-    int ret;
-
-    spin_lock(&gc->lock);
-    ret = gc->gc_active;
-    spin_unlock(&gc->lock);
-
-    return ret;
+    pblk->gc.gc_active = 1;
+    pr_debug("pblk: gc start\n");
 }
 
-static void __pblk_gc_should_start(struct pblk *pblk)
+void pblk_gc_should_start(struct pblk *pblk)
 {
     struct pblk_gc *gc = &pblk->gc;
 
-    lockdep_assert_held(&gc->lock);
-
     if (gc->gc_enabled && !gc->gc_active)
         pblk_gc_start(pblk);
-}
 
-void pblk_gc_should_start(struct pblk *pblk)
-{
-    struct pblk_gc *gc = &pblk->gc;
-
-    spin_lock(&gc->lock);
-    __pblk_gc_should_start(pblk);
-    spin_unlock(&gc->lock);
+    pblk_gc_kick(pblk);
 }
 
 /*
@@ -442,10 +498,7 @@ void pblk_gc_should_start(struct pblk *pblk)
  */
 static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
 {
-    spin_lock(&pblk->gc.lock);
     pblk->gc.gc_active = 0;
-    spin_unlock(&pblk->gc.lock);
-
     pr_debug("pblk: gc stop\n");
 }
 
@@ -468,20 +521,25 @@ void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
     spin_unlock(&gc->lock);
 }
 
-void pblk_gc_sysfs_force(struct pblk *pblk, int force)
+int pblk_gc_sysfs_force(struct pblk *pblk, int force)
 {
     struct pblk_gc *gc = &pblk->gc;
-    int rsv = 0;
+
+    if (force < 0 || force > 1)
+        return -EINVAL;
 
     spin_lock(&gc->lock);
-    if (force) {
-        gc->gc_enabled = 1;
-        rsv = 64;
-    }
-    pblk_rl_set_gc_rsc(&pblk->rl, rsv);
     gc->gc_forced = force;
-    __pblk_gc_should_start(pblk);
+
+    if (force)
+        gc->gc_enabled = 1;
+    else
+        gc->gc_enabled = 0;
     spin_unlock(&gc->lock);
+
+    pblk_gc_should_start(pblk);
+
+    return 0;
 }
 
 int pblk_gc_init(struct pblk *pblk)
@@ -503,30 +561,58 @@ int pblk_gc_init(struct pblk *pblk)
         goto fail_free_main_kthread;
     }
 
+    gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
+                            "pblk-gc-reader-ts");
+    if (IS_ERR(gc->gc_reader_ts)) {
+        pr_err("pblk: could not allocate GC reader kthread\n");
+        ret = PTR_ERR(gc->gc_reader_ts);
+        goto fail_free_writer_kthread;
+    }
+
     setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
     mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
 
     gc->gc_active = 0;
     gc->gc_forced = 0;
     gc->gc_enabled = 1;
-    gc->gc_jobs_active = 8;
     gc->w_entries = 0;
     atomic_set(&gc->inflight_gc, 0);
 
-    gc->gc_reader_wq = alloc_workqueue("pblk-gc-reader-wq",
-            WQ_MEM_RECLAIM | WQ_UNBOUND, gc->gc_jobs_active);
+    /* Workqueue that reads valid sectors from a line and submit them to the
+     * GC writer to be recycled.
+     */
+    gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
+            WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
+    if (!gc->gc_line_reader_wq) {
+        pr_err("pblk: could not allocate GC line reader workqueue\n");
+        ret = -ENOMEM;
+        goto fail_free_reader_kthread;
+    }
+
+    /* Workqueue that prepare lines for GC */
+    gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
+            WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
     if (!gc->gc_reader_wq) {
         pr_err("pblk: could not allocate GC reader workqueue\n");
         ret = -ENOMEM;
-        goto fail_free_writer_kthread;
+        goto fail_free_reader_line_wq;
     }
 
     spin_lock_init(&gc->lock);
     spin_lock_init(&gc->w_lock);
+    spin_lock_init(&gc->r_lock);
+
+    sema_init(&gc->gc_sem, 128);
+
     INIT_LIST_HEAD(&gc->w_list);
+    INIT_LIST_HEAD(&gc->r_list);
 
     return 0;
 
+fail_free_reader_line_wq:
+    destroy_workqueue(gc->gc_line_reader_wq);
+fail_free_reader_kthread:
+    kthread_stop(gc->gc_reader_ts);
 fail_free_writer_kthread:
     kthread_stop(gc->gc_writer_ts);
 fail_free_main_kthread:
@@ -540,6 +626,7 @@ void pblk_gc_exit(struct pblk *pblk)
     struct pblk_gc *gc = &pblk->gc;
 
     flush_workqueue(gc->gc_reader_wq);
+    flush_workqueue(gc->gc_line_reader_wq);
     del_timer(&gc->gc_timer);
     pblk_gc_stop(pblk, 1);
 
@@ -547,9 +634,15 @@ void pblk_gc_exit(struct pblk *pblk)
     if (gc->gc_ts)
         kthread_stop(gc->gc_ts);
 
-    if (pblk->gc.gc_reader_wq)
-        destroy_workqueue(pblk->gc.gc_reader_wq);
+    if (gc->gc_reader_wq)
+        destroy_workqueue(gc->gc_reader_wq);
+
+    if (gc->gc_line_reader_wq)
+        destroy_workqueue(gc->gc_line_reader_wq);
 
     if (gc->gc_writer_ts)
         kthread_stop(gc->gc_writer_ts);
+
+    if (gc->gc_reader_ts)
+        kthread_stop(gc->gc_reader_ts);
 }
```