author     Tejun Heo <tj@kernel.org>          2011-12-14 00:33:38 +0100
committer  Ziyan <jaraidaniel@gmail.com>      2016-01-08 10:36:53 +0100
commit     c1cff5fe466e4d89008725d84f76f635b7f863a6 (patch)
tree       2fa27d5ab7b7f27e226fdbb19cd1aa7e6808ab2f /block
parent     66fa8acc4245de17273acee0c9527dead751e99a (diff)
block, cfq: move ioc ioprio/cgroup changed handling to cic
ioprio/cgroup changes were handled by marking the changed state in the ioc and, on the following access to the ioc, performing RCU-protected iteration through all cic's while grabbing the matching queue_lock.

This patch moves the changed state to each cic. When the ioprio or cgroup changes, the respective bit is set on all cic's of the ioc, and when each of those cic's (not the ioc) is accessed, the change is applied for that specific ioc-queue pair.

This also fixes the following two race conditions between setting and clearing of the changed states:

* A missing barrier between the assign/load of ioprio and ioprio_changed allowed an old ioprio to be applied.

* Change requests could arrive between application of a change and clearing of the changed variables.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
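For illustration only, below is a minimal userspace model of the per-cic changed-bit pattern this patch introduces: the producer marks every cic of the ioc under the ioc lock, and each cic applies and clears only its own pending bits when it is next used. The names model_ioc, model_cic and the pthread mutex are stand-ins invented for this sketch, not the kernel's io_context, cfq_io_context or ioc->lock, and the real consumer uses atomic test_and_clear_bit() rather than plain bit operations.

    /* Minimal single-threaded model of the per-cic "changed" bitmask pattern. */
    #include <pthread.h>
    #include <stdio.h>

    #define MODEL_IOPRIO_CHANGED 0

    struct model_cic {
    	unsigned long changed;          /* one bit per pending change */
    };

    struct model_ioc {
    	pthread_mutex_t lock;           /* stands in for ioc->lock */
    	int ioprio;
    	struct model_cic cics[2];       /* one cic per queue the task touched */
    };

    /* Producer: store the new ioprio and mark every cic, all under the lock. */
    static void model_ioprio_changed(struct model_ioc *ioc, int ioprio)
    {
    	pthread_mutex_lock(&ioc->lock);
    	ioc->ioprio = ioprio;
    	for (int i = 0; i < 2; i++)
    		ioc->cics[i].changed |= 1UL << MODEL_IOPRIO_CHANGED;
    	pthread_mutex_unlock(&ioc->lock);
    }

    /* Consumer: each cic applies and clears only its own pending bit. */
    static void model_get_io_context(struct model_ioc *ioc, int idx)
    {
    	struct model_cic *cic = &ioc->cics[idx];

    	if (cic->changed & (1UL << MODEL_IOPRIO_CHANGED)) {
    		cic->changed &= ~(1UL << MODEL_IOPRIO_CHANGED);
    		printf("cic %d: applying new ioprio %d\n", idx, ioc->ioprio);
    	}
    }

    int main(void)
    {
    	struct model_ioc ioc = { .lock = PTHREAD_MUTEX_INITIALIZER };

    	model_ioprio_changed(&ioc, 4);
    	model_get_io_context(&ioc, 0);  /* applies the change for cic 0 only */
    	model_get_io_context(&ioc, 1);  /* cic 1 applies it independently */
    	return 0;
    }

Because the new value and the changed bits are published together under the same lock, a reader that sees the bit is guaranteed to see the new value, which is the ordering the old ioprio/ioprio_changed pair lacked.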
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |  2
-rw-r--r--  block/blk-ioc.c      | 45
-rw-r--r--  block/cfq-iosched.c  | 28
3 files changed, 55 insertions(+), 20 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b001dcd85b..dc00835aab6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1648,7 +1648,7 @@ static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
/* we don't lose anything even if ioc allocation fails */
ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
- ioc->cgroup_changed = 1;
+ ioc_cgroup_changed(ioc);
put_io_context(ioc);
}
}
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 30eb6ce2303..384d6e43457 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -190,6 +190,51 @@ struct io_context *get_task_io_context(struct task_struct *task,
}
EXPORT_SYMBOL(get_task_io_context);
+void ioc_set_changed(struct io_context *ioc, int which)
+{
+ struct cfq_io_context *cic;
+ struct hlist_node *n;
+
+ hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
+ set_bit(which, &cic->changed);
+}
+
+/**
+ * ioc_ioprio_changed - notify ioprio change
+ * @ioc: io_context of interest
+ * @ioprio: new ioprio
+ *
+ * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
+ * cic's. iosched is responsible for checking the bit and applying it on
+ * request issue path.
+ */
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc->ioprio = ioprio;
+ ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
+/**
+ * ioc_cgroup_changed - notify cgroup change
+ * @ioc: io_context of interest
+ *
+ * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
+ * iosched is responsible for checking the bit and applying it on request
+ * issue path.
+ */
+void ioc_cgroup_changed(struct io_context *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+}
+
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3253f100956..35a527f5262 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2909,7 +2909,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfq_clear_cfqq_prio_changed(cfqq);
}
-static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
+static void changed_ioprio(struct cfq_io_context *cic)
{
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
@@ -2938,12 +2938,6 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
-static void cfq_ioc_set_ioprio(struct io_context *ioc)
-{
- call_for_each_cic(ioc, changed_ioprio);
- ioc->ioprio_changed = 0;
-}
-
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
pid_t pid, bool is_sync)
{
@@ -2965,7 +2959,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
+static void changed_cgroup(struct cfq_io_context *cic)
{
struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
@@ -2991,12 +2985,6 @@ static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-
-static void cfq_ioc_set_cgroup(struct io_context *ioc)
-{
- call_for_each_cic(ioc, changed_cgroup);
- ioc->cgroup_changed = 0;
-}
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
@@ -3227,13 +3215,15 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
out:
get_io_context(ioc);
- if (unlikely(ioc->ioprio_changed))
- cfq_ioc_set_ioprio(ioc);
-
+ if (unlikely(cic->changed)) {
+ if (test_and_clear_bit(CIC_IOPRIO_CHANGED, &cic->changed))
+ changed_ioprio(cic);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (unlikely(ioc->cgroup_changed))
- cfq_ioc_set_cgroup(ioc);
+ if (test_and_clear_bit(CIC_CGROUP_CHANGED, &cic->changed))
+ changed_cgroup(cic);
#endif
+ }
+
return cic;
err:
if (cic)