author		Chris Mason <chris.mason@oracle.com>	2008-09-30 19:24:06 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-30 19:36:34 -0400
commit		75ccf47d13bfb66de7faf596bfe497b9af7aaa40 (patch)
tree		e1b06bc9afec8f6b48cc2fb00c5e1e4d4dda2e0b /fs/btrfs/async-thread.c
parent		45b8c9a8b1e15bf79c2c17ec217adf96785f8011 (diff)
Btrfs: fix multi-device code to use raid policies set by mkfs
When reading in block groups, a global mask of the available raid policies
should be adjusted based on the types of block groups found on disk. This
global mask is then used to decide which raid policy to use for new block
groups.

The recent allocator changes dropped the call that updated the global mask,
making all the block groups allocated at run time single striped onto a
single drive.

This also fixes the async worker threads to set any thread that uses the
requeue mechanism as busy. This allows us to avoid blocking on
get_request_wait for the async bio submission threads.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
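The block group half of the change lives in fs/btrfs/extent-tree.c and is
outside this diffstat-limited view. A minimal sketch of the idea follows,
with names approximating the btrfs code of this era rather than quoted from
this page: each block group read off disk contributes its raid bits to the
per-type availability masks that later allocations consult.

/* Sketch only: approximates set_avail_alloc_bits() in fs/btrfs/extent-tree.c;
 * the exact body is not shown on this page.
 */
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	/* raid policies this block group was created with */
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_DUP);

	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

The fix restores a call along these lines for each block group found while
reading the extent tree, so block groups allocated at run time again see the
full set of raid policies that mkfs enabled.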
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--	fs/btrfs/async-thread.c	14
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 04fb9702d14..d82efd722a4 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -302,8 +302,20 @@ int btrfs_requeue_work(struct btrfs_work *work)
 	spin_lock_irqsave(&worker->lock, flags);
 	atomic_inc(&worker->num_pending);
 	list_add_tail(&work->list, &worker->pending);
-	check_busy_worker(worker);
+
+	/* by definition we're busy, take ourselves off the idle
+	 * list
+	 */
+	if (worker->idle) {
+		spin_lock_irqsave(&worker->workers->lock, flags);
+		worker->idle = 0;
+		list_move_tail(&worker->worker_list,
+			       &worker->workers->worker_list);
+		spin_unlock_irqrestore(&worker->workers->lock, flags);
+	}
+
 	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 	return 0;
 }
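For context on the hunk above: the check_busy_worker() helper it replaces is
not shown on this page. Judging from the idle_thresh convention used elsewhere
in async-thread.c, it only moved a worker off the idle list once its pending
count crossed the pool's idle threshold, roughly along these lines (a sketch,
not quoted from the source):

/* Sketch only: approximates the helper removed by this hunk. */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;

		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

A requeueing thread is busy by definition, so the new code skips the
threshold test and marks the worker busy immediately; per the commit message,
this keeps the async bio submission threads from blocking in
get_request_wait().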