| author | codeworkx <daniel.hillenbrand@codeworkx.de> | 2012-06-02 13:09:29 +0200 |
|---|---|---|
| committer | codeworkx <daniel.hillenbrand@codeworkx.de> | 2012-06-02 13:09:29 +0200 |
| commit | c6da2cfeb05178a11c6d062a06f8078150ee492f | (patch) |
| tree | f3b4021d252c52d6463a9b3c1bb7245e399b009c | /kernel/workqueue.c |
| parent | c6d7c4dbff353eac7919342ae6b3299a378160a6 | (diff) |
| download | kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.gz kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.bz2 kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.zip | |
samsung update 1
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 155 |
1 file changed, 155 insertions, 0 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index aec02b6a1c4..cc97810a5ff 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <mach/sec_debug.h>
 
 #include "workqueue_sched.h"
 
@@ -1074,6 +1075,158 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(queue_work);
 
+#ifdef CONFIG_WORKQUEUE_FRONT
+static void insert_work_front(struct cpu_workqueue_struct *cwq,
+			      struct work_struct *work, struct list_head *head,
+			      unsigned int extra_flags)
+{
+	struct global_cwq *gcwq = cwq->gcwq;
+
+	/* we own @work, set data and link */
+	set_work_cwq(work, cwq, extra_flags);
+
+	/*
+	 * Ensure that we get the right work->data if we see the
+	 * result of list_add() below, see try_to_grab_pending().
+	 */
+	smp_wmb();
+
+	list_add(&work->entry, head);
+
+	/*
+	 * Ensure either worker_sched_deactivated() sees the above
+	 * list_add() or we see zero nr_running to avoid workers
+	 * lying around lazily while there are works to be processed.
+	 */
+	smp_mb();
+
+	if (__need_more_worker(gcwq))
+		wake_up_worker(gcwq);
+}
+
+static void __queue_work_front(unsigned int cpu, struct workqueue_struct *wq,
+			       struct work_struct *work)
+{
+	struct global_cwq *gcwq;
+	struct cpu_workqueue_struct *cwq;
+	struct list_head *worklist;
+	unsigned int work_flags;
+	unsigned long flags;
+
+	debug_work_activate(work);
+
+	/* if dying, only works from the same workqueue are allowed */
+	if (unlikely(wq->flags & WQ_DYING) &&
+	    WARN_ON_ONCE(!is_chained_work(wq)))
+		return;
+
+	/* determine gcwq to use */
+	if (!(wq->flags & WQ_UNBOUND)) {
+		struct global_cwq *last_gcwq;
+
+		if (unlikely(cpu == WORK_CPU_UNBOUND))
+			cpu = raw_smp_processor_id();
+
+		/*
+		 * It's multi cpu.  If @wq is non-reentrant and @work
+		 * was previously on a different cpu, it might still
+		 * be running there, in which case the work needs to
+		 * be queued on that cpu to guarantee non-reentrance.
+		 */
+		gcwq = get_gcwq(cpu);
+		last_gcwq = get_work_gcwq(work);
+		if (wq->flags & WQ_NON_REENTRANT &&
+		    (last_gcwq != NULL) && last_gcwq != gcwq) {
+			struct worker *worker;
+
+			spin_lock_irqsave(&last_gcwq->lock, flags);
+
+			worker = find_worker_executing_work(last_gcwq, work);
+
+			if (worker && worker->current_cwq->wq == wq)
+				gcwq = last_gcwq;
+			else {
+				/* meh... not running there, queue here */
+				spin_unlock_irqrestore(&last_gcwq->lock, flags);
+				spin_lock_irqsave(&gcwq->lock, flags);
+			}
+		} else
+			spin_lock_irqsave(&gcwq->lock, flags);
+	} else {
+		gcwq = get_gcwq(WORK_CPU_UNBOUND);
+		spin_lock_irqsave(&gcwq->lock, flags);
+	}
+
+	/* gcwq determined, get cwq and queue */
+	cwq = get_cwq(gcwq->cpu, wq);
+	trace_workqueue_queue_work(cpu, cwq, work);
+
+	BUG_ON(!list_empty(&work->entry));
+
+	cwq->nr_in_flight[cwq->work_color]++;
+	work_flags = work_color_to_flags(cwq->work_color);
+
+	if (likely(cwq->nr_active < cwq->max_active)) {
+		trace_workqueue_activate_work(work);
+		cwq->nr_active++;
+		worklist = gcwq_determine_ins_pos(gcwq, cwq);
+	} else {
+		work_flags |= WORK_STRUCT_DELAYED;
+		worklist = &cwq->delayed_works;
+	}
+
+	insert_work_front(cwq, work, worklist, work_flags);
+
+	spin_unlock_irqrestore(&gcwq->lock, flags);
+}
+
+/**
+ * queue_work_on_front - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+int
+queue_work_on_front(int cpu, struct workqueue_struct *wq,
+		    struct work_struct *work)
+{
+	int ret = 0;
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+		__queue_work_front(cpu, wq, work);
+		ret = 1;
+	}
+	return ret;
+}
+
+/**
+ * queue_work_front - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
+ */
+int queue_work_front(struct workqueue_struct *wq, struct work_struct *work)
+{
+	int ret;
+
+	ret = queue_work_on_front(get_cpu(), wq, work);
+	put_cpu();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work_front);
+#endif
+
 /**
  * queue_work_on - queue work on specific cpu
  * @cpu: CPU number to execute work on
  * @wq: workqueue to use
  * @work: work to queue
@@ -1865,6 +2018,8 @@ __acquires(&gcwq->lock)
 	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
+	sec_debug_work_log(worker, work, f);
+
 	f(work);
 	/*
 	 * While we must be careful to not use "work" after this, the trace
```
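For illustration, here is a minimal sketch of how a driver might call the front-queueing API this patch adds. It is not part of the commit: the module and all names in it (`demo_wq`, `urgent_work`, `urgent_fn`, `demo_init`, `demo_exit`) are hypothetical, and it assumes a kernel built from this tree with `CONFIG_WORKQUEUE_FRONT` enabled.

```c
/*
 * Hypothetical usage sketch -- not part of the commit above.
 * Assumes this kernel tree with CONFIG_WORKQUEUE_FRONT=y; the
 * names (demo_wq, urgent_work, urgent_fn) are illustrative only.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void urgent_fn(struct work_struct *work)
{
	/* runs ahead of works already pending on demo_wq */
	pr_info("urgent work executed\n");
}

static DECLARE_WORK(urgent_work, urgent_fn);

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;

#ifdef CONFIG_WORKQUEUE_FRONT
	/*
	 * insert_work_front() uses list_add(), so this work lands at
	 * the head of the worklist and is picked up before works that
	 * were queued earlier with plain queue_work().
	 */
	queue_work_front(demo_wq, &urgent_work);
#else
	/* without the option, fall back to normal tail queueing */
	queue_work(demo_wq, &urgent_work);
#endif
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Note that only the insertion point changes: `insert_work_front()` differs from the stock `insert_work()` solely in using `list_add()` instead of `list_add_tail()`, so several front-queued works run in LIFO order relative to each other. The `max_active` throttle still applies; a work queued past that limit goes to `delayed_works` (at its head) and waits there, so front queueing does not bypass a saturated workqueue.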