Diffstat (limited to 'kernel')
-rw-r--r--  kernel/.gitignore  6
-rw-r--r--  kernel/Makefile  20
-rw-r--r--  kernel/cgroup.c  225
-rw-r--r--  kernel/cpu.c  20
-rw-r--r--  kernel/cpu_pm.c  233
-rw-r--r--  kernel/debug/kdb/.gitignore  1
-rw-r--r--  kernel/fork.c  18
-rw-r--r--  kernel/futex.c  27
-rw-r--r--  kernel/hrtimer.c  3
-rw-r--r--  kernel/irq/generic-chip.c  16
-rw-r--r--  kernel/irq/handle.c  4
-rw-r--r--  kernel/irq/pm.c  7
-rw-r--r--  kernel/kernel_sec_debug.c  706
-rw-r--r--  kernel/module.c  6
-rw-r--r--  kernel/notifier.c  5
-rw-r--r--  kernel/panic.c  13
-rw-r--r--  kernel/pm_qos_params.c  65
-rw-r--r--  kernel/power/Kconfig  115
-rw-r--r--  kernel/power/Makefile  6
-rw-r--r--  kernel/power/consoleearlysuspend.c  78
-rw-r--r--  kernel/power/earlysuspend.c  233
-rw-r--r--  kernel/power/fbearlysuspend.c  153
-rw-r--r--  kernel/power/hibernate.c  66
-rw-r--r--  kernel/power/main.c  442
-rw-r--r--  kernel/power/power.h  43
-rw-r--r--  kernel/power/process.c  27
-rw-r--r--  kernel/power/snapshot.c  83
-rw-r--r--  kernel/power/suspend.c  78
-rw-r--r--  kernel/power/suspend_time.c  111
-rw-r--r--  kernel/power/swap.c  29
-rw-r--r--  kernel/power/userwakelock.c  219
-rw-r--r--  kernel/power/wakelock.c  687
-rw-r--r--  kernel/printk.c  139
-rw-r--r--  kernel/rtmutex.c  2
-rw-r--r--  kernel/sched.c  116
-rw-r--r--  kernel/sched_clock.c  12
-rw-r--r--  kernel/sched_debug.c  9
-rw-r--r--  kernel/softirq.c  7
-rw-r--r--  kernel/sys.c  6
-rw-r--r--  kernel/sysctl.c  8
-rw-r--r--  kernel/time/Kconfig  4
-rw-r--r--  kernel/time/Makefile  2
-rw-r--r--  kernel/time/timekeeping.c  125
-rw-r--r--  kernel/timeconst.bc  108
-rw-r--r--  kernel/timeconst.pl  376
-rw-r--r--  kernel/workqueue.c  155
46 files changed, 4285 insertions, 529 deletions
diff --git a/kernel/.gitignore b/kernel/.gitignore
deleted file mode 100644
index ab4f1090f43..00000000000
--- a/kernel/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Generated files
-#
-config_data.h
-config_data.gz
-timeconst.h
diff --git a/kernel/Makefile b/kernel/Makefile
index 2d64cfcc8b4..9977923ede6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,12 +101,16 @@ obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_PERF_EVENTS) += events/
obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
obj-$(CONFIG_PADATA) += padata.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+ifeq ($(CONFIG_MACH_U1_NA_SPR),y)
+obj-$(CONFIG_SEC_DEBUG) += kernel_sec_debug.o
+endif
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
@@ -133,8 +137,16 @@ $(obj)/config_data.h: $(obj)/config_data.gz FORCE
$(obj)/time.o: $(obj)/timeconst.h
-quiet_cmd_timeconst = TIMEC $@
- cmd_timeconst = $(PERL) $< $(CONFIG_HZ) > $@
+quiet_cmd_hzfile = HZFILE $@
+ cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
+
+targets += hz.bc
+$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
+ $(call if_changed,hzfile)
+
+quiet_cmd_bc = BC $@
+ cmd_bc = bc -q $(filter-out FORCE,$^) > $@
+
targets += timeconst.h
-$(obj)/timeconst.h: $(src)/timeconst.pl FORCE
- $(call if_changed,timeconst)
+$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
+ $(call if_changed,bc)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 38f7f76ece8..e4f859514cc 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -268,6 +268,33 @@ static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);
+/*
+ * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
+ * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
+ * reference to css->refcnt. In general, this refcnt is expected to goes down
+ * to zero, soon.
+ *
+ * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
+ */
+DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+
+static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
+{
+ if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+ wake_up_all(&cgroup_rmdir_waitq);
+}
+
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
+{
+ css_get(css);
+}
+
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
+{
+ cgroup_wakeup_rmdir_waiter(css->cgroup);
+ css_put(css);
+}
+
/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
/*
@@ -327,60 +354,52 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
return &css_set_table[index];
}
-/* We don't maintain the lists running through each css_set to its
- * task until after the first call to cgroup_iter_start(). This
- * reduces the fork()/exit() overhead for people who have cgroups
- * compiled into their kernel but not actually in use */
-static int use_task_css_set_links __read_mostly;
-
-static void __put_css_set(struct css_set *cg, int taskexit)
+static void free_css_set_work(struct work_struct *work)
{
+ struct css_set *cg = container_of(work, struct css_set, work);
struct cg_cgroup_link *link;
struct cg_cgroup_link *saved_link;
- /*
- * Ensure that the refcount doesn't hit zero while any readers
- * can see it. Similar to atomic_dec_and_lock(), but for an
- * rwlock
- */
- if (atomic_add_unless(&cg->refcount, -1, 1))
- return;
- write_lock(&css_set_lock);
- if (!atomic_dec_and_test(&cg->refcount)) {
- write_unlock(&css_set_lock);
- return;
- }
-
- /* This css_set is dead. unlink it and release cgroup refcounts */
- hlist_del(&cg->hlist);
- css_set_count--;
+ write_lock(&css_set_lock);
list_for_each_entry_safe(link, saved_link, &cg->cg_links,
cg_link_list) {
struct cgroup *cgrp = link->cgrp;
list_del(&link->cg_link_list);
list_del(&link->cgrp_link_list);
- /*
- * We may not be holding cgroup_mutex, and if cgrp->count is
- * dropped to 0 the cgroup can be destroyed at any time, hence
- * rcu_read_lock is used to keep it alive.
- */
- rcu_read_lock();
- if (atomic_dec_and_test(&cgrp->count) &&
- notify_on_release(cgrp)) {
- if (taskexit)
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ /*
+ * We may not be holding cgroup_mutex, and if cgrp->count is
+ * dropped to 0 the cgroup can be destroyed at any time, hence
+ * rcu_read_lock is used to keep it alive.
+ */
+ rcu_read_lock();
+ if (atomic_dec_and_test(&cgrp->count)) {
check_for_release(cgrp);
+ cgroup_wakeup_rmdir_waiter(cgrp);
}
- rcu_read_unlock();
+ rcu_read_unlock();
kfree(link);
}
-
write_unlock(&css_set_lock);
- kfree_rcu(cg, rcu_head);
+
+ kfree(cg);
}
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+ struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+
+ INIT_WORK(&cg->work, free_css_set_work);
+ schedule_work(&cg->work);
+}
+
+/* We don't maintain the lists running through each css_set to its
+ * task until after the first call to cgroup_iter_start(). This
+ * reduces the fork()/exit() overhead for people who have cgroups
+ * compiled into their kernel but not actually in use */
+static int use_task_css_set_links __read_mostly;
+
/*
* refcounted get/put for css_set objects
*/
@@ -389,14 +408,26 @@ static inline void get_css_set(struct css_set *cg)
atomic_inc(&cg->refcount);
}
-static inline void put_css_set(struct css_set *cg)
+static void put_css_set(struct css_set *cg)
{
- __put_css_set(cg, 0);
-}
+ /*
+ * Ensure that the refcount doesn't hit zero while any readers
+ * can see it. Similar to atomic_dec_and_lock(), but for an
+ * rwlock
+ */
+ if (atomic_add_unless(&cg->refcount, -1, 1))
+ return;
+ write_lock(&css_set_lock);
+ if (!atomic_dec_and_test(&cg->refcount)) {
+ write_unlock(&css_set_lock);
+ return;
+ }
-static inline void put_css_set_taskexit(struct css_set *cg)
-{
- __put_css_set(cg, 1);
+ hlist_del(&cg->hlist);
+ css_set_count--;
+
+ write_unlock(&css_set_lock);
+ call_rcu(&cg->rcu_head, free_css_set_rcu);
}
/*
@@ -728,9 +759,9 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
* cgroup_attach_task(), which overwrites one tasks cgroup pointer with
* another. It does so using cgroup_mutex, however there are
* several performance critical places that need to reference
- * task->cgroup without the expense of grabbing a system global
+ * task->cgroups without the expense of grabbing a system global
* mutex. Therefore except as noted below, when dereferencing or, as
- * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use
+ * in cgroup_attach_task(), modifying a task's cgroups pointer we use
* task_lock(), which acts on a spinlock (task->alloc_lock) already in
* the task_struct routinely used for such matters.
*
@@ -920,33 +951,6 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
}
/*
- * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
- * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
- * reference to css->refcnt. In general, this refcnt is expected to goes down
- * to zero, soon.
- *
- * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
- */
-DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
-
-static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
-{
- if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
- wake_up_all(&cgroup_rmdir_waitq);
-}
-
-void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
-{
- css_get(css);
-}
-
-void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
-{
- cgroup_wakeup_rmdir_waiter(css->cgroup);
- css_put(css);
-}
-
-/*
* Call with cgroup_mutex held. Drops reference counts on modules, including
* any duplicate ones that parse_cgroupfs_options took. If this function
* returns an error, no reference counts are touched.
@@ -1827,6 +1831,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
struct cgroup_subsys *ss, *failed_ss = NULL;
struct cgroup *oldcgrp;
struct cgroupfs_root *root = cgrp->root;
+ struct css_set *cg;
/* Nothing to do if the task is already in that cgroup */
oldcgrp = task_cgroup_from_root(tsk, root);
@@ -1856,6 +1861,11 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
}
}
+ task_lock(tsk);
+ cg = tsk->cgroups;
+ get_css_set(cg);
+ task_unlock(tsk);
+
retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
if (retval)
goto out;
@@ -1868,8 +1878,9 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
if (ss->attach)
ss->attach(ss, cgrp, oldcgrp, tsk);
}
-
- synchronize_rcu();
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ /* put_css_set will not destroy cg until after an RCU grace period */
+ put_css_set(cg);
/*
* wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -2191,6 +2202,24 @@ out_free_group_list:
return retval;
}
+static int cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
+{
+ struct cgroup_subsys *ss;
+ int ret;
+
+ for_each_subsys(cgrp->root, ss) {
+ if (ss->allow_attach) {
+ ret = ss->allow_attach(cgrp, tsk);
+ if (ret)
+ return ret;
+ } else {
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
/*
* Find the task_struct of the task to attach by vpid and pass it along to the
* function to attach either it or all tasks in its threadgroup. Will take
@@ -2236,9 +2265,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
if (cred->euid &&
cred->euid != tcred->uid &&
cred->euid != tcred->suid) {
- rcu_read_unlock();
- cgroup_unlock();
- return -EACCES;
+ /*
+ * if the default permission check fails, give each
+ * cgroup a chance to extend the permission check
+ */
+ ret = cgroup_allow_attach(cgrp, tsk);
+ if (ret) {
+ rcu_read_unlock();
+ cgroup_unlock();
+ return ret;
+ }
}
get_task_struct(tsk);
rcu_read_unlock();
@@ -3821,6 +3857,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (err < 0)
goto err_remove;
+ set_bit(CGRP_RELEASABLE, &parent->flags);
+
/* The cgroup directory was pre-locked for us */
BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
@@ -3952,6 +3990,21 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
return !failed;
}
+/* checks if all of the css_sets attached to a cgroup have a refcount of 0.
+ * Must be called with css_set_lock held */
+static int cgroup_css_sets_empty(struct cgroup *cgrp)
+{
+ struct cg_cgroup_link *link;
+
+ list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
+ struct css_set *cg = link->cg;
+ if (atomic_read(&cg->refcount) > 0)
+ return 0;
+ }
+
+ return 1;
+}
+
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
struct cgroup *cgrp = dentry->d_fsdata;
@@ -3964,7 +4017,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
/* the vfs holds both inode->i_mutex already */
again:
mutex_lock(&cgroup_mutex);
- if (atomic_read(&cgrp->count) != 0) {
+ if (!cgroup_css_sets_empty(cgrp)) {
mutex_unlock(&cgroup_mutex);
return -EBUSY;
}
@@ -3997,7 +4050,7 @@ again:
mutex_lock(&cgroup_mutex);
parent = cgrp->parent;
- if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
+ if (!cgroup_css_sets_empty(cgrp) || !list_empty(&cgrp->children)) {
clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
mutex_unlock(&cgroup_mutex);
return -EBUSY;
@@ -4037,7 +4090,6 @@ again:
cgroup_d_remove_dir(d);
dput(d);
- set_bit(CGRP_RELEASABLE, &parent->flags);
check_for_release(parent);
/*
@@ -4637,7 +4689,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
task_unlock(tsk);
if (cg)
- put_css_set_taskexit(cg);
+ put_css_set(cg);
}
/**
@@ -4691,6 +4743,14 @@ static void check_for_release(struct cgroup *cgrp)
}
/* Caller must verify that the css is not for root cgroup */
+void __css_get(struct cgroup_subsys_state *css, int count)
+{
+ atomic_add(count, &css->refcnt);
+ set_bit(CGRP_RELEASABLE, &css->cgroup->flags);
+}
+EXPORT_SYMBOL_GPL(__css_get);
+
+/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css, int count)
{
struct cgroup *cgrp = css->cgroup;
@@ -4698,10 +4758,7 @@ void __css_put(struct cgroup_subsys_state *css, int count)
rcu_read_lock();
val = atomic_sub_return(count, &css->refcnt);
if (val == 1) {
- if (notify_on_release(cgrp)) {
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
- check_for_release(cgrp);
- }
+ check_for_release(cgrp);
cgroup_wakeup_rmdir_waiter(cgrp);
}
rcu_read_unlock();
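
Usage sketch (not part of this patch): a controller could opt in to the new allow_attach() hook that cgroup_allow_attach() consults when the default euid check in attach_task_by_pid() fails. The subsystem wiring and the "same real uid" rule below are illustrative assumptions, not anything this diff defines.

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/sched.h>

/* Hypothetical controller policy: allow attach when caller and target task
 * share the same real uid; otherwise keep the usual -EACCES behaviour. */
static int example_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
{
	const struct cred *cred = current_cred(), *tcred;
	int ret = -EACCES;

	rcu_read_lock();
	tcred = __task_cred(tsk);
	if (cred->uid == tcred->uid)
		ret = 0;
	rcu_read_unlock();
	return ret;
}

/* wired up through the subsystem descriptor, e.g.:
 *	.allow_attach = example_allow_attach,
 */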
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa39dd7a384..eae3d9b3957 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -668,3 +668,23 @@ void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
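
Usage sketch (not part of this patch): the idle notifier chain added here is intended to be fired by architecture idle code; a driver can listen on it as below. The meaning of the event code is up to the platform callers and is not shown in this hunk, so the callback only logs it.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Sketch of a listener on the new idle notifier chain. */
static int my_idle_notify(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	/* val is whatever event code the platform passes to
	 * idle_notifier_call_chain(); interpretation is platform-defined. */
	pr_debug("idle notifier event %lu\n", val);
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notify,
};

/* driver init: idle_notifier_register(&my_idle_nb); */
/* driver remove: idle_notifier_unregister(&my_idle_nb); */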
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 00000000000..9656a3c3650
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+ int ret;
+
+ ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
+
+ return notifier_to_errno(ret);
+}
+
+/**
+ * cpu_pm_register_notifier - register a driver with cpu_pm
+ * @nb: notifier block to register
+ *
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_register.
+ */
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+/**
+ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
+ * @nb: notifier block to be unregistered
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_unregister.
+ */
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret;
+
+ write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+ ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+/**
+ * cpu_pm_enter - CPU low power entry notifier
+ *
+ * Notifies listeners that a single CPU is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected CPU with interrupts disabled. Platform is
+ * responsible for ensuring that cpu_pm_enter is not called twice on the same
+ * CPU before cpu_pm_exit is called. Notified drivers can include VFP
+ * co-processor, interrupt controller and its PM extensions, local CPU
+ * timers context save/restore which shouldn't be interrupted. Hence it
+ * must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform listeners (nr_calls - 1) about failure of CPU PM
+ * PM entry who are notified earlier to prepare for it.
+ */
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+/**
+ * cpu_pm_exit - CPU low power exit notifier
+ *
+ * Notifies listeners that a single CPU is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Notified drivers can include VFP co-processor, interrupt controller
+ * and its PM extensions, local CPU timers context save/restore which
+ * shouldn't be interrupted. Hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+/**
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include VFP co-processor, interrupt controller
+ * and its PM extensions, local CPU timers context save/restore which
+ * shouldn't be interrupted. Hence it must be called with interrupts disabled.
+ *
+ * Must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_enter(void)
+{
+ int nr_calls;
+ int ret = 0;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+ * Inform listeners (nr_calls - 1) about failure of CPU cluster
+ * PM entry who are notified earlier to prepare for it.
+ */
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+/**
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
+ *
+ * Notifies listeners that all cpus in a power domain are exiting form a
+ * low power state that may have caused some blocks in the same power domain
+ * to reset.
+ *
+ * Must be called after cpu_pm_exit has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include VFP co-processor, interrupt controller
+ * and its PM extensions, local CPU timers context save/restore which
+ * shouldn't be interrupted. Hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_exit(void)
+{
+ int ret;
+
+ read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
+#ifdef CONFIG_PM
+static int cpu_pm_suspend(void)
+{
+ int ret;
+
+ ret = cpu_pm_enter();
+ if (ret)
+ return ret;
+
+ ret = cpu_cluster_pm_enter();
+ return ret;
+}
+
+static void cpu_pm_resume(void)
+{
+ cpu_cluster_pm_exit();
+ cpu_pm_exit();
+}
+
+static struct syscore_ops cpu_pm_syscore_ops = {
+ .suspend = cpu_pm_suspend,
+ .resume = cpu_pm_resume,
+};
+
+static int cpu_pm_init(void)
+{
+ register_syscore_ops(&cpu_pm_syscore_ops);
+ return 0;
+}
+core_initcall(cpu_pm_init);
+#endif
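
Usage sketch (not part of this patch): a driver whose hardware state lives in the CPU power domain can register with the framework above. The event codes used below (CPU_PM_ENTER, CPU_PM_ENTER_FAILED, CPU_PM_EXIT and the CPU_CLUSTER_PM_* variants) come straight from this file's API; only the driver name and the save/restore bodies are placeholders.

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-CPU context the low power state may lose */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-CPU context */
		break;
	case CPU_CLUSTER_PM_ENTER:
		/* save state shared by the whole power domain */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		/* restore the shared state */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
	.notifier_call = my_cpu_pm_notify,
};

/* driver init: cpu_pm_register_notifier(&my_cpu_pm_nb); */
/* driver remove: cpu_pm_unregister_notifier(&my_cpu_pm_nb); */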
diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore
deleted file mode 100644
index 396d12eda9e..00000000000
--- a/kernel/debug/kdb/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-gen-kdb_cmds.c
diff --git a/kernel/fork.c b/kernel/fork.c
index 3d42aa3dad3..158ca4f026c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -156,6 +156,9 @@ struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
+/* Notifier list called when a task struct is freed */
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
@@ -187,6 +190,18 @@ static inline void put_signal_struct(struct signal_struct *sig)
free_signal_struct(sig);
}
+int task_free_register(struct notifier_block *n)
+{
+ return atomic_notifier_chain_register(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_register);
+
+int task_free_unregister(struct notifier_block *n)
+{
+ return atomic_notifier_chain_unregister(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_unregister);
+
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
@@ -197,6 +212,7 @@ void __put_task_struct(struct task_struct *tsk)
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
+ atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
@@ -1020,7 +1036,7 @@ static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
+ plist_head_init(&p->pi_waiters);
p->pi_blocked_on = NULL;
#endif
}
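
Usage sketch (not part of this patch): __put_task_struct() now fires an atomic notifier with the task as the data pointer, so a module can observe task teardown like this. Only the names are invented; task_free_register()/task_free_unregister() are exactly what the hunk above exports.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/sched.h>

static int my_task_free_notify(struct notifier_block *nb, unsigned long unused,
			       void *data)
{
	struct task_struct *tsk = data;

	/* drop any per-task bookkeeping keyed on tsk */
	pr_debug("task %d is being freed\n", tsk->pid);
	return NOTIFY_OK;
}

static struct notifier_block my_task_free_nb = {
	.notifier_call = my_task_free_notify,
};

/* module init: task_free_register(&my_task_free_nb); */
/* module exit: task_free_unregister(&my_task_free_nb); */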
diff --git a/kernel/futex.c b/kernel/futex.c
index 5c305c06958..c7c19cb4c03 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1272,6 +1272,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
if (requeue_pi) {
/*
+ * Requeue PI only works on two distinct uaddrs. This
+ * check is only valid for private futexes. See below.
+ */
+ if (uaddr1 == uaddr2)
+ return -EINVAL;
+
+ /*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
*/
@@ -1309,6 +1316,15 @@ retry:
if (unlikely(ret != 0))
goto out_put_key1;
+ /*
+ * The check above which compares uaddrs is not sufficient for
+ * shared futexes. We need to compare the keys:
+ */
+ if (requeue_pi && match_futex(&key1, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
@@ -2331,6 +2347,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (ret)
goto out_key2;
+ /*
+ * The check above which compares uaddrs is not sufficient for
+ * shared futexes. We need to compare the keys:
+ */
+ if (match_futex(&q.key, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -2759,7 +2784,7 @@ static int __init futex_init(void)
futex_cmpxchg_enabled = 1;
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
- plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+ plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 80ec91dfdee..4407e3e916e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -49,6 +49,7 @@
#include <asm/uaccess.h>
#include <trace/events/timer.h>
+#include <mach/sec_debug.h>
/*
* The timer bases:
@@ -1235,7 +1236,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
*/
raw_spin_unlock(&cpu_base->lock);
trace_hrtimer_expire_entry(timer, now);
+ sec_debug_hrtimer_log(timer, fn, 1);
restart = fn(timer);
+ sec_debug_hrtimer_log(timer, fn, 2);
trace_hrtimer_expire_exit(timer);
raw_spin_lock(&cpu_base->lock);
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index e38544dddb1..11ebd521210 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -144,6 +144,22 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
}
/**
+ * irq_gc_mask_and_ack_set- Mask and ack pending interrupt
+ * @d: irq_data
+ */
+void irq_gc_mask_and_ack_set(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ gc->mask_cache |= mask;
+ irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
+ irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+ irq_gc_unlock(gc);
+}
+
+/**
* irq_gc_eoi - EOI interrupt
* @d: irq_data
*/
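
Usage sketch (not part of this patch): irq_gc_mask_and_ack_set() is meant to be installed as a generic chip's irq_mask_ack callback for controllers where setting a bit in the mask register masks the line. The controller name and register offsets below are hypothetical; the generic-chip setup calls are the existing kernel API.

#include <linux/irq.h>

#define MY_INTC_MASK_OFFSET	0x08	/* hypothetical register offsets */
#define MY_INTC_ACK_OFFSET	0x0c

static void __init my_intc_init(unsigned int irq_base, void __iomem *reg_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MYINTC", 1, irq_base, reg_base,
				    handle_level_irq);
	ct = gc->chip_types;
	/* mask-and-ack via the new helper; unmask by clearing the mask bit */
	ct->chip.irq_mask_ack = irq_gc_mask_and_ack_set;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->regs.mask = MY_INTC_MASK_OFFSET;
	ct->regs.ack = MY_INTC_ACK_OFFSET;
	irq_setup_generic_chip(gc, IRQ_MSK(1), 0, IRQ_NOREQUEST, 0);
}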
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 10e077289c8..c3403529267 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -18,6 +18,8 @@
#include <trace/events/irq.h>
+#include <mach/sec_debug.h>
+
#include "internals.h"
/**
@@ -122,9 +124,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
do {
irqreturn_t res;
+ sec_debug_irq_log(irq, (void *)action->handler, 1);
trace_irq_handler_entry(irq, action);
res = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, res);
+ sec_debug_irq_log(irq, (void *)action->handler, 2);
if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
irq, action->handler))
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 15e53b1766a..fe4b09cf829 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -104,8 +104,13 @@ int check_wakeup_irqs(void)
for_each_irq_desc(irq, desc) {
if (irqd_is_wakeup_set(&desc->irq_data)) {
- if (desc->istate & IRQS_PENDING)
+ if (desc->istate & IRQS_PENDING) {
+ pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
+ irq,
+ desc->action && desc->action->name ?
+ desc->action->name : "");
return -EBUSY;
+ }
continue;
}
/*
diff --git a/kernel/kernel_sec_debug.c b/kernel/kernel_sec_debug.c
new file mode 100644
index 00000000000..4fc452746aa
--- /dev/null
+++ b/kernel/kernel_sec_debug.c
@@ -0,0 +1,706 @@
+/* kernel_sec_debug.c
+ *
+ * Exception handling in kernel by SEC
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ */
+
+#ifdef CONFIG_SEC_DEBUG
+
+#include <linux/kernel_sec_common.h>
+#include <asm/cacheflush.h> /* cacheflush*/
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/vmalloc.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+
+#include <linux/file.h>
+#include <mach/hardware.h>
+
+#ifndef CONFIG_S5PV310_RAFFAELLO
+#define CONFIG_S5PV310_RAFFAELLO
+#endif
+
+/*
+ * Variable
+ */
+
+const char *gkernel_sec_build_info_date_time[] = {
+ __DATE__,
+ __TIME__
+};
+
+#define DEBUG_LEVEL_FILE_NAME "/mnt/.lfs/debug_level.inf"
+#define DEBUG_LEVEL_RD 0
+#define DEBUG_LEVEL_WR 1
+static int debuglevel;
+
+/* klaatu*/
+/*sched_log_t gExcpTaskLog[SCHED_LOG_MAX];*/
+unsigned int gExcpTaskLogIdx = 0;
+
+typedef enum {
+ __SERIAL_SPEED,
+ __LOAD_RAMDISK,
+ __BOOT_DELAY,
+ __LCD_LEVEL,
+ __SWITCH_SEL,
+ __PHONE_DEBUG_ON,
+ __LCD_DIM_LEVEL,
+ __MELODY_MODE,
+ __REBOOT_MODE,
+ __NATION_SEL,
+ __SET_DEFAULT_PARAM,
+ __PARAM_INT_11,
+ __PARAM_INT_12,
+ __PARAM_INT_13,
+ __PARAM_INT_14,
+ __VERSION,
+ __CMDLINE,
+ __PARAM_STR_2,
+ __PARAM_STR_3,
+ __PARAM_STR_4
+} param_idx;
+
+char gkernel_sec_build_info[100];
+unsigned int HWREV = 1;
+unsigned char kernel_sec_cause_str[KERNEL_SEC_DEBUG_CAUSE_STR_LEN];
+
+/*
+ * Function
+ */
+
+void __iomem *kernel_sec_viraddr_wdt_reset_reg;
+__used t_kernel_sec_arm_core_regsiters kernel_sec_core_reg_dump;
+__used t_kernel_sec_mmu_info kernel_sec_mmu_reg_dump;
+__used kernel_sec_upload_cause_type gkernel_sec_upload_cause;
+
+#if defined(CONFIG_S5PV310_RAFFAELLO) /* dpram*/
+static volatile void __iomem *idpram_base; /* Base of internal DPRAM*/
+
+static volatile void __iomem *onedram_base = NULL; /* Base of OneDRAM*/
+static volatile unsigned int *onedram_sem;
+static volatile unsigned int *onedram_mailboxAB; /*received mail*/
+static volatile unsigned int *onedram_mailboxBA; /*send mail*/
+#else
+volatile void __iomem *dpram_base = 0;
+volatile unsigned int *onedram_sem;
+volatile unsigned int *onedram_mailboxAB; /*received mail*/
+volatile unsigned int *onedram_mailboxBA; /*send mail*/
+#endif
+unsigned int received_cp_ack = 0;
+
+extern void (*sec_set_param_value)(int idx, void *value);
+extern void (*sec_get_param_value)(int idx, void *value);
+
+#if defined(CONFIG_S5PV310_RAFFAELLO) /* dpram*/
+
+/*
+ * assigned 16K internal dpram buf for debugging
+ */
+
+#define DPRAM_BUF_SIZE 0x4000
+struct _idpram_buf {
+ unsigned int dpram_start_key1;
+ unsigned int dpram_start_key2;
+ unsigned char dpram_buf[DPRAM_BUF_SIZE];
+ unsigned int dpram_end_key;
+} g_cdma_dpram_buf = {
+ .dpram_start_key1 = 'R',
+ .dpram_start_key2 = 'A',
+ .dpram_buf[0] = 'N',
+ .dpram_buf[1] = 'O',
+ .dpram_buf[2] = 'N',
+ .dpram_buf[3] = 'E',
+ .dpram_end_key = 'D'
+};
+
+void kernel_sec_cdma_dpram_dump(void)
+{
+ printk(KERN_EMERG "Backup CDMA dpram to RAM refore upload\n");
+ memcpy(g_cdma_dpram_buf.dpram_buf, (void *)idpram_base, DPRAM_BUF_SIZE);
+ printk(KERN_EMERG "buf address (0x%x), dpram (0x%x)\n", \
+ (unsigned int)g_cdma_dpram_buf.dpram_buf, (unsigned int) idpram_base);
+}
+EXPORT_SYMBOL(kernel_sec_cdma_dpram_dump);
+#endif
+
+void kernel_sec_set_cp_upload(void)
+{
+ unsigned int send_mail, wait_count;
+
+#if defined(CONFIG_S5PV310_RAFFAELLO) /* dpram*/
+ static volatile u16 *cp_dpram_mbx_BA; /*send mail box*/
+ static volatile u16 *cp_dpram_mbx_AB; /*receive mail box*/
+ u16 cp_irq_mask;
+
+ *((unsigned short *)idpram_base) = 0x554C;
+
+ cp_dpram_mbx_BA = (volatile u16 *)(idpram_base + 0x3FFC);
+ cp_dpram_mbx_AB = (volatile u16 *)(idpram_base + 0x3FFE);
+ cp_irq_mask = 0xCF; /*0x80|0x40|0x0F;*/
+
+#ifdef CDMA_IPC_C210_IDPRAM
+ iowrite16(cp_irq_mask, (void *)cp_dpram_mbx_BA);
+#else
+ *cp_dpram_mbx_BA = cp_irq_mask;
+#endif
+ printk(KERN_EMERG"[kernel_sec_dump_set_cp_upload]" \
+ "set cp upload mode, MailboxBA 0x%8x\n", cp_irq_mask);
+ wait_count = 0;
+ while (1) {
+ cp_irq_mask = ioread16((void *)cp_dpram_mbx_AB);
+ if (cp_irq_mask == 0xCF) {
+ printk(KERN_EMERG" - Done. cp_irq_mask: 0x%04X\n", \
+ cp_irq_mask);
+ break;
+ }
+ mdelay(10);
+ if (++wait_count > 500) {
+ printk(KERN_EMERG"- Fail to set CP uploadmode." \
+ "cp_irq_mask: 0x%04X\n", cp_irq_mask);
+ break;
+ }
+ }
+ printk(KERN_EMERG"modem_wait_count : %d\n", wait_count);
+
+#else
+ send_mail = KERNEL_SEC_DUMP_AP_DEAD_INDICATOR;
+
+ *onedram_sem = 0x0;
+ *onedram_mailboxBA = send_mail;
+ printk(KERN_EMERG"[kernel_sec_dump_set_cp_upload]" \
+ "set cp upload mode, MailboxBA 0x%8x\n", send_mail);
+ wait_count = 0;
+ received_cp_ack = 0;
+ while (1) {
+ if (received_cp_ack == 1) {
+ printk(KERN_EMERG" - Done.\n");
+ break;
+ }
+ mdelay(10);
+ if (++wait_count > 500) {
+ printk(KERN_EMERG" - Fail to set CP uploadmode.\n");
+ break;
+ }
+ }
+ printk(KERN_EMERG"modem_wait_count : %d\n", wait_count);
+#endif
+
+#if defined(CONFIG_S5PV310_RAFFAELLO) /* dpram*/
+ /*
+ * QSC6085 marking the QSC upload mode
+ */
+ *((unsigned int *)idpram_base) = 0xdeaddead;
+ printk(KERN_EMERG"QSC upload magic key write\n");
+ kernel_sec_cdma_dpram_dump();
+#endif
+}
+EXPORT_SYMBOL(kernel_sec_set_cp_upload);
+
+
+void kernel_sec_set_cp_ack(void) /*is set by dpram - dpram_irq_handler*/
+{
+ received_cp_ack = 1;
+}
+EXPORT_SYMBOL(kernel_sec_set_cp_ack);
+
+
+
+void kernel_sec_set_upload_magic_number(void)
+{
+ int *magic_virt_addr = (int *) LOKE_BOOT_USB_DWNLD_V_ADDR;
+
+ if ((KERNEL_SEC_DEBUG_LEVEL_MID == kernel_sec_get_debug_level()) ||
+ (KERNEL_SEC_DEBUG_LEVEL_HIGH == kernel_sec_get_debug_level())) {
+ *magic_virt_addr = LOKE_BOOT_USB_DWNLDMAGIC_NO; /* SET*/
+ printk(KERN_EMERG"KERNEL:magic_number=0x%x" \
+ "SET_UPLOAD_MAGIC_NUMBER\n", *magic_virt_addr);
+ } else {
+ *magic_virt_addr = 0;
+ printk(KERN_EMERG"KERNEL:" \
+ "magic_number=0x%x DEBUG LEVEL low!!\n", \
+ *magic_virt_addr);
+ }
+}
+EXPORT_SYMBOL(kernel_sec_set_upload_magic_number);
+
+
+void kernel_sec_get_debug_level_from_boot(void)
+{
+ unsigned int temp;
+ temp = __raw_readl(S5P_INFORM6);
+ temp &= KERNEL_SEC_DEBUG_LEVEL_MASK;
+ temp = temp >> KERNEL_SEC_DEBUG_LEVEL_BIT;
+
+ if (temp == 0x0) /*low*/
+ debuglevel = KERNEL_SEC_DEBUG_LEVEL_LOW;
+ else if (temp == 0x1) /*mid*/
+ debuglevel = KERNEL_SEC_DEBUG_LEVEL_MID;
+ else if (temp == 0x2) /*high*/
+ debuglevel = KERNEL_SEC_DEBUG_LEVEL_HIGH;
+ else {
+ printk(KERN_EMERG"KERNEL:kernel_sec_get_debug_level_from_boot" \
+ "(reg value is incorrect.)\n");
+ /*debuglevel = KERNEL_SEC_DEBUG_LEVEL_LOW;*/
+ debuglevel = KERNEL_SEC_DEBUG_LEVEL_MID;
+ }
+
+ printk(KERN_EMERG"KERNEL:" \
+ "kernel_sec_get_debug_level_from_boot=0x%x\n", debuglevel);
+}
+
+
+void kernel_sec_clear_upload_magic_number(void)
+{
+ int *magic_virt_addr = (int *) LOKE_BOOT_USB_DWNLD_V_ADDR;
+
+ *magic_virt_addr = 0; /* CLEAR*/
+ printk(KERN_EMERG"KERNEL:magic_number=%x " \
+ "CLEAR_UPLOAD_MAGIC_NUMBER\n", *magic_virt_addr);
+}
+EXPORT_SYMBOL(kernel_sec_clear_upload_magic_number);
+
+void kernel_sec_map_wdog_reg(void)
+{
+ /* Virtual Mapping of Watchdog register */
+ kernel_sec_viraddr_wdt_reset_reg = ioremap_nocache(S3C_PA_WDT, 0x400);
+
+ if (kernel_sec_viraddr_wdt_reset_reg == NULL) {
+ printk(KERN_EMERG"Failed to ioremap()" \
+ "region in forced upload keystring\n");
+ }
+}
+EXPORT_SYMBOL(kernel_sec_map_wdog_reg);
+
+void kernel_sec_set_upload_cause(kernel_sec_upload_cause_type uploadType)
+{
+ unsigned int temp;
+ gkernel_sec_upload_cause = uploadType;
+
+
+ temp = __raw_readl(S5P_INFORM6);
+ /*KERNEL_SEC_UPLOAD_CAUSE_MASK 0x000000FF*/
+ temp |= uploadType;
+ __raw_writel(temp , S5P_INFORM6);
+ printk(KERN_EMERG"(kernel_sec_set_upload_cause)" \
+ ": upload_cause set %x\n", uploadType);
+}
+EXPORT_SYMBOL(kernel_sec_set_upload_cause);
+
+void kernel_sec_set_cause_strptr(unsigned char *str_ptr, int size)
+{
+ unsigned int temp;
+
+ memset((void *)kernel_sec_cause_str, 0, sizeof(kernel_sec_cause_str));
+ memcpy(kernel_sec_cause_str, str_ptr, size);
+
+ temp = virt_to_phys(kernel_sec_cause_str);
+ /*loke read this ptr, display_aries_upload_image*/
+ __raw_writel(temp, LOKE_BOOT_USB_DWNLD_V_ADDR+4);
+}
+EXPORT_SYMBOL(kernel_sec_set_cause_strptr);
+
+
+void kernel_sec_set_autotest(void)
+{
+ unsigned int temp;
+
+ temp = __raw_readl(S5P_INFORM6);
+ temp |= 1<<KERNEL_SEC_UPLOAD_AUTOTEST_BIT;
+ __raw_writel(temp , S5P_INFORM6);
+}
+EXPORT_SYMBOL(kernel_sec_set_autotest);
+
+void kernel_sec_set_build_info(void)
+{
+ char *p = gkernel_sec_build_info;
+ sprintf(p, "ARIES_BUILD_INFO: HWREV: %x", HWREV);
+ strcat(p, " Date:");
+ strcat(p, gkernel_sec_build_info_date_time[0]);
+ strcat(p, " Time:");
+ strcat(p, gkernel_sec_build_info_date_time[1]);
+}
+EXPORT_SYMBOL(kernel_sec_set_build_info);
+
+void kernel_sec_init(void)
+{
+ /*set the dpram mailbox virtual address*/
+#if defined(CONFIG_S5PV310_RAFFAELLO) /*dpram*/
+
+ idpram_base = (volatile void *) \
+ ioremap_nocache(IDPRAM_PHYSICAL_ADDR, 0x4000);
+ if (idpram_base == NULL)
+ printk(KERN_EMERG "failed ioremap g_idpram_region\n");
+#endif
+ kernel_sec_get_debug_level_from_boot();
+ kernel_sec_set_upload_magic_number();
+ kernel_sec_set_upload_cause(UPLOAD_CAUSE_INIT);
+ kernel_sec_map_wdog_reg();
+}
+EXPORT_SYMBOL(kernel_sec_init);
+
+/* core reg dump function*/
+void kernel_sec_get_core_reg_dump(t_kernel_sec_arm_core_regsiters *regs)
+{
+ asm(
+ /* we will be in SVC mode when we enter this function.
+ Collect SVC registers along with cmn registers.*/
+ "str r0, [%0,#0]\n\t" /*R0*/
+ "str r1, [%0,#4]\n\t" /*R1*/
+ "str r2, [%0,#8]\n\t" /*R2*/
+ "str r3, [%0,#12]\n\t" /*R3*/
+ "str r4, [%0,#16]\n\t" /*R4*/
+ "str r5, [%0,#20]\n\t" /*R5*/
+ "str r6, [%0,#24]\n\t" /*R6*/
+ "str r7, [%0,#28]\n\t" /*R7*/
+ "str r8, [%0,#32]\n\t" /*R8*/
+ "str r9, [%0,#36]\n\t" /*R9*/
+ "str r10, [%0,#40]\n\t" /*R10*/
+ "str r11, [%0,#44]\n\t" /*R11*/
+ "str r12, [%0,#48]\n\t" /*R12*/
+
+ /* SVC */
+ "str r13, [%0,#52]\n\t" /*R13_SVC*/
+ "str r14, [%0,#56]\n\t" /*R14_SVC*/
+ "mrs r1, spsr\n\t" /*SPSR_SVC*/
+ "str r1, [%0,#60]\n\t"
+
+ /* PC and CPSR */
+ "sub r1, r15, #0x4\n\t" /*PC*/
+ "str r1, [%0,#64]\n\t"
+ "mrs r1, cpsr\n\t" /*CPSR*/
+ "str r1, [%0,#68]\n\t"
+
+ /* SYS/USR */
+ "mrs r1, cpsr\n\t" /*switch to SYS mode*/
+ "and r1, r1, #0xFFFFFFE0\n\t"
+ "orr r1, r1, #0x1f\n\t"
+ "msr cpsr,r1\n\t"
+ "str r13, [%0,#72]\n\t" /*R13_USR*/
+ "str r14, [%0,#76]\n\t" /*R13_USR*/
+
+ /*FIQ*/
+ "mrs r1, cpsr\n\t" /*switch to FIQ mode*/
+ "and r1,r1,#0xFFFFFFE0\n\t"
+ "orr r1,r1,#0x11\n\t"
+ "msr cpsr,r1\n\t"
+ "str r8, [%0,#80]\n\t" /*R8_FIQ*/
+ "str r9, [%0,#84]\n\t" /*R9_FIQ*/
+ "str r10, [%0,#88]\n\t" /*R10_FIQ*/
+ "str r11, [%0,#92]\n\t" /*R11_FIQ*/
+ "str r12, [%0,#96]\n\t" /*R12_FIQ*/
+ "str r13, [%0,#100]\n\t" /*R13_FIQ*/
+ "str r14, [%0,#104]\n\t" /*R14_FIQ*/
+ "mrs r1, spsr\n\t" /*SPSR_FIQ*/
+ "str r1, [%0,#108]\n\t"
+
+ /*IRQ*/
+ "mrs r1, cpsr\n\t" /*switch to IRQ mode*/
+ "and r1, r1, #0xFFFFFFE0\n\t"
+ "orr r1, r1, #0x12\n\t"
+ "msr cpsr,r1\n\t"
+ "str r13, [%0,#112]\n\t" /*R13_IRQ*/
+ "str r14, [%0,#116]\n\t" /*R14_IRQ*/
+ "mrs r1, spsr\n\t" /* SPSR_IRQ*/
+ "str r1, [%0,#120]\n\t"
+
+ /*MON*/
+ "mrs r1, cpsr\n\t" /*switch to monitor mode*/
+ "and r1, r1, #0xFFFFFFE0\n\t"
+ "orr r1, r1, #0x16\n\t"
+ "msr cpsr,r1\n\t"
+
+ "str r13, [%0,#124]\n\t" /*R13_MON*/
+ "str r14, [%0,#128]\n\t" /*R14_MON*/
+ "mrs r1, spsr\n\t" /*SPSR_MON*/
+ "str r1, [%0,#132]\n\t"
+
+ /*ABT*/
+ "mrs r1, cpsr\n\t" /* switch to Abort mode*/
+ "and r1, r1, #0xFFFFFFE0\n\t"
+ "orr r1, r1, #0x17\n\t"
+ "msr cpsr,r1\n\t"
+
+ "str r13, [%0,#136]\n\t" /*R13_ABT*/
+ "str r14, [%0,#140]\n\t" /* R14_ABT*/
+ "mrs r1, spsr\n\t" /* SPSR_ABT*/
+ "str r1, [%0,#144]\n\t"
+
+ /*UND*/
+ "mrs r1, cpsr\n\t" /* switch to undef mode*/
+ "and r1, r1, #0xFFFFFFE0\n\t"
+ "orr r1, r1, #0x1B\n\t"
+ "msr cpsr,r1\n\t"
+ "str r13, [%0,#148]\n\t" /* R13_UND*/
+ "str r14, [%0,#152]\n\t" /*R14_UND*/
+ "mrs r1, spsr\n\t" /*SPSR_UND*/
+ "str r1, [%0,#156]\n\t"
+
+ /* restore to SVC mode */
+ "mrs r1, cpsr\n\t" /* switch to undef mode*/
+ "and r1, r1, #0xFFFFFFE0\n\t"
+ "orr r1, r1, #0x13\n\t"
+ "msr cpsr,r1\n\t"
+ : /* output */
+ : "r"(regs) /* input */
+ : "%r1" /* clobbered register */
+ );
+ return;
+}
+EXPORT_SYMBOL(kernel_sec_get_core_reg_dump);
+
+int kernel_sec_get_mmu_reg_dump(t_kernel_sec_mmu_info *mmu_info)
+{
+ asm("mrc p15, 0, r1, c1, c0, 0\n\t" /*SCTLR*/
+ "str r1, [%0]\n\t"
+ "mrc p15, 0, r1, c2, c0, 0\n\t" /*TTBR0*/
+ "str r1, [%0,#4]\n\t"
+ "mrc p15, 0, r1, c2, c0,1\n\t" /*TTBR1*/
+ "str r1, [%0,#8]\n\t"
+ "mrc p15, 0, r1, c2, c0,2\n\t" /*TTBCR*/
+ "str r1, [%0,#12]\n\t"
+ "mrc p15, 0, r1, c3, c0,0\n\t" /*DACR*/
+ "str r1, [%0,#16]\n\t"
+ "mrc p15, 0, r1, c5, c0,0\n\t" /*DFSR*/
+ "str r1, [%0,#20]\n\t"
+ "mrc p15, 0, r1, c6, c0,0\n\t" /*DFAR*/
+ "str r1, [%0,#24]\n\t"
+ "mrc p15, 0, r1, c5, c0,1\n\t" /*IFSR*/
+ "str r1, [%0,#28]\n\t"
+ "mrc p15, 0, r1, c6, c0,2\n\t" /*IFAR*/
+ "str r1, [%0,#32]\n\t"
+ /*Dont populate DAFSR and RAFSR*/
+ "mrc p15, 0, r1, c10, c2,0\n\t" /*PMRRR*/
+ "str r1, [%0,#44]\n\t"
+ "mrc p15, 0, r1, c10, c2,1\n\t" /*NMRRR*/
+ "str r1, [%0,#48]\n\t"
+ "mrc p15, 0, r1, c13, c0,0\n\t" /*FCSEPID*/
+ "str r1, [%0,#52]\n\t"
+ "mrc p15, 0, r1, c13, c0,1\n\t" /*CONTEXT*/
+ "str r1, [%0,#56]\n\t"
+ "mrc p15, 0, r1, c13, c0,2\n\t" /*URWTPID*/
+ "str r1, [%0,#60]\n\t"
+ "mrc p15, 0, r1, c13, c0,3\n\t" /*UROTPID*/
+ "str r1, [%0,#64]\n\t"
+ "mrc p15, 0, r1, c13, c0,4\n\t" /*POTPIDR*/
+ "str r1, [%0,#68]\n\t"
+ : /* output */
+ : "r"(mmu_info) /* input */
+ : "%r1", "memory" /* clobbered register */
+ );
+ return 0;
+}
+EXPORT_SYMBOL(kernel_sec_get_mmu_reg_dump);
+
+void kernel_sec_save_final_context(void)
+{
+ if (kernel_sec_get_mmu_reg_dump(&kernel_sec_mmu_reg_dump) < 0)
+ printk(KERN_EMERG"(kernel_sec_save_final_context) kernel_sec_get_mmu_reg_dump faile.\n");
+ kernel_sec_get_core_reg_dump(&kernel_sec_core_reg_dump);
+
+ printk(KERN_EMERG "(kernel_sec_save_final_context) Final context was saved before the system reset.\n");
+}
+EXPORT_SYMBOL(kernel_sec_save_final_context);
+
+
+/*
+ * bSilentReset
+ * TRUE : Silent reset - clear the magic code.
+ * FALSE : Reset to upload mode - not clear the magic code.
+ *
+ * TODO : DebugLevel consideration should be added.
+ */
+/*extern void Ap_Cp_Switch_Config(u16 ap_cp_mode);*/
+void kernel_sec_hw_reset(bool bSilentReset)
+{
+/*Ap_Cp_Switch_Config(0);*/
+
+ if (bSilentReset || (KERNEL_SEC_DEBUG_LEVEL_LOW == \
+ kernel_sec_get_debug_level())) {
+ kernel_sec_clear_upload_magic_number();
+ printk(KERN_EMERG "(kernel_sec_hw_reset)" \
+ "Upload Magic Code is cleared for silet reset.\n");
+ }
+
+ printk(KERN_EMERG "(kernel_sec_hw_reset) %s\n", gkernel_sec_build_info);
+
+ printk(KERN_EMERG "(kernel_sec_hw_reset) The forced reset was called." \
+ "The system will be reset !!\n");
+
+ /* flush cache back to ram */
+ flush_cache_all();
+
+ __raw_writel(0x8000, kernel_sec_viraddr_wdt_reset_reg + 0x4);
+ __raw_writel(0x1, kernel_sec_viraddr_wdt_reset_reg + 0x4);
+ __raw_writel(0x8, kernel_sec_viraddr_wdt_reset_reg + 0x8);
+ __raw_writel(0x8021, kernel_sec_viraddr_wdt_reset_reg);
+
+ /* Never happened because the reset will occur before this. */
+ while (1);
+}
+EXPORT_SYMBOL(kernel_sec_hw_reset);
+
+
+bool kernel_set_debug_level(int level)
+{
+ /*if (sec_set_param_value)
+ {
+ if( (level == KERNEL_SEC_DEBUG_LEVEL_LOW) ||
+ ( level == KERNEL_SEC_DEBUG_LEVEL_MID ) )
+ {
+ sec_set_param_value(__PHONE_DEBUG_ON, (void*)&level);
+ printk(KERN_NOTICE "(kernel_set_debug_level)
+ The debug value is %x !!\n", level);
+ return 1;
+ }
+ else
+ {
+ printk(KERN_NOTICE "(kernel_set_debug_level)
+ The debug value is invalid (%x) !!\n", level);
+ return 0;
+ }
+ }
+ else
+ {*/
+ printk(KERN_NOTICE "(kernel_set_debug_level)" \
+ " sec_set_param_value is not intialized !!\n");
+ return 0;
+ /*}*/
+}
+EXPORT_SYMBOL(kernel_set_debug_level);
+
+int kernel_get_debug_level()
+{
+ int debug_level = -1;
+
+/* if (sec_get_param_value)
+ {
+ sec_get_param_value(__PHONE_DEBUG_ON, &debug_level);
+ }
+*/
+ if ((debug_level == KERNEL_SEC_DEBUG_LEVEL_LOW) ||
+ (debug_level == KERNEL_SEC_DEBUG_LEVEL_MID)) {
+ printk(KERN_NOTICE "(kernel_get_debug_level) kernel" \
+ "debug level is %x !!\n", debug_level);
+ return debug_level;
+ }
+ printk(KERN_NOTICE "(kernel_get_debug_level) kernel" \
+ "debug level is invalid (%x) !!\n", debug_level);
+ return debug_level;
+}
+EXPORT_SYMBOL(kernel_get_debug_level);
+
+int kernel_sec_lfs_debug_level_op(int dir, int flags)
+{
+ struct file *filp;
+ mm_segment_t fs;
+
+ int ret;
+
+ filp = filp_open(DEBUG_LEVEL_FILE_NAME, flags, 0);
+
+ if (IS_ERR(filp)) {
+ pr_err("%s: filp_open failed. (%ld)\n", __func__,
+ PTR_ERR(filp));
+
+ return -1;
+ }
+
+ fs = get_fs();
+ set_fs(get_ds());
+
+ if (dir == DEBUG_LEVEL_RD)
+ ret = filp->f_op->read(filp, (char __user *)&debuglevel,
+ sizeof(int), &filp->f_pos);
+ else
+ ret = filp->f_op->write(filp, (char __user *)&debuglevel,
+ sizeof(int), &filp->f_pos);
+
+ set_fs(fs);
+ filp_close(filp, NULL);
+
+ return ret;
+}
+
+bool kernel_sec_set_debug_level(int level)
+{
+ int ret;
+
+ if ((level == KERNEL_SEC_DEBUG_LEVEL_LOW) ||
+ (level == KERNEL_SEC_DEBUG_LEVEL_MID) ||
+ (level == KERNEL_SEC_DEBUG_LEVEL_HIGH)) {
+ debuglevel = level;
+ /* write to param.lfs */
+ ret = kernel_sec_lfs_debug_level_op(DEBUG_LEVEL_WR, \
+ O_RDWR|O_SYNC);
+
+ if (ret == sizeof(debuglevel))
+ pr_info("%s: debuglevel.inf" \
+ "write successfully.\n", \
+ __func__);
+ /* write to regiter (magic code) */
+ kernel_sec_set_upload_magic_number();
+
+ printk(KERN_NOTICE \
+ "(kernel_sec_set_debug_level)" \
+ "The debug value is 0x%x !!\n", level);
+ return 1;
+ } else {
+ printk(KERN_NOTICE "(kernel_sec_set_debug_level)" \
+ "The debug value is" \
+ "invalid(0x%x)!! Set default level(LOW)\n", level);
+ debuglevel = KERNEL_SEC_DEBUG_LEVEL_LOW;
+ return 0;
+ }
+ }
+EXPORT_SYMBOL(kernel_sec_set_debug_level);
+
+
+
+int kernel_sec_get_debug_level_from_param()
+{
+ int ret;
+
+ /* read from param.lfs*/
+ ret = kernel_sec_lfs_debug_level_op(DEBUG_LEVEL_RD, O_RDONLY);
+
+ if (ret == sizeof(debuglevel))
+ pr_info("%s: debuglevel.inf read successfully.\n", __func__);
+ if ((debuglevel == KERNEL_SEC_DEBUG_LEVEL_LOW) ||
+ (debuglevel == KERNEL_SEC_DEBUG_LEVEL_MID) ||
+ (debuglevel == KERNEL_SEC_DEBUG_LEVEL_HIGH)) {
+ /* return debug level */
+ printk(KERN_NOTICE "(kernel_sec_get_debug_level_from_param)" \
+ "kernel debug level is 0x%x !!\n", debuglevel);
+ return debuglevel;
+ } else {
+ /*In case of invalid debug level, default (debug level low)*/
+ printk(KERN_NOTICE "(kernel_sec_get_debug_level_from_param)" \
+ "The debug value is invalid(0x%x)!!" \
+ "Set default level(LOW)\n", debuglevel);
+ /*debuglevel = KERNEL_SEC_DEBUG_LEVEL_LOW;*/
+ debuglevel = KERNEL_SEC_DEBUG_LEVEL_MID;
+ }
+ return debuglevel;
+}
+EXPORT_SYMBOL(kernel_sec_get_debug_level_from_param);
+
+int kernel_sec_get_debug_level()
+{
+ return debuglevel;
+}
+EXPORT_SYMBOL(kernel_sec_get_debug_level);
+
+int kernel_sec_check_debug_level_high(void)
+{
+ if (KERNEL_SEC_DEBUG_LEVEL_HIGH == kernel_sec_get_debug_level())
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(kernel_sec_check_debug_level_high);
+
+#endif /* CONFIG_KERNEL_DEBUG_SEC*/
diff --git a/kernel/module.c b/kernel/module.c
index a8bd2151b54..c99de29c6e0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -570,6 +570,7 @@ MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
static char last_unloaded_module[MODULE_NAME_LEN+1];
+static unsigned int last_unloaded_module_addr;
#ifdef CONFIG_MODULE_UNLOAD
@@ -841,7 +842,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
/* Store the name of the last unloaded module for diagnostic purposes */
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
-
+ last_unloaded_module_addr = (unsigned int)&mod->module_core;
free_module(mod);
return 0;
out:
@@ -3408,7 +3409,8 @@ void print_modules(void)
printk(" %s%s", mod->name, module_flags(mod, buf));
preempt_enable();
if (last_unloaded_module[0])
- printk(" [last unloaded: %s]", last_unloaded_module);
+ printk(" [last unloaded: %s](%x)", last_unloaded_module,
+ last_unloaded_module_addr);
printk("\n");
}
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 2488ba7eb56..76eea1e86bb 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -95,8 +95,11 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
if (nr_calls)
(*nr_calls)++;
- if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+ if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) {
+ pr_info("notifier_call_chain : NOTIFY BAD %pf\n",
+ nb->notifier_call);
break;
+ }
nb = next_nb;
nr_to_call--;
}
diff --git a/kernel/panic.c b/kernel/panic.c
index 8e48cf6ab56..564c7bc6ecb 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,13 +27,19 @@
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
+/* Machine specific panic information string */
+char *mach_panic_string;
+
int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -350,6 +356,11 @@ late_initcall(init_oops_id);
void print_oops_end_marker(void)
{
init_oops_id();
+
+ if (mach_panic_string)
+ printk(KERN_WARNING "Board Information: %s\n",
+ mach_panic_string);
+
printk(KERN_WARNING "---[ end trace %016llx ]---\n",
(unsigned long long)oops_id);
}
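
Usage sketch (not part of this patch): board or machine code can point mach_panic_string at a static identity string so print_oops_end_marker() emits the "Board Information:" line added above. Where the extern declaration lives in this patch set is an assumption of the sketch.

#include <linux/init.h>
#include <linux/kernel.h>

extern char *mach_panic_string;	/* variable added by the hunk above */

static int __init my_board_panic_info_init(void)
{
	mach_panic_string = "MyBoard rev 0.3 (illustrative)";	/* hypothetical */
	return 0;
}
core_initcall(my_board_panic_info_init);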
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 6824ca7d4d0..b61f2fd26c5 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pm_qos_lock);
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
- .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
+ .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
.notifiers = &cpu_dma_lat_notifier,
.name = "cpu_dma_latency",
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
@@ -84,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
- .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
+ .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
.notifiers = &network_lat_notifier,
.name = "network_latency",
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
@@ -95,7 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
- .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
+ .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
.notifiers = &network_throughput_notifier,
.name = "network_throughput",
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
@@ -103,12 +103,55 @@ static struct pm_qos_object network_throughput_pm_qos = {
.type = PM_QOS_MAX,
};
+static BLOCKING_NOTIFIER_HEAD(bus_dma_throughput_notifier);
+static struct pm_qos_object bus_dma_throughput_pm_qos = {
+ .requests = PLIST_HEAD_INIT(bus_dma_throughput_pm_qos.requests),
+ .notifiers = &bus_dma_throughput_notifier,
+ .name = "bus_dma_throughput",
+ .target_value = PM_QOS_BUS_DMA_THROUGHPUT_DEFAULT_VALUE,
+ .default_value = PM_QOS_BUS_DMA_THROUGHPUT_DEFAULT_VALUE,
+ .type = PM_QOS_MAX,
+};
+
+static BLOCKING_NOTIFIER_HEAD(display_frequency_notifier);
+static struct pm_qos_object display_frequency_pm_qos = {
+ .requests = PLIST_HEAD_INIT(display_frequency_pm_qos.requests),
+ .notifiers = &display_frequency_notifier,
+ .name = "display_frequency",
+ .target_value = PM_QOS_DISPLAY_FREQUENCY_DEFAULT_VALUE,
+ .default_value = PM_QOS_DISPLAY_FREQUENCY_DEFAULT_VALUE,
+ .type = PM_QOS_MAX,
+};
+
+static BLOCKING_NOTIFIER_HEAD(bus_qos_notifier);
+static struct pm_qos_object bus_qos_pm_qos = {
+ .requests = PLIST_HEAD_INIT(bus_qos_pm_qos.requests),
+ .notifiers = &bus_qos_notifier,
+ .name = "bus_qos",
+ .target_value = 0,
+ .default_value = 0,
+ .type = PM_QOS_MAX,
+};
+
+static BLOCKING_NOTIFIER_HEAD(dvfs_res_lat_notifier);
+static struct pm_qos_object dvfs_res_lat_pm_qos = {
+ .requests = PLIST_HEAD_INIT(dvfs_res_lat_pm_qos.requests),
+ .notifiers = &dvfs_res_lat_notifier,
+ .name = "dvfs_response_latency",
+ .target_value = PM_QOS_DVFS_RESPONSE_LAT_DEFAULT_VALUE,
+ .default_value = PM_QOS_DVFS_RESPONSE_LAT_DEFAULT_VALUE,
+ .type = PM_QOS_MIN
+};
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
&network_lat_pm_qos,
- &network_throughput_pm_qos
+ &network_throughput_pm_qos,
+ &bus_dma_throughput_pm_qos,
+ &display_frequency_pm_qos,
+ &bus_qos_pm_qos,
+ &dvfs_res_lat_pm_qos,
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -474,6 +517,20 @@ static int __init pm_qos_power_init(void)
if (ret < 0)
printk(KERN_ERR
"pm_qos_param: network_throughput setup failed\n");
+ ret = register_pm_qos_misc(&bus_dma_throughput_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: bus_dma_throughput setup failed\n");
+
+ ret = register_pm_qos_misc(&display_frequency_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: display_frequency setup failed\n");
+
+ ret = register_pm_qos_misc(&dvfs_res_lat_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: dvfs_response_frequency setup failed\n");
return ret;
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24b55b..ee9375768c6 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,90 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config PM_WATCHDOG_TIMEOUT
+ bool "PM Watchdog timeout"
+ depends on CPU_EXYNOS4210
+ default y
+ ---help---
+ Enable PM watchdog timer to catch lockup during early_suspend,
+ late_resume and suspend_finish.
+
+config FAST_BOOT
+ bool "Force suspend and show fake turn off which is same with suspend"
+ depends on SUSPEND
+ default n
+ ---help---
+ This allows you go to suspend instead to turn off. If this is
+ done, it goes to wake up instead to turn on. This works with power
+ source.
+
+config HAS_WAKELOCK
+ bool
+
+config HAS_EARLYSUSPEND
+ bool
+
+config WAKELOCK
+ bool "Wake lock"
+ depends on PM && RTC_CLASS
+ default n
+ select HAS_WAKELOCK
+ ---help---
+ Enable wakelocks. When user space request a sleep state the
+ sleep request will be delayed until no wake locks are held.
+
+config WAKELOCK_STAT
+ bool "Wake lock stats"
+ depends on WAKELOCK
+ default y
+ ---help---
+ Report wake lock stats in /proc/wakelocks
+
+config USER_WAKELOCK
+ bool "Userspace wake locks"
+ depends on WAKELOCK
+ default y
+ ---help---
+ User-space wake lock api. Write "lockname" or "lockname timeout"
+ to /sys/power/wake_lock lock and if needed create a wake lock.
+ Write "lockname" to /sys/power/wake_unlock to unlock a user wake
+ lock.
+
+config EARLYSUSPEND
+ bool "Early suspend"
+ depends on WAKELOCK
+ default y
+ select HAS_EARLYSUSPEND
+ ---help---
+ Call early suspend handlers when the user requested sleep state
+ changes.
+
+choice
+ prompt "User-space screen access"
+ default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
+ default CONSOLE_EARLYSUSPEND
+ depends on HAS_EARLYSUSPEND
+
+ config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
+ bool "None"
+
+ config CONSOLE_EARLYSUSPEND
+ bool "Console switch on early-suspend"
+ depends on HAS_EARLYSUSPEND && VT
+ ---help---
+ Register early suspend handler to perform a console switch to
+ when user-space should stop drawing to the screen and a switch
+ back when it should resume.
+
+ config FB_EARLYSUSPEND
+ bool "Sysfs interface"
+ depends on HAS_EARLYSUSPEND
+ ---help---
+ Register early suspend handler that notifies and waits for
+ user-space through sysfs when user-space should stop drawing
+ to the screen and notifies user-space when it should resume.
+endchoice
+
config HIBERNATE_CALLBACKS
bool
@@ -65,6 +149,26 @@ config HIBERNATION
For more information take a look at <file:Documentation/power/swsusp.txt>.
+config FULL_PAGE_RECLAIM
+ bool "Using Full Page Reclaim during Suspend-to-Disk"
+ depends on HIBERNATION
+ default n
+ ---help---
+ Reclaim whole pages if possible before creating hibernation snapshot
+ image. For reducing snapshot image size, reclaim them.
+
+ With this config, the size of hibernation snapshot image is
+ dramatically decreased and small size of hibernation snapshot image
+ has benefit for fast booting.
+
+config FAST_RESUME
+ bool "Using fast resume during Suspend-to-Disk"
+ depends on HIBERNATION
+ ---help---
+ software_resume() function which triggers hibernation restore is
+ called faster on booting time by introducing new initcalls. This has
+ benefit for fast booting on hibernation
+
config PM_STD_PARTITION
string "Default resume partition"
depends on HIBERNATION
@@ -227,3 +331,14 @@ config PM_OPP
config PM_RUNTIME_CLK
def_bool y
depends on PM_RUNTIME && HAVE_CLK
+
+config SUSPEND_TIME
+ bool "Log time spent in suspend"
+ ---help---
+	  Prints the time spent in suspend to the kernel log, and keeps
+	  statistics on the time spent in suspend in
+	  /sys/kernel/debug/suspend_time.
+
+config CPU_PM
+ bool
+ depends on SUSPEND || CPU_IDLE
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c5ebc6a9064..9b224e16b19 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,5 +8,11 @@ obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \
block_io.o
+obj-$(CONFIG_WAKELOCK) += wakelock.o
+obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o
+obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o
+obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o
+obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o
+obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c
new file mode 100644
index 00000000000..a3edcb26738
--- /dev/null
+++ b/kernel/power/consoleearlysuspend.c
@@ -0,0 +1,78 @@
+/* kernel/power/consoleearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/earlysuspend.h>
+#include <linux/kbd_kern.h>
+#include <linux/module.h>
+#include <linux/vt_kern.h>
+#include <linux/wait.h>
+
+#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
+
+static int orig_fgconsole;
+static void console_early_suspend(struct early_suspend *h)
+{
+ acquire_console_sem();
+ orig_fgconsole = fg_console;
+ if (vc_allocate(EARLY_SUSPEND_CONSOLE))
+ goto err;
+ if (set_console(EARLY_SUSPEND_CONSOLE))
+ goto err;
+ release_console_sem();
+
+ if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1))
+ pr_warning("console_early_suspend: Can't switch VCs.\n");
+ return;
+err:
+ pr_warning("console_early_suspend: Can't set console\n");
+ release_console_sem();
+}
+
+static void console_late_resume(struct early_suspend *h)
+{
+ int ret;
+ acquire_console_sem();
+ ret = set_console(orig_fgconsole);
+ release_console_sem();
+ if (ret) {
+ pr_warning("console_late_resume: Can't set console.\n");
+ return;
+ }
+
+ if (vt_waitactive(orig_fgconsole + 1))
+ pr_warning("console_late_resume: Can't switch VCs.\n");
+}
+
+static struct early_suspend console_early_suspend_desc = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = console_early_suspend,
+ .resume = console_late_resume,
+};
+
+static int __init console_early_suspend_init(void)
+{
+ register_early_suspend(&console_early_suspend_desc);
+ return 0;
+}
+
+static void __exit console_early_suspend_exit(void)
+{
+ unregister_early_suspend(&console_early_suspend_desc);
+}
+
+module_init(console_early_suspend_init);
+module_exit(console_early_suspend_exit);
+
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
new file mode 100644
index 00000000000..e6303fd48af
--- /dev/null
+++ b/kernel/power/earlysuspend.c
@@ -0,0 +1,233 @@
+/* kernel/power/earlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/wakelock.h>
+#include <linux/workqueue.h>
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+#include <asm/atomic.h>
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+
+#include "power.h"
+
+enum {
+ DEBUG_USER_STATE = 1U << 0,
+ DEBUG_SUSPEND = 1U << 2,
+ DEBUG_VERBOSE = 1U << 3,
+};
+static int debug_mask = DEBUG_USER_STATE;
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+atomic_t optimize_comp_on = ATOMIC_INIT(0);
+EXPORT_SYMBOL(optimize_comp_on);
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(early_suspend_lock);
+static LIST_HEAD(early_suspend_handlers);
+static void sync_system(struct work_struct *work);
+static void early_suspend(struct work_struct *work);
+static void late_resume(struct work_struct *work);
+static DECLARE_WORK(sync_system_work, sync_system);
+static DECLARE_WORK(early_suspend_work, early_suspend);
+static DECLARE_WORK(late_resume_work, late_resume);
+static DEFINE_SPINLOCK(state_lock);
+enum {
+ SUSPEND_REQUESTED = 0x1,
+ SUSPENDED = 0x2,
+ SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
+};
+static int state;
+
+static void sync_system(struct work_struct *work)
+{
+ pr_info("%s +\n", __func__);
+ wake_lock(&sync_wake_lock);
+ sys_sync();
+ wake_unlock(&sync_wake_lock);
+ pr_info("%s -\n", __func__);
+}
+
+void register_early_suspend(struct early_suspend *handler)
+{
+ struct list_head *pos;
+
+ mutex_lock(&early_suspend_lock);
+ list_for_each(pos, &early_suspend_handlers) {
+ struct early_suspend *e;
+ e = list_entry(pos, struct early_suspend, link);
+ if (e->level > handler->level)
+ break;
+ }
+ list_add_tail(&handler->link, pos);
+ if ((state & SUSPENDED) && handler->suspend)
+ handler->suspend(handler);
+ mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(register_early_suspend);
+
+void unregister_early_suspend(struct early_suspend *handler)
+{
+ mutex_lock(&early_suspend_lock);
+ list_del(&handler->link);
+ mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(unregister_early_suspend);
+
+static void early_suspend(struct work_struct *work)
+{
+ struct early_suspend *pos;
+ unsigned long irqflags;
+ int abort = 0;
+ struct timer_list timer;
+ struct pm_wd_data data;
+
+ pm_wd_add_timer(&timer, &data, 30);
+
+ mutex_lock(&early_suspend_lock);
+ spin_lock_irqsave(&state_lock, irqflags);
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+ atomic_set(&optimize_comp_on, 1);
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+ if (state == SUSPEND_REQUESTED)
+ state |= SUSPENDED;
+ else
+ abort = 1;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ if (abort) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: abort, state %d\n", state);
+ mutex_unlock(&early_suspend_lock);
+ goto abort;
+ }
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: call handlers\n");
+ list_for_each_entry(pos, &early_suspend_handlers, link) {
+ if (pos->suspend != NULL) {
+ if (debug_mask & DEBUG_VERBOSE)
+ pr_info("early_suspend: calling %pf\n", pos->suspend);
+ pos->suspend(pos);
+ }
+ }
+ mutex_unlock(&early_suspend_lock);
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: sync\n");
+
+ /* sys_sync(); */
+ queue_work(sync_work_queue, &sync_system_work);
+
+abort:
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
+ wake_unlock(&main_wake_lock);
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ pm_wd_del_timer(&timer);
+}
+
+static void late_resume(struct work_struct *work)
+{
+ struct early_suspend *pos;
+ unsigned long irqflags;
+ int abort = 0;
+ struct timer_list timer;
+ struct pm_wd_data data;
+
+ pm_wd_add_timer(&timer, &data, 30);
+
+ mutex_lock(&early_suspend_lock);
+ spin_lock_irqsave(&state_lock, irqflags);
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+ atomic_set(&optimize_comp_on, 0);
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+ if (state == SUSPENDED)
+ state &= ~SUSPENDED;
+ else
+ abort = 1;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ if (abort) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: abort, state %d\n", state);
+ goto abort;
+ }
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: call handlers\n");
+ list_for_each_entry_reverse(pos, &early_suspend_handlers, link) {
+ if (pos->resume != NULL) {
+ if (debug_mask & DEBUG_VERBOSE)
+ pr_info("late_resume: calling %pf\n", pos->resume);
+
+ pos->resume(pos);
+ }
+ }
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: done\n");
+abort:
+ mutex_unlock(&early_suspend_lock);
+
+ pm_wd_del_timer(&timer);
+}
+
+#ifdef CONFIG_FAST_BOOT
+extern bool fake_shut_down;
+#endif
+void request_suspend_state(suspend_state_t new_state)
+{
+ unsigned long irqflags;
+ int old_sleep;
+
+ spin_lock_irqsave(&state_lock, irqflags);
+ old_sleep = state & SUSPEND_REQUESTED;
+ if (debug_mask & DEBUG_USER_STATE) {
+ struct timespec ts;
+ struct rtc_time tm;
+ getnstimeofday(&ts);
+ rtc_time_to_tm(ts.tv_sec, &tm);
+ pr_info("request_suspend_state: %s (%d->%d) at %lld "
+ "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
+ new_state != PM_SUSPEND_ON ? "sleep" : "wakeup",
+ requested_suspend_state, new_state,
+ ktime_to_ns(ktime_get()),
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+ }
+ if (!old_sleep && new_state != PM_SUSPEND_ON) {
+ state |= SUSPEND_REQUESTED;
+ queue_work(suspend_work_queue, &early_suspend_work);
+ } else if (old_sleep && new_state == PM_SUSPEND_ON) {
+#ifdef CONFIG_FAST_BOOT
+ if (fake_shut_down)
+ fake_shut_down = false;
+#endif
+ state &= ~SUSPEND_REQUESTED;
+ wake_lock(&main_wake_lock);
+ queue_work(suspend_work_queue, &late_resume_work);
+ }
+ requested_suspend_state = new_state;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
+suspend_state_t get_suspend_state(void)
+{
+ return requested_suspend_state;
+}
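
Handlers registered through this interface are kept sorted by ->level and are invoked in increasing level order on early suspend and in reverse order on late resume. A minimal driver-side sketch of the API above (hypothetical names; only struct early_suspend, the EARLY_SUSPEND_LEVEL_STOP_DRAWING level and register_early_suspend()/unregister_early_suspend() are taken from this patch):

#include <linux/earlysuspend.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical client: runs around the time user-space stops/starts drawing. */
static void demo_early_suspend(struct early_suspend *h)
{
	/* e.g. cancel periodic work, gate clocks */
	pr_info("demo: early suspend\n");
}

static void demo_late_resume(struct early_suspend *h)
{
	pr_info("demo: late resume\n");
}

static struct early_suspend demo_early_suspend_desc = {
	.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
	.suspend = demo_early_suspend,
	.resume = demo_late_resume,
};

static int __init demo_init(void)
{
	register_early_suspend(&demo_early_suspend_desc);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_early_suspend(&demo_early_suspend_desc);
}

module_init(demo_init);
module_exit(demo_exit);
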
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 00000000000..15137650149
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,153 @@
+/* kernel/power/fbearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+
+#include "power.h"
+
+static wait_queue_head_t fb_state_wq;
+static DEFINE_SPINLOCK(fb_state_lock);
+static enum {
+ FB_STATE_STOPPED_DRAWING,
+ FB_STATE_REQUEST_STOP_DRAWING,
+ FB_STATE_DRAWING_OK,
+} fb_state;
+
+/* tell userspace to stop drawing, wait for it to stop */
+static void stop_drawing_early_suspend(struct early_suspend *h)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = FB_STATE_REQUEST_STOP_DRAWING;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ wake_up_all(&fb_state_wq);
+ ret = wait_event_timeout(fb_state_wq,
+ fb_state == FB_STATE_STOPPED_DRAWING,
+ HZ);
+ if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
+ pr_warning("stop_drawing_early_suspend: timeout waiting for "
+ "userspace to stop drawing\n");
+}
+
+/* tell userspace to start drawing */
+static void start_drawing_late_resume(struct early_suspend *h)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = FB_STATE_DRAWING_OK;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+ wake_up(&fb_state_wq);
+}
+
+static struct early_suspend stop_drawing_early_suspend_desc = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = stop_drawing_early_suspend,
+ .resume = start_drawing_late_resume,
+};
+
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state != FB_STATE_DRAWING_OK);
+ if (ret && fb_state == FB_STATE_DRAWING_OK)
+ return ret;
+ else
+ s += sprintf(buf, "sleeping");
+ return s - buf;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
+ fb_state = FB_STATE_STOPPED_DRAWING;
+ wake_up(&fb_state_wq);
+ }
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state == FB_STATE_DRAWING_OK);
+ if (ret && fb_state != FB_STATE_DRAWING_OK)
+ return ret;
+ else
+ s += sprintf(buf, "awake");
+
+ return s - buf;
+}
+
+#define power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0444, \
+ }, \
+ .show = _name##_show, \
+ .store = NULL, \
+}
+
+power_ro_attr(wait_for_fb_sleep);
+power_ro_attr(wait_for_fb_wake);
+
+static struct attribute *g[] = {
+ &wait_for_fb_sleep_attr.attr,
+ &wait_for_fb_wake_attr.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = g,
+};
+
+static int __init android_power_init(void)
+{
+ int ret;
+
+ init_waitqueue_head(&fb_state_wq);
+ fb_state = FB_STATE_DRAWING_OK;
+
+ ret = sysfs_create_group(power_kobj, &attr_group);
+ if (ret) {
+ pr_err("android_power_init: sysfs_create_group failed\n");
+ return ret;
+ }
+
+ register_early_suspend(&stop_drawing_early_suspend_desc);
+ return 0;
+}
+
+static void __exit android_power_exit(void)
+{
+ unregister_early_suspend(&stop_drawing_early_suspend_desc);
+ sysfs_remove_group(power_kobj, &attr_group);
+}
+
+module_init(android_power_init);
+module_exit(android_power_exit);
+
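
From user space, the handshake implemented above is typically driven by a loop like the following sketch (hypothetical program; only the /sys/power/wait_for_fb_sleep and /sys/power/wait_for_fb_wake attributes and their blocking-read semantics come from the file above):

#include <fcntl.h>
#include <unistd.h>

/* A read() on either attribute blocks until the corresponding state change. */
static void wait_on(const char *path)
{
	char buf[16];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return;
	(void)read(fd, buf, sizeof(buf));	/* returns "sleeping" or "awake" */
	close(fd);
}

int main(void)
{
	for (;;) {
		wait_on("/sys/power/wait_for_fb_sleep");
		/* stop drawing and release the framebuffer here */
		wait_on("/sys/power/wait_for_fb_wake");
		/* reacquire the framebuffer and resume drawing here */
	}
	return 0;
}
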
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 32f1590644d..7feea903fe4 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -14,6 +14,7 @@
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
+#include <linux/async.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
@@ -24,13 +25,16 @@
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
#include <scsi/scsi_scan.h>
#include "power.h"
static int nocompress = 0;
-static int noresume = 0;
+int noresume;
+static int resume_wait = 0;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
@@ -733,12 +737,30 @@ static int software_resume(void)
/* Check if the device is there */
swsusp_resume_device = name_to_dev_t(resume_file);
+
+ /*
+	 * name_to_dev_t() cannot verify the partition when resume_file is
+	 * given in numeric (major:minor) format.
+ */
+ if (isdigit(resume_file[0]) && resume_wait) {
+ int partno;
+ while (!get_gendisk(swsusp_resume_device, &partno))
+ msleep(10);
+ }
+
if (!swsusp_resume_device) {
/*
* Some device discovery might still be in progress; we need
* to wait for this to finish.
*/
wait_for_device_probe();
+
+ if (resume_wait) {
+ while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
+ msleep(10);
+ async_synchronize_full();
+ }
+
/*
* We can't depend on SCSI devices being available after loading
* one of their modules until scsi_complete_async_scans() is
@@ -816,7 +838,11 @@ close_finish:
goto Finish;
}
+#ifdef CONFIG_FAST_RESUME
+resume_initcall(software_resume);
+#else
late_initcall(software_resume);
+#endif
static const char * const hibernation_modes[] = {
@@ -1008,11 +1034,42 @@ static ssize_t reserved_size_store(struct kobject *kobj,
power_attr(reserved_size);
+#ifdef CONFIG_FAST_RESUME
+static ssize_t noresume_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", noresume);
+}
+
+static ssize_t noresume_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t n)
+{
+ if (sscanf(buf, "%d", &noresume) == 1) {
+ noresume = !!noresume;
+ if (noresume) {
+ if (!swsusp_resume_device)
+ swsusp_resume_device =
+ name_to_dev_t(resume_file);
+ swsusp_check();
+ swsusp_close(FMODE_READ);
+ }
+ return n;
+ }
+
+ return -EINVAL;
+}
+
+power_attr(noresume);
+#endif
+
static struct attribute * g[] = {
&disk_attr.attr,
&resume_attr.attr,
&image_size_attr.attr,
&reserved_size_attr.attr,
+#ifdef CONFIG_FAST_RESUME
+ &noresume_attr.attr,
+#endif
NULL,
};
@@ -1067,7 +1124,14 @@ static int __init noresume_setup(char *str)
return 1;
}
+static int __init resumewait_setup(char *str)
+{
+ resume_wait = 1;
+ return 1;
+}
+
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
__setup("hibernate=", hibernate_setup);
+__setup("resumewait=", resumewait_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4ce7c..9c54ff7ec8c 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -13,6 +13,31 @@
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_ARCH_EXYNOS4)
+#define CONFIG_DVFS_LIMIT
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4210)
+#define CONFIG_GPU_LOCK
+#define CONFIG_ROTATION_BOOSTER_SUPPORT
+#endif
+
+#ifdef CONFIG_DVFS_LIMIT
+#include <linux/cpufreq.h>
+#include <mach/cpufreq.h>
+#endif
+
+#ifdef CONFIG_GPU_LOCK
+#include <mach/gpufreq.h>
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4412) && defined(CONFIG_VIDEO_MALI400MP) \
+ && defined(CONFIG_VIDEO_MALI400MP_DVFS)
+#define CONFIG_PEGASUS_GPU_LOCK
+extern int mali_dvfs_bottom_lock_push(int lock_step);
+extern int mali_dvfs_bottom_lock_pop(void);
+#endif
+
#include "power.h"
DEFINE_MUTEX(pm_mutex);
@@ -165,12 +190,22 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
#endif
return (s - buf);
}
+#ifdef CONFIG_FAST_BOOT
+bool fake_shut_down = false;
+EXPORT_SYMBOL(fake_shut_down);
+
+extern void wakelock_force_suspend(void);
+#endif
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_EARLYSUSPEND
+ suspend_state_t state = PM_SUSPEND_ON;
+#else
suspend_state_t state = PM_SUSPEND_STANDBY;
+#endif
const char * const *s;
#endif
char *p;
@@ -191,9 +226,30 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
break;
}
- if (state < PM_SUSPEND_MAX && *s)
+
+#ifdef CONFIG_FAST_BOOT
+ if (len == 4 && !strncmp(buf, "dmem", len)) {
+ pr_info("%s: fake shut down!!!\n", __func__);
+ fake_shut_down = true;
+ state = PM_SUSPEND_MEM;
+ }
+#endif
+
+ if (state < PM_SUSPEND_MAX && *s) {
+#ifdef CONFIG_EARLYSUSPEND
+ if (state == PM_SUSPEND_ON || valid_state(state)) {
+ error = 0;
+ request_suspend_state(state);
+ }
+#ifdef CONFIG_FAST_BOOT
+ if (fake_shut_down)
+ wakelock_force_suspend();
+#endif
+#else
error = enter_state(state);
#endif
+ }
+#endif
Exit:
return error ? error : n;
@@ -297,6 +353,372 @@ power_attr(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
+#ifdef CONFIG_USER_WAKELOCK
+power_attr(wake_lock);
+power_attr(wake_unlock);
+#endif
+
+#ifdef CONFIG_DVFS_LIMIT
+static int cpufreq_max_limit_val = -1;
+static int cpufreq_min_limit_val = -1;
+DEFINE_MUTEX(cpufreq_limit_mutex);
+
+static ssize_t cpufreq_table_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ ssize_t count = 0;
+ struct cpufreq_frequency_table *table;
+ struct cpufreq_policy *policy;
+ unsigned int min_freq = ~0;
+ unsigned int max_freq = 0;
+ unsigned int i = 0;
+
+ table = cpufreq_frequency_get_table(0);
+ if (!table) {
+ printk(KERN_ERR "%s: Failed to get the cpufreq table\n",
+ __func__);
+ return sprintf(buf, "Failed to get the cpufreq table\n");
+ }
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ #if 0 /* /sys/devices/system/cpu/cpu0/cpufreq/scaling_min&max_freq */
+ min_freq = policy->min_freq;
+ max_freq = policy->max_freq;
+ #else /* /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min&max_freq */
+ min_freq = policy->cpuinfo.min_freq;
+ max_freq = policy->cpuinfo.max_freq;
+ #endif
+ }
+
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if ((table[i].frequency == CPUFREQ_ENTRY_INVALID) ||
+ (table[i].frequency > max_freq) ||
+ (table[i].frequency < min_freq))
+ continue;
+ count += sprintf(&buf[count], "%d ", table[i].frequency);
+ }
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
+static ssize_t cpufreq_table_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ printk(KERN_ERR "%s: cpufreq_table is read-only\n", __func__);
+ return -EINVAL;
+}
+
+#define VALID_LEVEL 1
+static int get_cpufreq_level(unsigned int freq, unsigned int *level)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned int i = 0;
+
+ table = cpufreq_frequency_get_table(0);
+ if (!table) {
+ printk(KERN_ERR "%s: Failed to get the cpufreq table\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
+ if (table[i].frequency == freq) {
+ *level = i;
+ return VALID_LEVEL;
+ }
+
+ printk(KERN_ERR "%s: %u KHz is an unsupported cpufreq\n",
+ __func__, freq);
+ return -EINVAL;
+}
+
+static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_max_limit_val);
+}
+
+static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int val;
+ unsigned int cpufreq_level;
+ int lock_ret;
+ ssize_t ret = -EINVAL;
+
+ mutex_lock(&cpufreq_limit_mutex);
+
+ if (sscanf(buf, "%d", &val) != 1) {
+ printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
+ goto out;
+ }
+
+ if (val == -1) { /* Unlock request */
+ if (cpufreq_max_limit_val != -1) {
+ exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_USER);
+ cpufreq_max_limit_val = -1;
+ } else /* Already unlocked */
+ printk(KERN_ERR "%s: Unlock request is ignored\n",
+ __func__);
+ } else { /* Lock request */
+ if (get_cpufreq_level((unsigned int)val, &cpufreq_level)
+ == VALID_LEVEL) {
+ if (cpufreq_max_limit_val != -1)
+ /* Unlock the previous lock */
+ exynos_cpufreq_upper_limit_free(
+ DVFS_LOCK_ID_USER);
+ lock_ret = exynos_cpufreq_upper_limit(
+ DVFS_LOCK_ID_USER, cpufreq_level);
+ /* ret of exynos_cpufreq_upper_limit is meaningless.
+ 0 is fail? success? */
+ cpufreq_max_limit_val = val;
+ } else /* Invalid lock request --> No action */
+ printk(KERN_ERR "%s: Lock request is invalid\n",
+ __func__);
+ }
+
+ ret = n;
+out:
+ mutex_unlock(&cpufreq_limit_mutex);
+ return ret;
+}
+
+static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_min_limit_val);
+}
+
+static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int val;
+ unsigned int cpufreq_level;
+ int lock_ret;
+ ssize_t ret = -EINVAL;
+
+ mutex_lock(&cpufreq_limit_mutex);
+
+ if (sscanf(buf, "%d", &val) != 1) {
+ printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
+ goto out;
+ }
+
+ if (val == -1) { /* Unlock request */
+ if (cpufreq_min_limit_val != -1) {
+ exynos_cpufreq_lock_free(DVFS_LOCK_ID_USER);
+ cpufreq_min_limit_val = -1;
+ } else /* Already unlocked */
+ printk(KERN_ERR "%s: Unlock request is ignored\n",
+ __func__);
+ } else { /* Lock request */
+ if (get_cpufreq_level((unsigned int)val, &cpufreq_level)
+ == VALID_LEVEL) {
+ if (cpufreq_min_limit_val != -1)
+ /* Unlock the previous lock */
+ exynos_cpufreq_lock_free(DVFS_LOCK_ID_USER);
+ lock_ret = exynos_cpufreq_lock(
+ DVFS_LOCK_ID_USER, cpufreq_level);
+ /* ret of exynos_cpufreq_lock is meaningless.
+ 0 is fail? success? */
+ cpufreq_min_limit_val = val;
+ if ((cpufreq_max_limit_val != -1) &&
+ (cpufreq_min_limit_val > cpufreq_max_limit_val))
+ printk(KERN_ERR "%s: Min lock may not work well"
+ " because of Max lock\n", __func__);
+ } else /* Invalid lock request --> No action */
+ printk(KERN_ERR "%s: Lock request is invalid\n",
+ __func__);
+ }
+
+ ret = n;
+out:
+ mutex_unlock(&cpufreq_limit_mutex);
+ return ret;
+}
+
+power_attr(cpufreq_table);
+power_attr(cpufreq_max_limit);
+power_attr(cpufreq_min_limit);
+#endif /* CONFIG_DVFS_LIMIT */
+
+#ifdef CONFIG_GPU_LOCK
+static int gpu_lock_val;
+DEFINE_MUTEX(gpu_lock_mutex);
+
+static ssize_t gpu_lock_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", gpu_lock_val);
+}
+
+static ssize_t gpu_lock_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int val;
+ ssize_t ret = -EINVAL;
+
+ mutex_lock(&gpu_lock_mutex);
+
+ if (sscanf(buf, "%d", &val) != 1) {
+		pr_info("%s: Invalid gpu lock format\n", __func__);
+ goto out;
+ }
+
+ if (val == 0) {
+ if (gpu_lock_val != 0) {
+ exynos_gpufreq_unlock();
+ gpu_lock_val = 0;
+ } else {
+ pr_info("%s: Unlock request is ignored\n", __func__);
+ }
+ } else if (val == 1) {
+ if (gpu_lock_val == 0) {
+ exynos_gpufreq_lock();
+ gpu_lock_val = val;
+ } else {
+ pr_info("%s: Lock request is ignored\n", __func__);
+ }
+ } else {
+ pr_info("%s: Lock request is invalid\n", __func__);
+ }
+
+ ret = n;
+out:
+ mutex_unlock(&gpu_lock_mutex);
+ return ret;
+}
+power_attr(gpu_lock);
+#endif
+
+#ifdef CONFIG_ROTATION_BOOSTER_SUPPORT
+static inline void rotation_booster_on(void)
+{
+ exynos_cpufreq_lock(DVFS_LOCK_ID_ROTATION_BOOSTER, L0);
+ exynos4_busfreq_lock(DVFS_LOCK_ID_ROTATION_BOOSTER, BUS_L0);
+ exynos_gpufreq_lock();
+}
+
+static inline void rotation_booster_off(void)
+{
+ exynos_gpufreq_unlock();
+ exynos4_busfreq_lock_free(DVFS_LOCK_ID_ROTATION_BOOSTER);
+ exynos_cpufreq_lock_free(DVFS_LOCK_ID_ROTATION_BOOSTER);
+}
+
+static int rotation_booster_val;
+DEFINE_MUTEX(rotation_booster_mutex);
+
+static ssize_t rotation_booster_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", rotation_booster_val);
+}
+
+static ssize_t rotation_booster_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int val;
+ ssize_t ret = -EINVAL;
+
+ mutex_lock(&rotation_booster_mutex);
+
+ if (sscanf(buf, "%d", &val) != 1) {
+ pr_info("%s: Invalid rotation_booster on, off format\n", \
+ __func__);
+ goto out;
+ }
+
+ if (val == 0) {
+ if (rotation_booster_val != 0) {
+ rotation_booster_off();
+ rotation_booster_val = 0;
+ } else {
+ pr_info("%s: rotation_booster off request"
+ " is ignored\n", __func__);
+ }
+ } else if (val == 1) {
+ if (rotation_booster_val == 0) {
+ rotation_booster_on();
+ rotation_booster_val = val;
+ } else {
+ pr_info("%s: rotation_booster on request"
+ " is ignored\n", __func__);
+ }
+ } else {
+ pr_info("%s: rotation_booster request is invalid\n", __func__);
+ }
+
+ ret = n;
+out:
+ mutex_unlock(&rotation_booster_mutex);
+ return ret;
+}
+power_attr(rotation_booster);
+#else /* CONFIG_ROTATION_BOOSTER_SUPPORT */
+static inline void rotation_booster_on(void){}
+static inline void rotation_booster_off(void){}
+#endif /* CONFIG_ROTATION_BOOSTER_SUPPORT */
+
+#ifdef CONFIG_PEGASUS_GPU_LOCK
+static int mali_lock_val;
+static int mali_lock_cnt;
+DEFINE_MUTEX(mali_lock_mutex);
+
+static ssize_t mali_lock_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "level = %d, count = %d\n",
+ mali_lock_val, mali_lock_cnt);
+}
+
+static ssize_t mali_lock_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int val;
+ ssize_t ret = -EINVAL;
+
+ mutex_lock(&mali_lock_mutex);
+
+ if (sscanf(buf, "%d", &val) != 1) {
+ pr_info("%s: Invalid mali lock format\n", __func__);
+ goto out;
+ }
+
+ if (val == 0) { /* unlock */
+ mali_lock_cnt = mali_dvfs_bottom_lock_pop();
+ if (mali_lock_cnt == 0)
+ mali_lock_val = 0;
+ } else if (val > 0 && val < 5) { /* lock with level */
+ mali_lock_cnt = mali_dvfs_bottom_lock_push(val);
+ if (mali_lock_val < val)
+ mali_lock_val = val;
+ } else {
+ pr_info("%s: Lock request is invalid\n", __func__);
+ }
+
+ ret = n;
+out:
+ mutex_unlock(&mali_lock_mutex);
+ return ret;
+}
+power_attr(mali_lock);
+#endif
+
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
@@ -309,6 +731,24 @@ static struct attribute * g[] = {
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
+#ifdef CONFIG_USER_WAKELOCK
+ &wake_lock_attr.attr,
+ &wake_unlock_attr.attr,
+#endif
+#endif
+#ifdef CONFIG_DVFS_LIMIT
+ &cpufreq_table_attr.attr,
+ &cpufreq_max_limit_attr.attr,
+ &cpufreq_min_limit_attr.attr,
+#endif
+#ifdef CONFIG_GPU_LOCK
+ &gpu_lock_attr.attr,
+#endif
+#ifdef CONFIG_PEGASUS_GPU_LOCK
+ &mali_lock_attr.attr,
+#endif
+#ifdef CONFIG_ROTATION_BOOSTER_SUPPORT
+ &rotation_booster_attr.attr,
#endif
NULL,
};
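
The attributes above are created with power_attr() and therefore appear under /sys/power. A hypothetical user-space sketch of the DVFS limit interface (frequencies are in kHz and must match an entry reported by cpufreq_table; writing -1 releases a limit; the example values are illustrative only):

#include <stdio.h>

/* Hypothetical helper: write one integer value to a /sys/power attribute. */
static void write_limit(const char *attr, int val)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/power/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return;
	fprintf(f, "%d\n", val);
	fclose(f);
}

int main(void)
{
	write_limit("cpufreq_max_limit", 800000);	/* cap CPU at 800 MHz */
	write_limit("cpufreq_min_limit", 200000);	/* keep CPU at or above 200 MHz */
	/* ... latency- or thermal-sensitive work ... */
	write_limit("cpufreq_max_limit", -1);		/* release the limits */
	write_limit("cpufreq_min_limit", -1);
	return 0;
}
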
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 9a00a0a2628..fac1ce85c4a 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -72,6 +72,7 @@ static struct kobj_attribute _name##_attr = { \
.store = _name##_store, \
}
+extern int noresume;
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
@@ -245,3 +246,45 @@ static inline void suspend_thaw_processes(void)
{
}
#endif
+
+#ifdef CONFIG_WAKELOCK
+/* kernel/power/wakelock.c */
+extern struct workqueue_struct *suspend_work_queue;
+extern struct wake_lock main_wake_lock;
+extern struct workqueue_struct *sync_work_queue;
+extern struct wake_lock sync_wake_lock;
+extern suspend_state_t requested_suspend_state;
+#endif
+
+#ifdef CONFIG_USER_WAKELOCK
+ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n);
+ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n);
+#endif
+
+#ifdef CONFIG_EARLYSUSPEND
+/* kernel/power/earlysuspend.c */
+void request_suspend_state(suspend_state_t state);
+suspend_state_t get_suspend_state(void);
+#endif
+
+struct pm_wd_data {
+ struct task_struct *tsk;
+ int timeout;
+};
+#ifdef CONFIG_PM_WATCHDOG_TIMEOUT
+void pm_wd_timeout(unsigned long data);
+void pm_wd_add_timer(struct timer_list *timer, struct pm_wd_data *data,
+ int timeout);
+void pm_wd_del_timer(struct timer_list *timer);
+#else
+static inline void pm_wd_timeout(unsigned long data) { }
+static inline void pm_wd_add_timer(struct timer_list *timer,
+ struct pm_wd_data *data, int timeout) { }
+static inline void pm_wd_del_timer(struct timer_list *timer) { }
+#endif
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0cf3a27a6c9..31338cdeafc 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <linux/wakelock.h>
/*
* Timeout for stopping processes
@@ -82,6 +83,10 @@ static int try_to_freeze_tasks(bool sig_only)
todo += wq_busy;
}
+ if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
+ wakeup = 1;
+ break;
+ }
if (!todo || time_after(jiffies, end_time))
break;
@@ -108,19 +113,25 @@ static int try_to_freeze_tasks(bool sig_only)
* and caller must call thaw_processes() if something fails),
* but it cleans up leftover PF_FREEZE requests.
*/
- printk("\n");
- printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
- "(%d tasks refusing to freeze, wq_busy=%d):\n",
- wakeup ? "aborted" : "failed",
- elapsed_csecs / 100, elapsed_csecs % 100,
- todo - wq_busy, wq_busy);
-
+	if (wakeup) {
+		printk("\n");
+		printk(KERN_ERR "Freezing of %s aborted\n",
+		       sig_only ? "user space " : "tasks ");
+	} else {
+		printk("\n");
+		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
+		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+		       elapsed_csecs / 100, elapsed_csecs % 100,
+		       todo - wq_busy, wq_busy);
+	}
thaw_workqueues();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
- if (!wakeup && freezing(p) && !freezer_should_skip(p))
+ if (freezing(p) && !freezer_should_skip(p) &&
+ elapsed_csecs > 100)
sched_show_task(p);
cancel_freezing(p);
task_unlock(p);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 06efa54f93d..a45d501872f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1272,6 +1272,79 @@ static unsigned long minimum_image_size(unsigned long saveable)
return saveable <= size ? 0 : saveable - size;
}
+#ifdef CONFIG_FULL_PAGE_RECLAIM
+static int is_exist_entry(pgd_t *pgd, int i)
+{
+ pmd_t *pmd;
+
+ pgd = pgd+i;
+
+ if (pgd_none(*pgd))
+ return 0;
+
+ if (pgd_bad(*pgd))
+ return 0;
+
+ pmd = pmd_offset(pgd, 0);
+
+ if (pmd_none(*pmd))
+ return 0;
+
+ if (pmd_bad(*pmd))
+ return 0;
+
+ return 1;
+}
+
+static int show_process_pte_size(void)
+{
+ struct task_struct *p;
+ int i;
+ int count;
+ int tot_count = 0;
+ int kernel_did = 0;
+ int k_count = 0;
+ int task_struct_size = 0;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ count = 0;
+ task_struct_size += sizeof(struct task_struct);
+ if (p->comm[0] == '[') {
+ printk(KERN_DEBUG "%s skip\n", p->comm);
+ continue;
+ }
+ if (p->mm == NULL) {
+ printk(KERN_DEBUG "%s skip\n", p->comm);
+ continue;
+ }
+ if (p->mm->pgd == NULL)
+ continue;
+
+ for (i = 0; i < 1536; i++) {
+ if (is_exist_entry(p->mm->pgd, i))
+ count++;
+ }
+ if (!kernel_did) {
+ for (i = 1536; i < 2048; i++) {
+ if (is_exist_entry(p->mm->pgd, i))
+ k_count++;
+ }
+ kernel_did = 1;
+ }
+ printk(KERN_INFO "%s : pgd entry count = %d, size = %d K\n",
+ p->comm, count, (16 + count * 4));
+ tot_count = tot_count + (16 + count * 4);
+ }
+ printk(KERN_INFO "PAGE TABLE ==> total size = %d K , kernel = %d K\n",
+ tot_count, k_count * 4);
+ printk(KERN_INFO "task_struct_size = %d K\n", task_struct_size / 1024);
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+#endif /* CONFIG_FULL_PAGE_RECLAIM */
+
/**
* hibernate_preallocate_memory - Preallocate memory for hibernation image
*
@@ -1305,6 +1378,16 @@ int hibernate_preallocate_memory(void)
printk(KERN_INFO "PM: Preallocating image memory... ");
do_gettimeofday(&start);
+#ifdef CONFIG_FULL_PAGE_RECLAIM
+ /* First of all, throw out unnecessary page frames for saving */
+ do {
+ pages = shrink_all_memory(ULONG_MAX);
+ printk(KERN_INFO "\bdone (%lu pages freed)\n", pages);
+ /* shrink all pages */
+ } while (pages);
+ show_process_pte_size();
+#endif
+
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index e40d20595b1..d48f60b4da1 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -29,6 +29,9 @@
#include "power.h"
const char *const pm_states[PM_SUSPEND_MAX] = {
+#ifdef CONFIG_EARLYSUSPEND
+ [PM_SUSPEND_ON] = "on",
+#endif
[PM_SUSPEND_STANDBY] = "standby",
[PM_SUSPEND_MEM] = "mem",
};
@@ -126,28 +129,41 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
local_irq_enable();
}
+#if !defined(CONFIG_CPU_EXYNOS4210)
+#define CHECK_POINT printk(KERN_DEBUG "%s:%d\n", __func__, __LINE__)
+#else
+#define CHECK_POINT
+#endif
+
/**
- * suspend_enter - enter the desired system sleep state.
- * @state: state to enter
+ * suspend_enter - enter the desired system sleep state.
+ * @state: State to enter
+ * @wakeup: Returns information that suspend should not be entered again.
*
- * This function should be called after devices have been suspended.
+ * This function should be called after devices have been suspended.
*/
-static int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
+ CHECK_POINT;
+
if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (error)
goto Platform_finish;
}
+ CHECK_POINT;
+
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Platform_finish;
}
+ CHECK_POINT;
+
if (suspend_ops->prepare_late) {
error = suspend_ops->prepare_late();
if (error)
@@ -161,12 +177,18 @@ static int suspend_enter(suspend_state_t state)
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
+ CHECK_POINT;
+
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
+
+ CHECK_POINT;
+
if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+ *wakeup = pm_wakeup_pending();
+ if (!(suspend_test(TEST_CORE) || *wakeup)) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
@@ -200,6 +222,7 @@ static int suspend_enter(suspend_state_t state)
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
+ bool wakeup = false;
if (!suspend_ops)
return -ENOSYS;
@@ -222,7 +245,10 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- error = suspend_enter(state);
+ do {
+ error = suspend_enter(state, &wakeup);
+ } while (!error && !wakeup
+ && suspend_ops->suspend_again && suspend_ops->suspend_again());
Resume_devices:
suspend_test_start();
@@ -256,6 +282,40 @@ static void suspend_finish(void)
pm_restore_console();
}
+#ifdef CONFIG_PM_WATCHDOG_TIMEOUT
+void pm_wd_timeout(unsigned long data)
+{
+ struct pm_wd_data *wd_data = (void *)data;
+ struct task_struct *tsk = wd_data->tsk;
+
+ pr_emerg("%s: PM watchdog timeout: %d seconds\n", __func__,
+ wd_data->timeout);
+
+ pr_emerg("stack:\n");
+ show_stack(tsk, NULL);
+
+ BUG();
+}
+
+void pm_wd_add_timer(struct timer_list *timer, struct pm_wd_data *data,
+ int timeout)
+{
+ data->timeout = timeout;
+ data->tsk = get_current();
+ init_timer_on_stack(timer);
+ timer->expires = jiffies + HZ * data->timeout;
+ timer->function = pm_wd_timeout;
+ timer->data = (unsigned long)data;
+ add_timer(timer);
+}
+
+void pm_wd_del_timer(struct timer_list *timer)
+{
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#endif
+
/**
* enter_state - Do common work of entering low-power state.
* @state: pm_state structure for state we're entering.
@@ -269,6 +329,8 @@ static void suspend_finish(void)
int enter_state(suspend_state_t state)
{
int error;
+ struct timer_list timer;
+ struct pm_wd_data data;
if (!valid_state(state))
return -ENODEV;
@@ -294,8 +356,12 @@ int enter_state(suspend_state_t state)
pm_restore_gfp_mask();
Finish:
+ pm_wd_add_timer(&timer, &data, 15);
+
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
+
+ pm_wd_del_timer(&timer);
Unlock:
mutex_unlock(&pm_mutex);
return error;
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644
index 00000000000..d2a65da9f22
--- /dev/null
+++ b/kernel/power/suspend_time.c
@@ -0,0 +1,111 @@
+/*
+ * debugfs file to track time spent in suspend
+ *
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+
+static struct timespec suspend_time_before;
+static unsigned int time_in_suspend_bins[32];
+
+#ifdef CONFIG_DEBUG_FS
+static int suspend_time_debug_show(struct seq_file *s, void *data)
+{
+ int bin;
+ seq_printf(s, "time (secs) count\n");
+ seq_printf(s, "------------------\n");
+ for (bin = 0; bin < 32; bin++) {
+ if (time_in_suspend_bins[bin] == 0)
+ continue;
+ seq_printf(s, "%4d - %4d %4u\n",
+ bin ? 1 << (bin - 1) : 0, 1 << bin,
+ time_in_suspend_bins[bin]);
+ }
+ return 0;
+}
+
+static int suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_time_debug_show, NULL);
+}
+
+static const struct file_operations suspend_time_debug_fops = {
+ .open = suspend_time_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init suspend_time_debug_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
+ &suspend_time_debug_fops);
+ if (!d) {
+ pr_err("Failed to create suspend_time debug file\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+late_initcall(suspend_time_debug_init);
+#endif
+
+static int suspend_time_syscore_suspend(void)
+{
+ read_persistent_clock(&suspend_time_before);
+
+ return 0;
+}
+
+static void suspend_time_syscore_resume(void)
+{
+ struct timespec after;
+
+ read_persistent_clock(&after);
+
+ after = timespec_sub(after, suspend_time_before);
+
+ time_in_suspend_bins[fls(after.tv_sec)]++;
+
+ pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
+ after.tv_nsec / NSEC_PER_MSEC);
+}
+
+static struct syscore_ops suspend_time_syscore_ops = {
+ .suspend = suspend_time_syscore_suspend,
+ .resume = suspend_time_syscore_resume,
+};
+
+static int suspend_time_syscore_init(void)
+{
+ register_syscore_ops(&suspend_time_syscore_ops);
+
+ return 0;
+}
+
+static void suspend_time_syscore_exit(void)
+{
+ unregister_syscore_ops(&suspend_time_syscore_ops);
+}
+module_init(suspend_time_syscore_init);
+module_exit(suspend_time_syscore_exit);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 7c97c3a0eee..2a2dc30181e 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -372,6 +372,15 @@ static int swap_writer_finish(struct swap_map_handle *handle,
LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
+/*
+ * Empirical LZO compression ratio. When compression is used for hibernation,
+ * swap space does not need to be sized for the worst case; instead this
+ * empirical ratio is assumed. If swap turns out to be too small,
+ * alloc_swapdev_block() fails and the hibernation code handles the error
+ * gracefully.
+ */
+#define LZO_RATIO(x) ((x) / 2)
+
/**
* save_image - save the suspend image data
*/
@@ -437,7 +446,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
struct bio *bio;
struct timeval start;
struct timeval stop;
- size_t off, unc_len, cmp_len;
+ size_t off, unc_len, cmp_len, total;
unsigned char *unc, *cmp, *wrk, *page;
page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
@@ -477,6 +486,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
if (!m)
m = 1;
nr_pages = 0;
+ total = 0;
bio = NULL;
do_gettimeofday(&start);
for (;;) {
@@ -529,6 +539,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
if (ret)
goto out_finish;
}
+ total += DIV_ROUND_UP(LZO_HEADER + cmp_len, PAGE_SIZE);
}
out_finish:
@@ -541,6 +552,11 @@ out_finish:
else
printk(KERN_CONT "\n");
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
+ pr_info("PM: %lu->%lu kbytes, %d%% compressed\n",
+ nr_to_write * PAGE_SIZE / 1024,
+ total * PAGE_SIZE / 1024,
+ 100 - ((total * 100) / nr_to_write));
+ image_size = total * PAGE_SIZE;
vfree(cmp);
vfree(unc);
@@ -564,8 +580,8 @@ static int enough_swap(unsigned int nr_pages, unsigned int flags)
pr_debug("PM: Free swap pages: %u\n", free_swap);
- required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ?
- nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1);
+ required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ? nr_pages :
+ LZO_RATIO((nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1));
return free_swap > required;
}
@@ -943,8 +959,11 @@ int swsusp_check(void)
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
/* Reset swap signature now */
- error = hib_bio_write_page(swsusp_resume_block,
- swsusp_header, NULL);
+#if defined(CONFIG_FAST_RESUME) && defined(CONFIG_SLP)
+ if (noresume)
+#endif
+ error = hib_bio_write_page(swsusp_resume_block,
+ swsusp_header, NULL);
} else {
error = -EINVAL;
}
diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c
new file mode 100644
index 00000000000..a28a8db4146
--- /dev/null
+++ b/kernel/power/userwakelock.c
@@ -0,0 +1,219 @@
+/* kernel/power/userwakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/wakelock.h>
+#include <linux/slab.h>
+
+#include "power.h"
+
+enum {
+ DEBUG_FAILURE = BIT(0),
+ DEBUG_ERROR = BIT(1),
+ DEBUG_NEW = BIT(2),
+ DEBUG_ACCESS = BIT(3),
+ DEBUG_LOOKUP = BIT(4),
+};
+static int debug_mask = DEBUG_FAILURE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(tree_lock);
+
+struct user_wake_lock {
+ struct rb_node node;
+ struct wake_lock wake_lock;
+ char name[0];
+};
+struct rb_root user_wake_locks;
+
+static struct user_wake_lock *lookup_wake_lock_name(
+ const char *buf, int allocate, long *timeoutptr)
+{
+ struct rb_node **p = &user_wake_locks.rb_node;
+ struct rb_node *parent = NULL;
+ struct user_wake_lock *l;
+ int diff;
+ u64 timeout;
+ int name_len;
+ const char *arg;
+
+ /* Find length of lock name and start of optional timeout string */
+ arg = buf;
+ while (*arg && !isspace(*arg))
+ arg++;
+ name_len = arg - buf;
+ if (!name_len)
+ goto bad_arg;
+ while (isspace(*arg))
+ arg++;
+
+ /* Process timeout string */
+ if (timeoutptr && *arg) {
+ timeout = simple_strtoull(arg, (char **)&arg, 0);
+ while (isspace(*arg))
+ arg++;
+ if (*arg)
+ goto bad_arg;
+ /* convert timeout from nanoseconds to jiffies > 0 */
+ timeout += (NSEC_PER_SEC / HZ) - 1;
+ do_div(timeout, (NSEC_PER_SEC / HZ));
+ if (timeout <= 0)
+ timeout = 1;
+ *timeoutptr = timeout;
+ } else if (*arg)
+ goto bad_arg;
+ else if (timeoutptr)
+ *timeoutptr = 0;
+
+ /* Lookup wake lock in rbtree */
+ while (*p) {
+ parent = *p;
+ l = rb_entry(parent, struct user_wake_lock, node);
+ diff = strncmp(buf, l->name, name_len);
+ if (!diff && l->name[name_len])
+ diff = -1;
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: compare %.*s %s %d\n",
+ name_len, buf, l->name, diff);
+
+ if (diff < 0)
+ p = &(*p)->rb_left;
+ else if (diff > 0)
+ p = &(*p)->rb_right;
+ else
+ return l;
+ }
+
+ /* Allocate and add new wakelock to rbtree */
+ if (!allocate) {
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: %.*s not found\n",
+ name_len, buf);
+ return ERR_PTR(-EINVAL);
+ }
+ l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL);
+ if (l == NULL) {
+ if (debug_mask & DEBUG_FAILURE)
+ pr_err("lookup_wake_lock_name: failed to allocate "
+ "memory for %.*s\n", name_len, buf);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(l->name, buf, name_len);
+ if (debug_mask & DEBUG_NEW)
+ pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name);
+ wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name);
+ rb_link_node(&l->node, parent, p);
+ rb_insert_color(&l->node, &user_wake_locks);
+ return l;
+
+bad_arg:
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n",
+ name_len, buf, arg);
+ return ERR_PTR(-EINVAL);
+}
+
+ssize_t wake_lock_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ char *end = buf + PAGE_SIZE;
+ struct rb_node *n;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+
+ for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+ l = rb_entry(n, struct user_wake_lock, node);
+ if (wake_lock_active(&l->wake_lock))
+ s += scnprintf(s, end - s, "%s ", l->name);
+ }
+ s += scnprintf(s, end - s, "\n");
+
+ mutex_unlock(&tree_lock);
+ return (s - buf);
+}
+
+ssize_t wake_lock_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ long timeout;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+ l = lookup_wake_lock_name(buf, 1, &timeout);
+ if (IS_ERR(l)) {
+ n = PTR_ERR(l);
+ goto bad_name;
+ }
+
+ if (debug_mask & DEBUG_ACCESS)
+ pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout);
+
+ if (timeout)
+ wake_lock_timeout(&l->wake_lock, timeout);
+ else
+ wake_lock(&l->wake_lock);
+bad_name:
+ mutex_unlock(&tree_lock);
+ return n;
+}
+
+
+ssize_t wake_unlock_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ char *end = buf + PAGE_SIZE;
+ struct rb_node *n;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+
+ for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+ l = rb_entry(n, struct user_wake_lock, node);
+ if (!wake_lock_active(&l->wake_lock))
+ s += scnprintf(s, end - s, "%s ", l->name);
+ }
+ s += scnprintf(s, end - s, "\n");
+
+ mutex_unlock(&tree_lock);
+ return (s - buf);
+}
+
+ssize_t wake_unlock_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+ l = lookup_wake_lock_name(buf, 0, NULL);
+ if (IS_ERR(l)) {
+ n = PTR_ERR(l);
+ goto not_found;
+ }
+
+ if (debug_mask & DEBUG_ACCESS)
+ pr_info("wake_unlock_store: %s\n", l->name);
+
+ wake_unlock(&l->wake_lock);
+not_found:
+ mutex_unlock(&tree_lock);
+ return n;
+}
+
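
A user-space sketch of the wake_lock/wake_unlock attributes implemented above (hypothetical program; the paths and the nanosecond timeout unit follow lookup_wake_lock_name()):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: write a string to a sysfs attribute. */
static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	/* "name timeout": auto-expires after ~1 s (timeout is in nanoseconds) */
	write_str("/sys/power/wake_lock", "demo 1000000000");
	/* ... work that must complete before the system is allowed to suspend ... */
	write_str("/sys/power/wake_unlock", "demo");
	return 0;
}
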
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
new file mode 100644
index 00000000000..104f6dc2cd3
--- /dev/null
+++ b/kernel/power/wakelock.c
@@ -0,0 +1,687 @@
+/* kernel/power/wakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/wakelock.h>
+#ifdef CONFIG_WAKELOCK_STAT
+#include <linux/proc_fs.h>
+#endif
+#ifdef CONFIG_FAST_BOOT
+#include <linux/delay.h>
+#endif
+#include "power.h"
+
+enum {
+ DEBUG_EXIT_SUSPEND = 1U << 0,
+ DEBUG_WAKEUP = 1U << 1,
+ DEBUG_SUSPEND = 1U << 2,
+ DEBUG_EXPIRE = 1U << 3,
+ DEBUG_WAKE_LOCK = 1U << 4,
+};
+static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP | DEBUG_SUSPEND;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define WAKE_LOCK_TYPE_MASK (0x0f)
+#define WAKE_LOCK_INITIALIZED (1U << 8)
+#define WAKE_LOCK_ACTIVE (1U << 9)
+#define WAKE_LOCK_AUTO_EXPIRE (1U << 10)
+#define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11)
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(inactive_locks);
+static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT];
+static int current_event_num;
+struct workqueue_struct *suspend_work_queue;
+struct workqueue_struct *sync_work_queue;
+struct wake_lock main_wake_lock;
+struct wake_lock sync_wake_lock;
+suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;
+static struct wake_lock unknown_wakeup;
+static struct wake_lock suspend_backoff_lock;
+
+#define SUSPEND_BACKOFF_THRESHOLD 10
+#define SUSPEND_BACKOFF_INTERVAL 10000
+
+static unsigned suspend_short_count;
+
+#ifdef CONFIG_WAKELOCK_STAT
+static struct wake_lock deleted_wake_locks;
+static ktime_t last_sleep_time_update;
+static int wait_for_wakeup;
+
+int get_expired_time(struct wake_lock *lock, ktime_t *expire_time)
+{
+ struct timespec ts;
+ struct timespec kt;
+ struct timespec tomono;
+ struct timespec delta;
+ struct timespec sleep;
+ long timeout;
+
+ if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE))
+ return 0;
+ get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep);
+ timeout = lock->expires - jiffies;
+ if (timeout > 0)
+ return 0;
+ jiffies_to_timespec(-timeout, &delta);
+ set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec,
+ kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec);
+ *expire_time = timespec_to_ktime(ts);
+ return 1;
+}
+
+
+static int print_lock_stat(struct seq_file *m, struct wake_lock *lock)
+{
+ int lock_count = lock->stat.count;
+ int expire_count = lock->stat.expire_count;
+ ktime_t active_time = ktime_set(0, 0);
+ ktime_t total_time = lock->stat.total_time;
+ ktime_t max_time = lock->stat.max_time;
+
+ ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time;
+ if (lock->flags & WAKE_LOCK_ACTIVE) {
+ ktime_t now, add_time;
+ int expired = get_expired_time(lock, &now);
+ if (!expired)
+ now = ktime_get();
+ add_time = ktime_sub(now, lock->stat.last_time);
+ lock_count++;
+ if (!expired)
+ active_time = add_time;
+ else
+ expire_count++;
+ total_time = ktime_add(total_time, add_time);
+ if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND)
+ prevent_suspend_time = ktime_add(prevent_suspend_time,
+ ktime_sub(now, last_sleep_time_update));
+ if (add_time.tv64 > max_time.tv64)
+ max_time = add_time;
+ }
+
+ return seq_printf(m,
+ "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n",
+ lock->name, lock_count, expire_count,
+ lock->stat.wakeup_count, ktime_to_ns(active_time),
+ ktime_to_ns(total_time),
+ ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time),
+ ktime_to_ns(lock->stat.last_time));
+}
+
+static int wakelock_stats_show(struct seq_file *m, void *unused)
+{
+ unsigned long irqflags;
+ struct wake_lock *lock;
+ int ret;
+ int type;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+
+ ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since"
+ "\ttotal_time\tsleep_time\tmax_time\tlast_change\n");
+ list_for_each_entry(lock, &inactive_locks, link)
+ ret = print_lock_stat(m, lock);
+ for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) {
+ list_for_each_entry(lock, &active_wake_locks[type], link)
+ ret = print_lock_stat(m, lock);
+ }
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return 0;
+}
+
+static void wake_unlock_stat_locked(struct wake_lock *lock, int expired)
+{
+ ktime_t duration;
+ ktime_t now;
+ if (!(lock->flags & WAKE_LOCK_ACTIVE))
+ return;
+ if (get_expired_time(lock, &now))
+ expired = 1;
+ else
+ now = ktime_get();
+ lock->stat.count++;
+ if (expired)
+ lock->stat.expire_count++;
+ duration = ktime_sub(now, lock->stat.last_time);
+ lock->stat.total_time = ktime_add(lock->stat.total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time))
+ lock->stat.max_time = duration;
+ lock->stat.last_time = ktime_get();
+ if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
+ duration = ktime_sub(now, last_sleep_time_update);
+ lock->stat.prevent_suspend_time = ktime_add(
+ lock->stat.prevent_suspend_time, duration);
+ lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
+ }
+}
+
+static void update_sleep_wait_stats_locked(int done)
+{
+ struct wake_lock *lock;
+ ktime_t now, etime, elapsed, add;
+ int expired;
+
+ now = ktime_get();
+ elapsed = ktime_sub(now, last_sleep_time_update);
+ list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) {
+ expired = get_expired_time(lock, &etime);
+ if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) {
+ if (expired)
+ add = ktime_sub(etime, last_sleep_time_update);
+ else
+ add = elapsed;
+ lock->stat.prevent_suspend_time = ktime_add(
+ lock->stat.prevent_suspend_time, add);
+ }
+ if (done || expired)
+ lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND;
+ else
+ lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND;
+ }
+ last_sleep_time_update = now;
+}
+#endif
+
+
+static void expire_wake_lock(struct wake_lock *lock)
+{
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_unlock_stat_locked(lock, 1);
+#endif
+ lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
+ list_del(&lock->link);
+ list_add(&lock->link, &inactive_locks);
+ if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE))
+ pr_info("expired wake lock %s\n", lock->name);
+}
+
+/* Caller must acquire the list_lock spinlock */
+static void print_active_locks(int type)
+{
+ struct wake_lock *lock;
+ bool print_expired = true;
+
+ BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+ list_for_each_entry(lock, &active_wake_locks[type], link) {
+ if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
+ long timeout = lock->expires - jiffies;
+ if (timeout > 0)
+ pr_info("active wake lock %s, time left %ld\n",
+ lock->name, timeout);
+ else if (print_expired)
+ pr_info("wake lock %s, expired\n", lock->name);
+ } else {
+ pr_info("active wake lock %s\n", lock->name);
+ if (!(debug_mask & DEBUG_EXPIRE))
+ print_expired = false;
+ }
+ }
+}
+
+static long has_wake_lock_locked(int type)
+{
+ struct wake_lock *lock, *n;
+ long max_timeout = 0;
+
+ BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+ list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) {
+ if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) {
+ long timeout = lock->expires - jiffies;
+ if (timeout <= 0)
+ expire_wake_lock(lock);
+ else if (timeout > max_timeout)
+ max_timeout = timeout;
+ } else
+ return -1;
+ }
+ return max_timeout;
+}
+#ifdef CONFIG_FAST_BOOT
+extern bool fake_shut_down;
+#endif
+
+long has_wake_lock(int type)
+{
+ long ret;
+ unsigned long irqflags;
+#ifdef CONFIG_FAST_BOOT
+ if (fake_shut_down)
+ return 0;
+#endif
+ spin_lock_irqsave(&list_lock, irqflags);
+ ret = has_wake_lock_locked(type);
+ if (ret && (debug_mask & DEBUG_WAKEUP) && type == WAKE_LOCK_SUSPEND)
+ print_active_locks(type);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return ret;
+}
+
+static void suspend_backoff(void)
+{
+ pr_info("suspend: too many immediate wakeups, back off\n");
+ wake_lock_timeout(&suspend_backoff_lock,
+ msecs_to_jiffies(SUSPEND_BACKOFF_INTERVAL));
+}
+
+static void suspend(struct work_struct *work)
+{
+ int ret;
+ int entry_event_num;
+ struct timespec ts_entry, ts_exit;
+
+ if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("suspend: abort suspend\n");
+ return;
+ }
+
+ entry_event_num = current_event_num;
+ sys_sync();
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("suspend: enter suspend\n");
+ getnstimeofday(&ts_entry);
+
+ if (debug_mask & DEBUG_EXIT_SUSPEND) {
+ struct rtc_time tm;
+ rtc_time_to_tm(ts_entry.tv_sec, &tm);
+ pr_info("suspend: enter suspend, "
+ "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts_entry.tv_nsec);
+ }
+
+ ret = pm_suspend(requested_suspend_state);
+ getnstimeofday(&ts_exit);
+
+ if (debug_mask & DEBUG_EXIT_SUSPEND) {
+ struct rtc_time tm;
+ rtc_time_to_tm(ts_exit.tv_sec, &tm);
+ pr_info("suspend: exit suspend, ret = %d "
+ "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret,
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec);
+ }
+
+ if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) {
+ ++suspend_short_count;
+
+ if (suspend_short_count == SUSPEND_BACKOFF_THRESHOLD) {
+ suspend_backoff();
+ suspend_short_count = 0;
+ }
+ } else {
+ suspend_short_count = 0;
+ }
+
+ if (current_event_num == entry_event_num) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("suspend: pm_suspend returned with no event\n");
+ wake_lock_timeout(&unknown_wakeup, HZ / 2);
+ }
+}
+static DECLARE_WORK(suspend_work, suspend);
+
+static void expire_wake_locks(unsigned long data)
+{
+ long has_lock;
+ unsigned long irqflags;
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("expire_wake_locks: start\n");
+ spin_lock_irqsave(&list_lock, irqflags);
+ if (debug_mask & DEBUG_SUSPEND)
+ print_active_locks(WAKE_LOCK_SUSPEND);
+ has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND);
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock);
+ if (has_lock == 0)
+ queue_work(suspend_work_queue, &suspend_work);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0);
+
+static int power_suspend_late(struct device *dev)
+{
+ int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0;
+#ifdef CONFIG_WAKELOCK_STAT
+ wait_for_wakeup = !ret;
+#endif
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("power_suspend_late return %d\n", ret);
+ return ret;
+}
+
+static struct dev_pm_ops power_driver_pm_ops = {
+ .suspend_noirq = power_suspend_late,
+};
+
+static struct platform_driver power_driver = {
+ .driver.name = "power",
+ .driver.pm = &power_driver_pm_ops,
+};
+static struct platform_device power_device = {
+ .name = "power",
+};
+
+void wake_lock_init(struct wake_lock *lock, int type, const char *name)
+{
+ unsigned long irqflags = 0;
+
+ if (name)
+ lock->name = name;
+ BUG_ON(!lock->name);
+
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock_init name=%s\n", lock->name);
+#ifdef CONFIG_WAKELOCK_STAT
+ lock->stat.count = 0;
+ lock->stat.expire_count = 0;
+ lock->stat.wakeup_count = 0;
+ lock->stat.total_time = ktime_set(0, 0);
+ lock->stat.prevent_suspend_time = ktime_set(0, 0);
+ lock->stat.max_time = ktime_set(0, 0);
+ lock->stat.last_time = ktime_set(0, 0);
+#endif
+ lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED;
+
+ INIT_LIST_HEAD(&lock->link);
+ spin_lock_irqsave(&list_lock, irqflags);
+ list_add(&lock->link, &inactive_locks);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_lock_init);
+
+void wake_lock_destroy(struct wake_lock *lock)
+{
+ unsigned long irqflags;
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock_destroy name=%s\n", lock->name);
+ spin_lock_irqsave(&list_lock, irqflags);
+ lock->flags &= ~WAKE_LOCK_INITIALIZED;
+#ifdef CONFIG_WAKELOCK_STAT
+ if (lock->stat.count) {
+ deleted_wake_locks.stat.count += lock->stat.count;
+ deleted_wake_locks.stat.expire_count += lock->stat.expire_count;
+ deleted_wake_locks.stat.total_time =
+ ktime_add(deleted_wake_locks.stat.total_time,
+ lock->stat.total_time);
+ deleted_wake_locks.stat.prevent_suspend_time =
+ ktime_add(deleted_wake_locks.stat.prevent_suspend_time,
+ lock->stat.prevent_suspend_time);
+ deleted_wake_locks.stat.max_time =
+ ktime_add(deleted_wake_locks.stat.max_time,
+ lock->stat.max_time);
+ }
+#endif
+ list_del(&lock->link);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_lock_destroy);
+
+static void wake_lock_internal(
+ struct wake_lock *lock, long timeout, int has_timeout)
+{
+ int type;
+ unsigned long irqflags;
+ long expire_in;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+ type = lock->flags & WAKE_LOCK_TYPE_MASK;
+ BUG_ON(type >= WAKE_LOCK_TYPE_COUNT);
+ BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED));
+#ifdef CONFIG_WAKELOCK_STAT
+ if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) {
+ if (debug_mask & DEBUG_WAKEUP)
+ pr_info("wakeup wake lock: %s\n", lock->name);
+ wait_for_wakeup = 0;
+ lock->stat.wakeup_count++;
+ }
+ if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) &&
+ (long)(lock->expires - jiffies) <= 0) {
+ wake_unlock_stat_locked(lock, 0);
+ lock->stat.last_time = ktime_get();
+ }
+#endif
+ if (!(lock->flags & WAKE_LOCK_ACTIVE)) {
+ lock->flags |= WAKE_LOCK_ACTIVE;
+#ifdef CONFIG_WAKELOCK_STAT
+ lock->stat.last_time = ktime_get();
+#endif
+ }
+ list_del(&lock->link);
+ if (has_timeout) {
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n",
+ lock->name, type, timeout / HZ,
+ (timeout % HZ) * MSEC_PER_SEC / HZ);
+ lock->expires = jiffies + timeout;
+ lock->flags |= WAKE_LOCK_AUTO_EXPIRE;
+ list_add_tail(&lock->link, &active_wake_locks[type]);
+ } else {
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_lock: %s, type %d\n", lock->name, type);
+ lock->expires = LONG_MAX;
+ lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE;
+ list_add(&lock->link, &active_wake_locks[type]);
+ }
+ if (type == WAKE_LOCK_SUSPEND) {
+ current_event_num++;
+#ifdef CONFIG_WAKELOCK_STAT
+ if (lock == &main_wake_lock)
+ update_sleep_wait_stats_locked(1);
+ else if (!wake_lock_active(&main_wake_lock))
+ update_sleep_wait_stats_locked(0);
+#endif
+ if (has_timeout)
+ expire_in = has_wake_lock_locked(type);
+ else
+ expire_in = -1;
+ if (expire_in > 0) {
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_lock: %s, start expire timer, "
+ "%ld\n", lock->name, expire_in);
+ mod_timer(&expire_timer, jiffies + expire_in);
+ } else {
+ if (del_timer(&expire_timer))
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_lock: %s, stop expire timer\n",
+ lock->name);
+ if (expire_in == 0)
+ queue_work(suspend_work_queue, &suspend_work);
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+
+void wake_lock(struct wake_lock *lock)
+{
+ wake_lock_internal(lock, 0, 0);
+}
+EXPORT_SYMBOL(wake_lock);
+
+void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+ wake_lock_internal(lock, timeout, 1);
+}
+EXPORT_SYMBOL(wake_lock_timeout);
+
+void wake_unlock(struct wake_lock *lock)
+{
+ int type;
+ unsigned long irqflags;
+ spin_lock_irqsave(&list_lock, irqflags);
+ type = lock->flags & WAKE_LOCK_TYPE_MASK;
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_unlock_stat_locked(lock, 0);
+#endif
+ if (debug_mask & DEBUG_WAKE_LOCK)
+ pr_info("wake_unlock: %s\n", lock->name);
+ lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE);
+ list_del(&lock->link);
+ list_add(&lock->link, &inactive_locks);
+ if (type == WAKE_LOCK_SUSPEND) {
+ long has_lock = has_wake_lock_locked(type);
+ if (has_lock > 0) {
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_unlock: %s, start expire timer, "
+ "%ld\n", lock->name, has_lock);
+ mod_timer(&expire_timer, jiffies + has_lock);
+ } else {
+ if (del_timer(&expire_timer))
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("wake_unlock: %s, stop expire "
+ "timer\n", lock->name);
+ if (has_lock == 0)
+ queue_work(suspend_work_queue, &suspend_work);
+ }
+ if (lock == &main_wake_lock) {
+ if (debug_mask & DEBUG_SUSPEND)
+ print_active_locks(WAKE_LOCK_SUSPEND);
+#ifdef CONFIG_WAKELOCK_STAT
+ update_sleep_wait_stats_locked(0);
+#endif
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(wake_unlock);
+
+int wake_lock_active(struct wake_lock *lock)
+{
+ return !!(lock->flags & WAKE_LOCK_ACTIVE);
+}
+EXPORT_SYMBOL(wake_lock_active);
+
+#ifdef CONFIG_FAST_BOOT
+void wakelock_force_suspend(void)
+{
+ static int cnt;
+
+ if (cnt > 0) {
+ pr_info("%s: duplicated\n", __func__);
+ return;
+ }
+ cnt++;
+
+ msleep(3000);
+ pr_info("%s: fake shut down\n", __func__);
+ queue_work(suspend_work_queue, &suspend_work);
+
+ cnt = 0;
+}
+#endif
+
+static int wakelock_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wakelock_stats_show, NULL);
+}
+
+static const struct file_operations wakelock_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = wakelock_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init wakelocks_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++)
+ INIT_LIST_HEAD(&active_wake_locks[i]);
+
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND,
+ "deleted_wake_locks");
+#endif
+ wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main");
+ wake_lock_init(&sync_wake_lock, WAKE_LOCK_SUSPEND, "sync_system");
+ wake_lock(&main_wake_lock);
+ wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups");
+ wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND,
+ "suspend_backoff");
+
+ ret = platform_device_register(&power_device);
+ if (ret) {
+ pr_err("wakelocks_init: platform_device_register failed\n");
+ goto err_platform_device_register;
+ }
+ ret = platform_driver_register(&power_driver);
+ if (ret) {
+ pr_err("wakelocks_init: platform_driver_register failed\n");
+ goto err_platform_driver_register;
+ }
+
+ suspend_work_queue = alloc_workqueue("suspend", WQ_HIGHPRI, 0);
+ if (suspend_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_suspend_work_queue;
+ }
+
+ sync_work_queue = create_singlethread_workqueue("sync_system_work");
+ if (sync_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_sync_work_queue;
+ }
+
+#ifdef CONFIG_WAKELOCK_STAT
+ proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops);
+#endif
+
+ return 0;
+
+err_sync_work_queue:
+ destroy_workqueue(suspend_work_queue);
+err_suspend_work_queue:
+ platform_driver_unregister(&power_driver);
+err_platform_driver_register:
+ platform_device_unregister(&power_device);
+err_platform_device_register:
+ wake_lock_destroy(&suspend_backoff_lock);
+ wake_lock_destroy(&unknown_wakeup);
+ wake_lock_destroy(&sync_wake_lock);
+ wake_lock_destroy(&main_wake_lock);
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_lock_destroy(&deleted_wake_locks);
+#endif
+ return ret;
+}
+
+static void __exit wakelocks_exit(void)
+{
+#ifdef CONFIG_WAKELOCK_STAT
+ remove_proc_entry("wakelocks", NULL);
+#endif
+ destroy_workqueue(suspend_work_queue);
+ destroy_workqueue(sync_work_queue);
+ platform_driver_unregister(&power_driver);
+ platform_device_unregister(&power_device);
+ wake_lock_destroy(&suspend_backoff_lock);
+ wake_lock_destroy(&unknown_wakeup);
+ wake_lock_destroy(&sync_wake_lock);
+ wake_lock_destroy(&main_wake_lock);
+#ifdef CONFIG_WAKELOCK_STAT
+ wake_lock_destroy(&deleted_wake_locks);
+#endif
+}
+
+core_initcall(wakelocks_init);
+module_exit(wakelocks_exit);
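
Editorial note: for readers unfamiliar with this API, a minimal usage sketch follows. It assumes the wakelock header added elsewhere in this patch series (include/linux/wakelock.h); the driver name, lock object and timeout are illustrative, not taken from this patch.

/* Illustrative only: a driver holding a suspend wakelock across an event. */
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wakelock.h>		/* assumed header from this patch series */

static struct wake_lock mydrv_wake_lock;	/* hypothetical lock */

static int __init mydrv_init(void)
{
	wake_lock_init(&mydrv_wake_lock, WAKE_LOCK_SUSPEND, "mydrv");
	return 0;
}

static void mydrv_handle_event(void)
{
	wake_lock(&mydrv_wake_lock);		/* block suspend */
	/* ... process the event ... */
	wake_unlock(&mydrv_wake_lock);		/* allow suspend again */

	/* Alternatively: block suspend for at most 2 s, auto-expiring. */
	wake_lock_timeout(&mydrv_wake_lock, 2 * HZ);
}

static void __exit mydrv_exit(void)
{
	wake_lock_destroy(&mydrv_wake_lock);
}

With CONFIG_WAKELOCK_STAT enabled, such a lock shows up as one row in /proc/wakelocks, in the column layout printed by wakelock_stats_show() above.
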
diff --git a/kernel/printk.c b/kernel/printk.c
index 6edc4e89529..d614ccdd97e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -43,6 +43,7 @@
#include <linux/rculist.h>
#include <asm/uaccess.h>
+#include <mach/sec_debug.h>
/*
* Architectures can override it:
@@ -53,6 +54,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#ifdef CONFIG_DEBUG_LL
+extern void printascii(char *);
+#endif
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -144,7 +149,7 @@ static int console_may_schedule;
#ifdef CONFIG_PRINTK
-static char __log_buf[__LOG_BUF_LEN];
+static char __log_buf[__LOG_BUF_LEN] __nosavedata;
static char *log_buf = __log_buf;
static int log_buf_len = __LOG_BUF_LEN;
static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
@@ -211,7 +216,6 @@ void __init setup_log_buf(int early)
new_log_buf_len);
return;
}
-
spin_lock_irqsave(&logbuf_lock, flags);
log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
@@ -290,6 +294,53 @@ static inline void boot_delay_msec(void)
}
#endif
+/*
+ * Return the number of unread characters in the log buffer.
+ */
+static int log_buf_get_len(void)
+{
+ return logged_chars;
+}
+
+/*
+ * Clears the ring-buffer
+ */
+void log_buf_clear(void)
+{
+ logged_chars = 0;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+ int ret, max;
+ bool took_lock = false;
+
+ if (!oops_in_progress) {
+ spin_lock_irq(&logbuf_lock);
+ took_lock = true;
+ }
+
+ max = log_buf_get_len();
+ if (idx < 0 || idx >= max) {
+ ret = -1;
+ } else {
+ if (len > max - idx)
+ len = max - idx;
+ ret = len;
+ idx += (log_end - max);
+ while (len-- > 0)
+ dest[len] = LOG_BUF(idx + len);
+ }
+
+ if (took_lock)
+ spin_unlock_irq(&logbuf_lock);
+
+ return ret;
+}
+
#ifdef CONFIG_SECURITY_DMESG_RESTRICT
int dmesg_restrict = 1;
#else
@@ -672,6 +723,27 @@ static void call_console_drivers(unsigned start, unsigned end)
_call_console_drivers(start_print, end, msg_level);
}
+#ifdef CONFIG_SEC_LOG
+static void (*log_char_hook)(char c);
+
+void register_log_char_hook(void (*f) (char c))
+{
+ unsigned start;
+ unsigned long flags;
+
+ spin_lock_irqsave(&logbuf_lock, flags);
+
+ start = min(con_start, log_start);
+ while (start != log_end)
+ f(__log_buf[start++ & (__LOG_BUF_LEN - 1)]);
+
+ log_char_hook = f;
+
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+}
+EXPORT_SYMBOL(register_log_char_hook);
+#endif
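
Editorial note: a rough sketch of how a CONFIG_SEC_LOG consumer could use the hook registered above; the buffer and function names are hypothetical and this is not the actual Samsung log driver.

/* Illustrative only: mirror every logged character into a static buffer. */
static char persist_buf[1 << CONFIG_LOG_BUF_SHIFT];
static unsigned int persist_idx;

static void persist_log_char(char c)
{
	persist_buf[persist_idx++ & (sizeof(persist_buf) - 1)] = c;
}

static int __init persist_log_init(void)
{
	/* Replays whatever is already in the log, then receives each new char. */
	register_log_char_hook(persist_log_char);
	return 0;
}
late_initcall(persist_log_init);
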
+
static void emit_log_char(char c)
{
LOG_BUF(log_end) = c;
@@ -682,6 +754,11 @@ static void emit_log_char(char c)
con_start = log_end - log_buf_len;
if (logged_chars < log_buf_len)
logged_chars++;
+
+#ifdef CONFIG_SEC_LOG
+ if (log_char_hook)
+ log_char_hook(c);
+#endif
}
/*
@@ -712,6 +789,21 @@ static int printk_time = 0;
#endif
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
+#if defined(CONFIG_PRINTK_CPU_ID)
+static int printk_cpu_id = 1;
+#else
+static int printk_cpu_id = 0;
+#endif
+module_param_named(cpu, printk_cpu_id, bool, S_IRUGO | S_IWUSR);
+
+#if defined(CONFIG_PRINTK_PID)
+static int printk_pid = 1;
+#else
+static int printk_pid;
+#endif
+module_param_named(pid, printk_pid, bool, S_IRUGO | S_IWUSR);
+
+
/* Check if we have any console registered that can be called early in boot. */
static int have_callable_console(void)
{
@@ -885,6 +977,10 @@ asmlinkage int vprintk(const char *fmt, va_list args)
printed_len += vscnprintf(printk_buf + printed_len,
sizeof(printk_buf) - printed_len, fmt, args);
+#ifdef CONFIG_DEBUG_LL
+ printascii(printk_buf);
+#endif
+
p = printk_buf;
/* Read log level and handle special printk prefix */
@@ -947,6 +1043,30 @@ asmlinkage int vprintk(const char *fmt, va_list args)
printed_len += tlen;
}
+ if (printk_cpu_id) {
+ /* Add the cpu id */
+ char tbuf[10], *tp;
+ unsigned tlen;
+
+ tlen = sprintf(tbuf, "c%u ", printk_cpu);
+
+ for (tp = tbuf; tp < tbuf + tlen; tp++)
+ emit_log_char(*tp);
+ printed_len += tlen;
+ }
+
+ if (printk_pid) {
+ /* Add the current process id */
+ char tbuf[10], *tp;
+ unsigned tlen;
+
+ tlen = sprintf(tbuf, "%6u ", current->pid);
+
+ for (tp = tbuf; tp < tbuf + tlen; tp++)
+ emit_log_char(*tp);
+ printed_len += tlen;
+ }
+
if (!*p)
break;
}
@@ -1142,6 +1262,12 @@ void resume_console(void)
console_unlock();
}
+int get_console_suspended(void)
+{
+ return console_suspended;
+}
+EXPORT_SYMBOL(get_console_suspended);
+
/**
* console_cpu_notify - print deferred console messages after CPU hotplug
* @self: notifier struct
@@ -1159,7 +1285,6 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_ONLINE:
case CPU_DEAD:
- case CPU_DYING:
case CPU_DOWN_FAILED:
case CPU_UP_CANCELED:
console_lock();
@@ -1749,3 +1874,11 @@ void kmsg_dump(enum kmsg_dump_reason reason)
rcu_read_unlock();
}
#endif
+
+#ifdef CONFIG_MACH_PX
+void logbuf_force_unlock(void)
+{
+ logbuf_lock = __SPIN_LOCK_UNLOCKED(logbuf_lock);
+}
+EXPORT_SYMBOL(logbuf_force_unlock);
+#endif
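
Editorial note: a minimal sketch of draining the log with the log_buf_copy() helper added above, assuming its prototype is visible to the caller; the buffer size and caller are illustrative.

/* Illustrative only: copy the oldest logged characters into a local buffer. */
static char dump_buf[4096];

static int dump_kernel_log(void)
{
	/* Returns the number of bytes copied, or -1 if idx is out of range. */
	return log_buf_copy(dump_buf, 0, sizeof(dump_buf));
}
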
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index ab449117aaf..255e1662acd 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -890,7 +890,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
raw_spin_lock_init(&lock->wait_lock);
- plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
+ plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
}
diff --git a/kernel/sched.c b/kernel/sched.c
index ce2ff4e2993..0b0d3dcd940 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,11 +71,14 @@
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
+#include <linux/cpuacct.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
+#include <mach/sec_debug.h>
+
#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"
@@ -3187,6 +3190,13 @@ context_switch(struct rq *rq, struct task_struct *prev,
finish_task_switch(this_rq(), prev);
}
+unsigned long get_cpu_nr_running(unsigned int cpu)
+{
+	if (cpu < NR_CPUS)
+ return cpu_rq(cpu)->nr_running;
+ else
+ return 0;
+}
/*
* nr_running, nr_uninterruptible and nr_context_switches:
*
@@ -4291,6 +4301,7 @@ need_resched:
} else
raw_spin_unlock_irq(&rq->lock);
+ sec_debug_task_log(cpu, rq->curr);
post_schedule(rq);
preempt_enable_no_resched();
@@ -6479,7 +6490,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
+ case CPU_STARTING:
case CPU_DOWN_FAILED:
set_cpu_active((long)hcpu, true);
return NOTIFY_OK;
@@ -7952,7 +7963,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init(&rt_rq->pushable_tasks);
#endif
rt_rq->rt_time = 0;
@@ -8024,6 +8035,9 @@ void __init sched_init(void)
int i, j;
unsigned long alloc_size = 0, ptr;
+ sec_gaf_supply_rqinfo(offsetof(struct rq, curr),
+ offsetof(struct cfs_rq, rq));
+
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
@@ -8157,7 +8171,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init(&init_task.pi_waiters);
#endif
/*
@@ -8208,13 +8222,24 @@ static inline int preempt_count_equals(int preempt_offset)
return (nested == preempt_offset);
}
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+ __might_sleep_init_called = 1;
+ return 0;
+}
+early_initcall(__might_sleep_init);
+
void __might_sleep(const char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ oops_in_progress)
+ return;
+ if (system_state != SYSTEM_RUNNING &&
+ (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
@@ -8965,6 +8990,20 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
}
static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk)
+{
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(tsk);
+
+ if ((current != tsk) && !capable(CAP_SYS_NICE) &&
+ cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EACCES;
+
+ return 0;
+}
+
+static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
@@ -9069,6 +9108,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
.name = "cpu",
.create = cpu_cgroup_create,
.destroy = cpu_cgroup_destroy,
+ .allow_attach = cpu_cgroup_allow_attach,
.can_attach_task = cpu_cgroup_can_attach_task,
.attach_task = cpu_cgroup_attach_task,
.exit = cpu_cgroup_exit,
@@ -9095,8 +9135,30 @@ struct cpuacct {
u64 __percpu *cpuusage;
struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
+ struct cpuacct_charge_calls *cpufreq_fn;
+ void *cpuacct_data;
};
+static struct cpuacct *cpuacct_root;
+
+/* Default calls for cpufreq accounting */
+static struct cpuacct_charge_calls *cpuacct_cpufreq;
+int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn)
+{
+ cpuacct_cpufreq = fn;
+
+ /*
+ * Root node is created before platform can register callbacks,
+	 * initialize here.
+ */
+ if (cpuacct_root && fn) {
+ cpuacct_root->cpufreq_fn = fn;
+ if (fn->init)
+ fn->init(&cpuacct_root->cpuacct_data);
+ }
+ return 0;
+}
+
struct cgroup_subsys cpuacct_subsys;
/* return cpu accounting group corresponding to this container */
@@ -9131,8 +9193,16 @@ static struct cgroup_subsys_state *cpuacct_create(
if (percpu_counter_init(&ca->cpustat[i], 0))
goto out_free_counters;
+ ca->cpufreq_fn = cpuacct_cpufreq;
+
+	/* If available, have platform code initialize cpu frequency table */
+ if (ca->cpufreq_fn && ca->cpufreq_fn->init)
+ ca->cpufreq_fn->init(&ca->cpuacct_data);
+
if (cgrp->parent)
ca->parent = cgroup_ca(cgrp->parent);
+ else
+ cpuacct_root = ca;
return &ca->css;
@@ -9260,6 +9330,32 @@ static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
+static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft,
+ struct cgroup_map_cb *cb)
+{
+ struct cpuacct *ca = cgroup_ca(cgrp);
+ if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show)
+ ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb);
+
+ return 0;
+}
+
+/* return total cpu power usage (milliWatt second) of a group */
+static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ int i;
+ struct cpuacct *ca = cgroup_ca(cgrp);
+ u64 totalpower = 0;
+
+ if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage)
+ for_each_present_cpu(i) {
+ totalpower += ca->cpufreq_fn->power_usage(
+ ca->cpuacct_data);
+ }
+
+ return totalpower;
+}
+
static struct cftype files[] = {
{
.name = "usage",
@@ -9274,6 +9370,14 @@ static struct cftype files[] = {
.name = "stat",
.read_map = cpuacct_stats_show,
},
+ {
+ .name = "cpufreq",
+ .read_map = cpuacct_cpufreq_show,
+ },
+ {
+ .name = "power",
+ .read_u64 = cpuacct_powerusage_read
+ },
};
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -9303,6 +9407,10 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
for (; ca; ca = ca->parent) {
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
*cpuusage += cputime;
+
+ /* Call back into platform code to account for CPU speeds */
+ if (ca->cpufreq_fn && ca->cpufreq_fn->charge)
+ ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu);
}
rcu_read_unlock();
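
Editorial note: a hedged sketch of how platform code could hook into the cpuacct cpufreq accounting above. cpuacct_register_cpufreq() and the callback names (init, charge, cpufreq_show, power_usage) are taken from this hunk, but struct cpuacct_charge_calls lives in <linux/cpuacct.h> and its exact field types are inferred from the call sites here, so treat the signatures as assumptions.

/* Illustrative only: platform registration of cpufreq accounting callbacks. */
static void my_acct_init(void **data)
{
	/* allocate per-cgroup accounting state into *data */
}

static void my_acct_charge(void *data, u64 cputime, unsigned int cpu)
{
	/* attribute @cputime to the current frequency of @cpu */
}

static u64 my_acct_power(void *data)
{
	return 0;	/* milliWatt-seconds consumed by this group */
}

static struct cpuacct_charge_calls my_acct_calls = {
	.init		= my_acct_init,
	.charge		= my_acct_charge,
	.power_usage	= my_acct_power,
	/* .cpufreq_show left NULL; the cpuacct.cpufreq file then stays empty */
};

static int __init my_acct_setup(void)
{
	return cpuacct_register_cpufreq(&my_acct_calls);
}
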
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 1eeaf747e56..e17ec398aa1 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -67,18 +67,6 @@
#include <linux/ktime.h>
#include <linux/sched.h>
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
-{
- return (unsigned long long)(jiffies - INITIAL_JIFFIES)
- * (NSEC_PER_SEC / HZ);
-}
-EXPORT_SYMBOL_GPL(sched_clock);
-
__read_mostly int sched_clock_running;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index a6710a112b4..29887ccd039 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -122,10 +122,11 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
(long long)(p->nvcsw + p->nivcsw),
p->prio);
#ifdef CONFIG_SCHEDSTATS
- SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
+ SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
SPLIT_NS(p->se.vruntime),
SPLIT_NS(p->se.sum_exec_runtime),
- SPLIT_NS(p->se.statistics.sum_sleep_runtime));
+ SPLIT_NS(p->se.statistics.sum_sleep_runtime),
+ SPLIT_NS(p->sched_info.last_queued));
#else
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -145,9 +146,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
SEQ_printf(m,
"\nrunnable tasks:\n"
" task PID tree-key switches prio"
- " exec-runtime sum-exec sum-sleep\n"
+ " exec-runtime sum-exec sum-sleep last-queued\n"
"------------------------------------------------------"
- "----------------------------------------------------\n");
+ "---------------------------------------------------------------------\n");
read_lock_irqsave(&tasklist_lock, flags);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fca82c32042..a9d2ba4c2fa 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,7 @@
#include <trace/events/irq.h>
#include <asm/irq.h>
+#include <mach/sec_debug.h>
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
@@ -235,7 +236,9 @@ restart:
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
+ sec_debug_softirq_log(9999, h->action, 4);
h->action(h);
+ sec_debug_softirq_log(9999, h->action, 5);
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
printk(KERN_ERR "huh, entered softirq %u %s %p"
@@ -460,7 +463,9 @@ static void tasklet_action(struct softirq_action *a)
if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
BUG();
+ sec_debug_softirq_log(9997, t->func, 4);
t->func(t->data);
+ sec_debug_softirq_log(9997, t->func, 5);
tasklet_unlock(t);
continue;
}
@@ -495,7 +500,9 @@ static void tasklet_hi_action(struct softirq_action *a)
if (!atomic_read(&t->count)) {
if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
BUG();
+ sec_debug_softirq_log(9998, t->func, 4);
t->func(t->data);
+ sec_debug_softirq_log(9998, t->func, 5);
tasklet_unlock(t);
continue;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 1c69aa71911..be53817497c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -333,12 +333,16 @@ void kernel_restart_prepare(char *cmd)
void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
+ if (pm_power_off_prepare)
+ pm_power_off_prepare();
disable_nonboot_cpus();
syscore_shutdown();
if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
- else
+	else {
printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+ printk(KERN_EMERG "pid = %d name:%s\n", task_tgid_vnr(current), current->comm);
+ }
kmsg_dump(KMSG_DUMP_RESTART);
machine_restart(cmd);
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index aaaa7e749ad..5b6afb27e8b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -96,6 +96,7 @@ extern char core_pattern[];
extern unsigned int core_pipe_limit;
extern int pid_max;
extern int min_free_kbytes;
+extern int min_free_order_shift;
extern int pid_max_min, pid_max_max;
extern int sysctl_drop_caches;
extern int percpu_pagelist_fraction;
@@ -1189,6 +1190,13 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
+ .procname = "min_free_order_shift",
+ .data = &min_free_order_shift,
+ .maxlen = sizeof(min_free_order_shift),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
.procname = "percpu_pagelist_fraction",
.data = &percpu_pagelist_fraction,
.maxlen = sizeof(percpu_pagelist_fraction),
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f06a8a36564..689fe69629e 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -27,3 +27,7 @@ config GENERIC_CLOCKEVENTS_BUILD
default y
depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
+# Selectable by architectures which want to reuse the clocksource as
+# sched_clock
+config HAVE_CLKSRC_SCHED_CLOCK
+ bool
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b8e8c..cae2ad7491b 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,5 +1,5 @@
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o posix-clock.o #alarmtimer.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index c3cbd8c34b4..e1889779bc9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -21,6 +21,9 @@
#include <linux/tick.h>
#include <linux/stop_machine.h>
+static void notrace sched_clock_clksrc_install(struct clocksource *clock);
+static void notrace sched_clock_clksrc_update(void);
+
/* Structure holding internal timekeeping values. */
struct timekeeper {
/* Current clocksource used for timekeeping. */
@@ -66,6 +69,9 @@ static void timekeeper_setup_internals(struct clocksource *clock)
cycle_t interval;
u64 tmp, ntpinterval;
+ if (clock->flags & CLOCK_SOURCE_SCHED_CLOCK)
+ sched_clock_clksrc_install(clock);
+
timekeeper.clock = clock;
clock->cycle_last = clock->read(clock);
@@ -1136,6 +1142,7 @@ void do_timer(unsigned long ticks)
{
jiffies_64 += ticks;
update_wall_time();
+ sched_clock_clksrc_update();
calc_global_load(ticks);
}
@@ -1220,3 +1227,121 @@ void xtime_update(unsigned long ticks)
do_timer(ticks);
write_sequnlock(&xtime_lock);
}
+
+/**
+ * struct sched_clksrc - clocksource based sched_clock
+ * @clock: Pointer to the clocksource
+ * @nsecs: Nanoseconds base value
+ * @seqcnt: Sequence counter for sched_clock
+ * @last_update: Counter value at last update
+ * @mult: Multiplier for nsec conversion
+ * @shift: Shift value (divisor) for nsec conversion
+ * @mask: Mask for the delta
+ * @update_cycles: Cycles after which we update nsecs and last_update
+ * @update_nsecs:	Nanoseconds value corresponding to @update_cycles
+ */
+struct sched_clksrc {
+ struct clocksource *clock;
+ u64 nsecs;
+ struct seqcount seqcnt;
+ u64 last_update;
+ u32 mult;
+ u32 shift;
+ u64 mask;
+ u64 update_cycles;
+ u64 update_nsecs;
+};
+
+static struct sched_clksrc sched_clksrc;
+
+/*
+ * Called from clocksource code when a clocksource usable for
+ * sched_clock is installed.
+ */
+static void notrace sched_clock_clksrc_install(struct clocksource *clock)
+{
+ u64 nsecs, cyc = clock->mask & CLOCKSOURCE_MASK(32);
+
+ if (sched_clksrc.clock)
+ return;
+
+ /* Make sure we get the wraparounds */
+ cyc >>= 2;
+
+ /* Use the raw mult/shift values */
+ sched_clksrc.mult = clock->mult;
+ sched_clksrc.shift = clock->shift;
+ sched_clksrc.mask = clock->mask;
+ sched_clksrc.update_cycles = cyc;
+ nsecs = clocksource_cyc2ns(cyc, sched_clksrc.mult, sched_clksrc.shift);
+ sched_clksrc.update_nsecs = nsecs;
+ /* Establish the base line */
+ sched_clksrc.nsecs = (u64)(jiffies - INITIAL_JIFFIES) *
+ (NSEC_PER_SEC / HZ);
+ sched_clksrc.last_update = clock->read(clock) & sched_clksrc.mask;
+ sched_clksrc.clock = clock;
+}
+
+/*
+ * Called from timekeeping code with xtime lock held and interrupts
+ * disabled, so we have only one updater at a time. Note that readers
+ * of sched_clock are _NOT_ affected by xtime_lock. We have our own
+ * sequence counter for sched_clksrc.
+ */
+static void notrace sched_clock_clksrc_update(void)
+{
+ struct clocksource *clock = sched_clksrc.clock;
+ u64 delta;
+
+ if (!clock)
+ return;
+
+ delta = clock->read(clock) - sched_clksrc.last_update;
+ delta &= sched_clksrc.mask;
+ while (delta >= sched_clksrc.update_cycles) {
+ delta -= sched_clksrc.update_cycles;
+ write_seqcount_begin(&sched_clksrc.seqcnt);
+ sched_clksrc.last_update += sched_clksrc.update_cycles;
+ sched_clksrc.nsecs += sched_clksrc.update_nsecs;
+ write_seqcount_end(&sched_clksrc.seqcnt);
+ }
+}
+
+/*
+ * Scheduler clock clocksource based - returns current time in nanosec units.
+ *
+ * Can be called from the default implementation below or from
+ * architecture code if it overrides the default implementation.
+ */
+unsigned long long notrace sched_clock_clksrc(void)
+{
+ struct clocksource *clock = sched_clksrc.clock;
+ unsigned int seq;
+ u64 nsecs, last, delta;
+
+ if (!sched_clksrc.clock)
+ return (unsigned long long)(jiffies - INITIAL_JIFFIES) *
+ (NSEC_PER_SEC / HZ);
+
+ do {
+ seq = read_seqcount_begin(&sched_clksrc.seqcnt);
+ last = sched_clksrc.last_update;
+ nsecs = sched_clksrc.nsecs;
+ } while (read_seqcount_retry(&sched_clksrc.seqcnt, seq));
+
+ delta = (clock->read(clock) - last) & sched_clksrc.mask;
+
+ return nsecs + clocksource_cyc2ns(delta, sched_clksrc.mult,
+ sched_clksrc.shift);
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is the default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+ return sched_clock_clksrc();
+}
+EXPORT_SYMBOL_GPL(sched_clock);
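
Editorial note: the conversion used by sched_clock_clksrc() is the usual clocksource mult/shift arithmetic, ns = (cycles * mult) >> shift, which is what clocksource_cyc2ns() evaluates. A standalone sketch with made-up values for a 24 MHz counter:

/* Illustrative only: clocksource-style cycles-to-nanoseconds conversion. */
#include <stdint.h>
#include <stdio.h>

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;	/* same form as clocksource_cyc2ns() */
}

int main(void)
{
	/* Made-up values for a 24 MHz counter: 1e9/24e6 * 2^26 ~= 2796202667. */
	uint32_t mult = 2796202667u;
	uint32_t shift = 26;

	/* One second's worth of cycles converts to ~1e9 ns. */
	printf("%llu ns\n", (unsigned long long)cyc2ns(24000000, mult, shift));
	return 0;
}

In the patch itself, update_cycles is capped at a quarter of an at-most-32-bit counter span ("Make sure we get the wraparounds"), so the periodic update in sched_clock_clksrc_update() runs well before the counter can wrap ambiguously.
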
diff --git a/kernel/timeconst.bc b/kernel/timeconst.bc
new file mode 100644
index 00000000000..511bdf2cafd
--- /dev/null
+++ b/kernel/timeconst.bc
@@ -0,0 +1,108 @@
+scale=0
+
+define gcd(a,b) {
+ auto t;
+ while (b) {
+ t = b;
+ b = a % b;
+ a = t;
+ }
+ return a;
+}
+
+/* Division by reciprocal multiplication. */
+define fmul(b,n,d) {
+ return (2^b*n+d-1)/d;
+}
+
+/* Adjustment factor when a ceiling value is used. Use as:
+   (imul * n) + ((fmulxx * n + fadjxx) >> xx) */
+define fadj(b,n,d) {
+ auto v;
+ d = d/gcd(n,d);
+ v = 2^b*(d-1)/d;
+ return v;
+}
+
+/* Compute the appropriate mul/adj values as well as a shift count,
+   which brings the mul value into the range 2^(b-1) <= x < 2^b.  Such
+ a shift value will be correct in the signed integer range and off
+ by at most one in the upper half of the unsigned range. */
+define fmuls(b,n,d) {
+ auto s, m;
+ for (s = 0; 1; s++) {
+ m = fmul(s,n,d);
+ if (m >= 2^(b-1))
+ return s;
+ }
+ return 0;
+}
+
+define timeconst(hz) {
+ print "/* Automatically generated by kernel/timeconst.bc */\n"
+ print "/* Time conversion constants for HZ == ", hz, " */\n"
+ print "\n"
+
+ print "#ifndef KERNEL_TIMECONST_H\n"
+ print "#define KERNEL_TIMECONST_H\n\n"
+
+ print "#include <linux/param.h>\n"
+ print "#include <linux/types.h>\n\n"
+
+ print "#if HZ != ", hz, "\n"
+ print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+ print "#endif\n\n"
+
+ if (hz < 2) {
+ print "#error Totally bogus HZ value!\n"
+ } else {
+ s=fmuls(32,1000,hz)
+ obase=16
+ print "#define HZ_TO_MSEC_MUL32\tU64_C(0x", fmul(s,1000,hz), ")\n"
+ print "#define HZ_TO_MSEC_ADJ32\tU64_C(0x", fadj(s,1000,hz), ")\n"
+ obase=10
+ print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
+
+ s=fmuls(32,hz,1000)
+ obase=16
+ print "#define MSEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000), ")\n"
+ print "#define MSEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000), ")\n"
+ obase=10
+ print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
+
+ obase=10
+ cd=gcd(hz,1000)
+ print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
+ print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
+ print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+ print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
+ print "\n"
+
+ s=fmuls(32,1000000,hz)
+ obase=16
+ print "#define HZ_TO_USEC_MUL32\tU64_C(0x", fmul(s,1000000,hz), ")\n"
+ print "#define HZ_TO_USEC_ADJ32\tU64_C(0x", fadj(s,1000000,hz), ")\n"
+ obase=10
+ print "#define HZ_TO_USEC_SHR32\t", s, "\n"
+
+ s=fmuls(32,hz,1000000)
+ obase=16
+ print "#define USEC_TO_HZ_MUL32\tU64_C(0x", fmul(s,hz,1000000), ")\n"
+ print "#define USEC_TO_HZ_ADJ32\tU64_C(0x", fadj(s,hz,1000000), ")\n"
+ obase=10
+ print "#define USEC_TO_HZ_SHR32\t", s, "\n"
+
+ obase=10
+ cd=gcd(hz,1000000)
+ print "#define HZ_TO_USEC_NUM\t\t", 1000000/cd, "\n"
+ print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
+ print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+ print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+ print "\n"
+
+ print "#endif /* KERNEL_TIMECONST_H */\n"
+ }
+ halt
+}
+
+timeconst(hz)
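
Editorial note: in the notation of fmul()/fadj()/fmuls() above, the generated constants implement division by reciprocal multiplication. A sketch of the math follows; the final usage form is how the MUL/ADJ/SHR triples are meant to be consumed by the jiffies conversion helpers, stated here as an assumption since those helpers are not part of this patch.

$$
\mathrm{MUL32} = \left\lceil \frac{2^{s} n}{d} \right\rceil, \qquad
\mathrm{ADJ32} = \left\lfloor \frac{2^{s} (d'-1)}{d'} \right\rfloor, \qquad
d' = \frac{d}{\gcd(n,d)},
$$

where fmuls() picks the smallest shift $s$ with $\mathrm{MUL32} \ge 2^{31}$. With $n = 1000$ and $d = \mathrm{HZ}$ this is intended to give

$$
\left\lfloor \frac{j \cdot \mathrm{HZ\_TO\_MSEC\_MUL32} + \mathrm{HZ\_TO\_MSEC\_ADJ32}}{2^{\mathrm{HZ\_TO\_MSEC\_SHR32}}} \right\rfloor
= \left\lceil \frac{1000\, j}{\mathrm{HZ}} \right\rceil
$$

for $j$ in the signed 32-bit range, and off by at most one in the upper half of the unsigned range, per the comment above fmuls().
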
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
deleted file mode 100644
index 3f42652a6a3..00000000000
--- a/kernel/timeconst.pl
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/perl
-# -----------------------------------------------------------------------
-#
-# Copyright 2007-2008 rPath, Inc. - All Rights Reserved
-#
-# This file is part of the Linux kernel, and is made available under
-# the terms of the GNU General Public License version 2 or (at your
-# option) any later version; incorporated herein by reference.
-#
-# -----------------------------------------------------------------------
-#
-
-#
-# Usage: timeconst.pl HZ > timeconst.h
-#
-
-# Precomputed values for systems without Math::BigInt
-# Generated by:
-# timeconst.pl --can 24 32 48 64 100 122 128 200 250 256 300 512 1000 1024 1200
-%canned_values = (
- 24 => [
- '0xa6aaaaab','0x2aaaaaa',26,
- 125,3,
- '0xc49ba5e4','0x1fbe76c8b4',37,
- 3,125,
- '0xa2c2aaab','0xaaaa',16,
- 125000,3,
- '0xc9539b89','0x7fffbce4217d',47,
- 3,125000,
- ], 32 => [
- '0xfa000000','0x6000000',27,
- 125,4,
- '0x83126e98','0xfdf3b645a',36,
- 4,125,
- '0xf4240000','0x0',17,
- 31250,1,
- '0x8637bd06','0x3fff79c842fa',46,
- 1,31250,
- ], 48 => [
- '0xa6aaaaab','0x6aaaaaa',27,
- 125,6,
- '0xc49ba5e4','0xfdf3b645a',36,
- 6,125,
- '0xa2c2aaab','0x15555',17,
- 62500,3,
- '0xc9539b89','0x3fffbce4217d',46,
- 3,62500,
- ], 64 => [
- '0xfa000000','0xe000000',28,
- 125,8,
- '0x83126e98','0x7ef9db22d',35,
- 8,125,
- '0xf4240000','0x0',18,
- 15625,1,
- '0x8637bd06','0x1fff79c842fa',45,
- 1,15625,
- ], 100 => [
- '0xa0000000','0x0',28,
- 10,1,
- '0xcccccccd','0x733333333',35,
- 1,10,
- '0x9c400000','0x0',18,
- 10000,1,
- '0xd1b71759','0x1fff2e48e8a7',45,
- 1,10000,
- ], 122 => [
- '0x8325c53f','0xfbcda3a',28,
- 500,61,
- '0xf9db22d1','0x7fbe76c8b',35,
- 61,500,
- '0x8012e2a0','0x3ef36',18,
- 500000,61,
- '0xffda4053','0x1ffffbce4217',45,
- 61,500000,
- ], 128 => [
- '0xfa000000','0x1e000000',29,
- 125,16,
- '0x83126e98','0x3f7ced916',34,
- 16,125,
- '0xf4240000','0x40000',19,
- 15625,2,
- '0x8637bd06','0xfffbce4217d',44,
- 2,15625,
- ], 200 => [
- '0xa0000000','0x0',29,
- 5,1,
- '0xcccccccd','0x333333333',34,
- 1,5,
- '0x9c400000','0x0',19,
- 5000,1,
- '0xd1b71759','0xfff2e48e8a7',44,
- 1,5000,
- ], 250 => [
- '0x80000000','0x0',29,
- 4,1,
- '0x80000000','0x180000000',33,
- 1,4,
- '0xfa000000','0x0',20,
- 4000,1,
- '0x83126e98','0x7ff7ced9168',43,
- 1,4000,
- ], 256 => [
- '0xfa000000','0x3e000000',30,
- 125,32,
- '0x83126e98','0x1fbe76c8b',33,
- 32,125,
- '0xf4240000','0xc0000',20,
- 15625,4,
- '0x8637bd06','0x7ffde7210be',43,
- 4,15625,
- ], 300 => [
- '0xd5555556','0x2aaaaaaa',30,
- 10,3,
- '0x9999999a','0x1cccccccc',33,
- 3,10,
- '0xd0555556','0xaaaaa',20,
- 10000,3,
- '0x9d495183','0x7ffcb923a29',43,
- 3,10000,
- ], 512 => [
- '0xfa000000','0x7e000000',31,
- 125,64,
- '0x83126e98','0xfdf3b645',32,
- 64,125,
- '0xf4240000','0x1c0000',21,
- 15625,8,
- '0x8637bd06','0x3ffef39085f',42,
- 8,15625,
- ], 1000 => [
- '0x80000000','0x0',31,
- 1,1,
- '0x80000000','0x0',31,
- 1,1,
- '0xfa000000','0x0',22,
- 1000,1,
- '0x83126e98','0x1ff7ced9168',41,
- 1,1000,
- ], 1024 => [
- '0xfa000000','0xfe000000',32,
- 125,128,
- '0x83126e98','0x7ef9db22',31,
- 128,125,
- '0xf4240000','0x3c0000',22,
- 15625,16,
- '0x8637bd06','0x1fff79c842f',41,
- 16,15625,
- ], 1200 => [
- '0xd5555556','0xd5555555',32,
- 5,6,
- '0x9999999a','0x66666666',31,
- 6,5,
- '0xd0555556','0x2aaaaa',22,
- 2500,3,
- '0x9d495183','0x1ffcb923a29',41,
- 3,2500,
- ]
-);
-
-$has_bigint = eval 'use Math::BigInt qw(bgcd); 1;';
-
-sub bint($)
-{
- my($x) = @_;
- return Math::BigInt->new($x);
-}
-
-#
-# Constants for division by reciprocal multiplication.
-# (bits, numerator, denominator)
-#
-sub fmul($$$)
-{
- my ($b,$n,$d) = @_;
-
- $n = bint($n);
- $d = bint($d);
-
- return scalar (($n << $b)+$d-bint(1))/$d;
-}
-
-sub fadj($$$)
-{
- my($b,$n,$d) = @_;
-
- $n = bint($n);
- $d = bint($d);
-
- $d = $d/bgcd($n, $d);
- return scalar (($d-bint(1)) << $b)/$d;
-}
-
-sub fmuls($$$) {
- my($b,$n,$d) = @_;
- my($s,$m);
- my($thres) = bint(1) << ($b-1);
-
- $n = bint($n);
- $d = bint($d);
-
- for ($s = 0; 1; $s++) {
- $m = fmul($s,$n,$d);
- return $s if ($m >= $thres);
- }
- return 0;
-}
-
-# Generate a hex value if the result fits in 64 bits;
-# otherwise skip.
-sub bignum_hex($) {
- my($x) = @_;
- my $s = $x->as_hex();
-
- return (length($s) > 18) ? undef : $s;
-}
-
-# Provides mul, adj, and shr factors for a specific
-# (bit, time, hz) combination
-sub muladj($$$) {
- my($b, $t, $hz) = @_;
- my $s = fmuls($b, $t, $hz);
- my $m = fmul($s, $t, $hz);
- my $a = fadj($s, $t, $hz);
- return (bignum_hex($m), bignum_hex($a), $s);
-}
-
-# Provides numerator, denominator values
-sub numden($$) {
- my($n, $d) = @_;
- my $g = bgcd($n, $d);
- return ($n/$g, $d/$g);
-}
-
-# All values for a specific (time, hz) combo
-sub conversions($$) {
- my ($t, $hz) = @_;
- my @val = ();
-
- # HZ_TO_xx
- push(@val, muladj(32, $t, $hz));
- push(@val, numden($t, $hz));
-
- # xx_TO_HZ
- push(@val, muladj(32, $hz, $t));
- push(@val, numden($hz, $t));
-
- return @val;
-}
-
-sub compute_values($) {
- my($hz) = @_;
- my @val = ();
- my $s, $m, $a, $g;
-
- if (!$has_bigint) {
- die "$0: HZ == $hz not canned and ".
- "Math::BigInt not available\n";
- }
-
- # MSEC conversions
- push(@val, conversions(1000, $hz));
-
- # USEC conversions
- push(@val, conversions(1000000, $hz));
-
- return @val;
-}
-
-sub outputval($$)
-{
- my($name, $val) = @_;
- my $csuf;
-
- if (defined($val)) {
- if ($name !~ /SHR/) {
- $val = "U64_C($val)";
- }
- printf "#define %-23s %s\n", $name.$csuf, $val.$csuf;
- }
-}
-
-sub output($@)
-{
- my($hz, @val) = @_;
- my $pfx, $bit, $suf, $s, $m, $a;
-
- print "/* Automatically generated by kernel/timeconst.pl */\n";
- print "/* Conversion constants for HZ == $hz */\n";
- print "\n";
- print "#ifndef KERNEL_TIMECONST_H\n";
- print "#define KERNEL_TIMECONST_H\n";
- print "\n";
-
- print "#include <linux/param.h>\n";
- print "#include <linux/types.h>\n";
-
- print "\n";
- print "#if HZ != $hz\n";
- print "#error \"kernel/timeconst.h has the wrong HZ value!\"\n";
- print "#endif\n";
- print "\n";
-
- foreach $pfx ('HZ_TO_MSEC','MSEC_TO_HZ',
- 'HZ_TO_USEC','USEC_TO_HZ') {
- foreach $bit (32) {
- foreach $suf ('MUL', 'ADJ', 'SHR') {
- outputval("${pfx}_$suf$bit", shift(@val));
- }
- }
- foreach $suf ('NUM', 'DEN') {
- outputval("${pfx}_$suf", shift(@val));
- }
- }
-
- print "\n";
- print "#endif /* KERNEL_TIMECONST_H */\n";
-}
-
-# Pretty-print Perl values
-sub perlvals(@) {
- my $v;
- my @l = ();
-
- foreach $v (@_) {
- if (!defined($v)) {
- push(@l, 'undef');
- } elsif ($v =~ /^0x/) {
- push(@l, "\'".$v."\'");
- } else {
- push(@l, $v.'');
- }
- }
- return join(',', @l);
-}
-
-($hz) = @ARGV;
-
-# Use this to generate the %canned_values structure
-if ($hz eq '--can') {
- shift(@ARGV);
- @hzlist = sort {$a <=> $b} (@ARGV);
-
- print "# Precomputed values for systems without Math::BigInt\n";
- print "# Generated by:\n";
- print "# timeconst.pl --can ", join(' ', @hzlist), "\n";
- print "\%canned_values = (\n";
- my $pf = "\t";
- foreach $hz (@hzlist) {
- my @values = compute_values($hz);
- print "$pf$hz => [\n";
- while (scalar(@values)) {
- my $bit;
- foreach $bit (32) {
- my $m = shift(@values);
- my $a = shift(@values);
- my $s = shift(@values);
- print "\t\t", perlvals($m,$a,$s), ",\n";
- }
- my $n = shift(@values);
- my $d = shift(@values);
- print "\t\t", perlvals($n,$d), ",\n";
- }
- print "\t]";
- $pf = ', ';
- }
- print "\n);\n";
-} else {
- $hz += 0; # Force to number
- if ($hz < 1) {
- die "Usage: $0 HZ\n";
- }
-
- $cv = $canned_values{$hz};
- @val = defined($cv) ? @$cv : compute_values($hz);
- output($hz, @val);
-}
-exit 0;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dc8438d2aa6..9b8ce3fd7d6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
+#include <mach/sec_debug.h>
#include "workqueue_sched.h"
@@ -1076,6 +1077,158 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
}
EXPORT_SYMBOL_GPL(queue_work);
+#ifdef CONFIG_WORKQUEUE_FRONT
+static void insert_work_front(struct cpu_workqueue_struct *cwq,
+ struct work_struct *work, struct list_head *head,
+ unsigned int extra_flags)
+{
+ struct global_cwq *gcwq = cwq->gcwq;
+
+ /* we own @work, set data and link */
+ set_work_cwq(work, cwq, extra_flags);
+
+ /*
+ * Ensure that we get the right work->data if we see the
+ * result of list_add() below, see try_to_grab_pending().
+ */
+ smp_wmb();
+
+ list_add(&work->entry, head);
+
+ /*
+ * Ensure either worker_sched_deactivated() sees the above
+ * list_add_tail() or we see zero nr_running to avoid workers
+ * lying around lazily while there are works to be processed.
+ */
+ smp_mb();
+
+ if (__need_more_worker(gcwq))
+ wake_up_worker(gcwq);
+}
+
+static void __queue_work_front(unsigned int cpu, struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ struct global_cwq *gcwq;
+ struct cpu_workqueue_struct *cwq;
+ struct list_head *worklist;
+ unsigned int work_flags;
+ unsigned long flags;
+
+ debug_work_activate(work);
+
+ /* if dying, only works from the same workqueue are allowed */
+ if (unlikely(wq->flags & WQ_DYING) &&
+ WARN_ON_ONCE(!is_chained_work(wq)))
+ return;
+
+ /* determine gcwq to use */
+ if (!(wq->flags & WQ_UNBOUND)) {
+ struct global_cwq *last_gcwq;
+
+ if (unlikely(cpu == WORK_CPU_UNBOUND))
+ cpu = raw_smp_processor_id();
+
+ /*
+ * It's multi cpu. If @wq is non-reentrant and @work
+ * was previously on a different cpu, it might still
+ * be running there, in which case the work needs to
+ * be queued on that cpu to guarantee non-reentrance.
+ */
+ gcwq = get_gcwq(cpu);
+ last_gcwq = get_work_gcwq(work);
+ if (wq->flags & WQ_NON_REENTRANT &&
+ (last_gcwq != NULL) && last_gcwq != gcwq) {
+ struct worker *worker;
+
+ spin_lock_irqsave(&last_gcwq->lock, flags);
+
+ worker = find_worker_executing_work(last_gcwq, work);
+
+ if (worker && worker->current_cwq->wq == wq)
+ gcwq = last_gcwq;
+ else {
+ /* meh... not running there, queue here */
+ spin_unlock_irqrestore(&last_gcwq->lock, flags);
+ spin_lock_irqsave(&gcwq->lock, flags);
+ }
+ } else
+ spin_lock_irqsave(&gcwq->lock, flags);
+ } else {
+ gcwq = get_gcwq(WORK_CPU_UNBOUND);
+ spin_lock_irqsave(&gcwq->lock, flags);
+ }
+
+ /* gcwq determined, get cwq and queue */
+ cwq = get_cwq(gcwq->cpu, wq);
+ trace_workqueue_queue_work(cpu, cwq, work);
+
+ BUG_ON(!list_empty(&work->entry));
+
+ cwq->nr_in_flight[cwq->work_color]++;
+ work_flags = work_color_to_flags(cwq->work_color);
+
+ if (likely(cwq->nr_active < cwq->max_active)) {
+ trace_workqueue_activate_work(work);
+ cwq->nr_active++;
+ worklist = gcwq_determine_ins_pos(gcwq, cwq);
+ } else {
+ work_flags |= WORK_STRUCT_DELAYED;
+ worklist = &cwq->delayed_works;
+ }
+
+ insert_work_front(cwq, work, worklist, work_flags);
+
+ spin_unlock_irqrestore(&gcwq->lock, flags);
+}
+
+/**
+ * queue_work_on_front - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+
+int
+queue_work_on_front(int cpu, struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ int ret = 0;
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work_front(cpu, wq, work);
+ ret = 1;
+ }
+ return ret;
+}
+
+/**
+ * queue_work_front - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
+ */
+int queue_work_front(struct workqueue_struct *wq, struct work_struct *work)
+{
+ int ret;
+
+ ret = queue_work_on_front(get_cpu(), wq, work);
+ put_cpu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work_front);
+#endif
+
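
Editorial note: with CONFIG_WORKQUEUE_FRONT enabled, a minimal sketch of using the front-queueing entry point added above; the workqueue, work item and handler are illustrative.

/* Illustrative only: push an urgent work item to the head of a workqueue. */
static void urgent_fn(struct work_struct *work)
{
	/* queued at the head of the worklist, so it typically runs before
	 * items that were queued earlier with queue_work() */
}
static DECLARE_WORK(urgent_work, urgent_fn);

static void kick_urgent(struct workqueue_struct *wq)
{
	/* Returns 0 if the work was already pending, non-zero otherwise. */
	if (!queue_work_front(wq, &urgent_work))
		pr_debug("urgent_work already queued\n");
}
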
/**
* queue_work_on - queue work on specific cpu
* @cpu: CPU number to execute work on
@@ -1874,6 +2027,8 @@ __acquires(&gcwq->lock)
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
+ sec_debug_work_log(worker, work, f);
+
f(work);
/*
* While we must be careful to not use "work" after this, the trace