Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
@@ -1791,19 +1791,20 @@ out:
}

/**
* cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
* cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
* @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
int cgroup_attach_task_current_cg(struct task_struct *tsk)
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
struct cgroupfs_root *root;
struct cgroup *cur_cg;
int retval = 0;

cgroup_lock();
for_each_active_root(root) {
cur_cg = task_cgroup_from_root(current, root);
retval = cgroup_attach_task(cur_cg, tsk);
struct cgroup *from_cg = task_cgroup_from_root(from, root);

retval = cgroup_attach_task(from_cg, tsk);
if (retval)
break;
}
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)

return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/*
* Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key, struct tty_struct *tty)
static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");

@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
int free;
char *symname = NULL;
long offset = 0ul;
int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
/*
* Find an empty bp structure to allocate
*/
free = KDB_MAXBPT;
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;

@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
if (argc != 1)
return KDB_ARGCOUNT;
kdb_trap_printk++;
__handle_sysrq(*argv[1], NULL, 0);
__handle_sysrq(*argv[1], false);
kdb_trap_printk--;

return 0;
kernel/gcov/fs.c: 250 changed lines
@@ -33,10 +33,11 @@
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
* @info: associated profiling data structure if not a directory
* @ghost: when an object file containing profiling data is unloaded we keep a
* copy of the profiling data here to allow collecting coverage data
* for cleanup code. Such a node is called a "ghost".
* @loaded_info: array of pointers to profiling data sets for loaded object
* files.
* @num_loaded: number of profiling data sets for loaded object files.
* @unloaded_info: accumulated copy of profiling data sets for unloaded
* object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
struct list_head children;
struct list_head all;
struct gcov_node *parent;
struct gcov_info *info;
struct gcov_info *ghost;
struct gcov_info **loaded_info;
struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
int num_loaded;
char name[0];
};

@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
};

/*
* Return the profiling data set for a given node. This can either be the
* original profiling data structure or a duplicate (also called "ghost")
* in case the associated object file has been unloaded.
* Return a profiling data set associated with the given node. This is
* either a data set for a loaded object file or a data set copy in case
* all associated object files have been unloaded.
*/
static struct gcov_info *get_node_info(struct gcov_node *node)
{
if (node->info)
return node->info;
if (node->num_loaded > 0)
return node->loaded_info[0];

return node->ghost;
return node->unloaded_info;
}

/*
* Return a newly allocated profiling data set which contains the sum of
* all profiling data associated with the given node.
*/
static struct gcov_info *get_accumulated_info(struct gcov_node *node)
{
struct gcov_info *info;
int i = 0;

if (node->unloaded_info)
info = gcov_info_dup(node->unloaded_info);
else
info = gcov_info_dup(node->loaded_info[i++]);
if (!info)
return NULL;
for (; i < node->num_loaded; i++)
gcov_info_add(info, node->loaded_info[i]);

return info;
}

/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
mutex_lock(&node_lock);
/*
* Read from a profiling data copy to minimize reference tracking
* complexity and concurrent access.
* complexity and concurrent access and to keep accumulating multiple
* profiling data sets associated with one node simple.
*/
info = gcov_info_dup(get_node_info(node));
info = get_accumulated_info(node);
if (!info)
goto out_unlock;
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
return NULL;
}

/*
* Reset all profiling data associated with the specified node.
*/
static void reset_node(struct gcov_node *node)
{
int i;

if (node->unloaded_info)
gcov_info_reset(node->unloaded_info);
for (i = 0; i < node->num_loaded; i++)
gcov_info_reset(node->loaded_info[i]);
}

static void remove_node(struct gcov_node *node);

/*
* write() implementation for gcov data files. Reset profiling data for the
* associated file. If the object file has been unloaded (i.e. this is
* a "ghost" node), remove the debug fs node as well.
* corresponding file. If all associated object files have been unloaded,
* remove the debug fs node as well.
*/
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
node = get_node_by_name(info->filename);
if (node) {
/* Reset counts or remove node for unloaded modules. */
if (node->ghost)
if (node->num_loaded == 0)
remove_node(node);
else
gcov_info_reset(node->info);
reset_node(node);
}
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
node->info = info;
if (node->loaded_info) {
node->loaded_info[0] = info;
node->num_loaded = 1;
}
node->parent = parent;
if (name)
strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
struct gcov_node *node;

node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
if (!node) {
pr_warning("out of memory\n");
return NULL;
if (!node)
goto err_nomem;
if (info) {
node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
GFP_KERNEL);
if (!node->loaded_info)
goto err_nomem;
}
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
list_add(&node->all, &all_head);

return node;

err_nomem:
kfree(node);
pr_warning("out of memory\n");
return NULL;
}

/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
list_del(&node->all);
debugfs_remove(node->dentry);
remove_links(node);
if (node->ghost)
gcov_info_free(node->ghost);
kfree(node->loaded_info);
if (node->unloaded_info)
gcov_info_free(node->unloaded_info);
kfree(node);
}

@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,

/*
* write() implementation for reset file. Reset all profiling data to zero
* and remove ghost nodes.
* and remove nodes for which all associated object files are unloaded.
*/
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
mutex_lock(&node_lock);
restart:
list_for_each_entry(node, &all_head, all) {
if (node->info)
gcov_info_reset(node->info);
if (node->num_loaded > 0)
reset_node(node);
else if (list_empty(&node->children)) {
remove_node(node);
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
}

/*
* The profiling data set associated with this node is being unloaded. Store a
* copy of the profiling data and turn this node into a "ghost".
* Associate a profiling data set with an existing node. Needs to be called
* with node_lock held.
*/
static int ghost_node(struct gcov_node *node)
static void add_info(struct gcov_node *node, struct gcov_info *info)
{
node->ghost = gcov_info_dup(node->info);
if (!node->ghost) {
pr_warning("could not save data for '%s' (out of memory)\n",
node->info->filename);
return -ENOMEM;
}
node->info = NULL;
struct gcov_info **loaded_info;
int num = node->num_loaded;

return 0;
/*
* Prepare new array. This is done first to simplify cleanup in
* case the new data set is incompatible, the node only contains
* unloaded data sets and there's not enough memory for the array.
*/
loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
if (!loaded_info) {
pr_warning("could not add '%s' (out of memory)\n",
info->filename);
return;
}
memcpy(loaded_info, node->loaded_info,
num * sizeof(struct gcov_info *));
loaded_info[num] = info;
/* Check if the new data set is compatible. */
if (num == 0) {
/*
* A module was unloaded, modified and reloaded. The new
* data set replaces the copy of the last one.
*/
if (!gcov_info_is_compatible(node->unloaded_info, info)) {
pr_warning("discarding saved data for %s "
"(incompatible version)\n", info->filename);
gcov_info_free(node->unloaded_info);
node->unloaded_info = NULL;
}
} else {
/*
* Two different versions of the same object file are loaded.
* The initial one takes precedence.
*/
if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
pr_warning("could not add '%s' (incompatible "
"version)\n", info->filename);
kfree(loaded_info);
return;
}
}
/* Overwrite previous array. */
kfree(node->loaded_info);
node->loaded_info = loaded_info;
node->num_loaded = num + 1;
}

/*
* Profiling data for this node has been loaded again. Add profiling data
* from previous instantiation and turn this node into a regular node.
* Return the index of a profiling data set associated with a node.
*/
static void revive_node(struct gcov_node *node, struct gcov_info *info)
static int get_info_index(struct gcov_node *node, struct gcov_info *info)
{
if (gcov_info_is_compatible(node->ghost, info))
gcov_info_add(info, node->ghost);
else {
pr_warning("discarding saved data for '%s' (version changed)\n",
info->filename);
int i;

for (i = 0; i < node->num_loaded; i++) {
if (node->loaded_info[i] == info)
return i;
}
gcov_info_free(node->ghost);
node->ghost = NULL;
node->info = info;
return -ENOENT;
}

/*
* Save the data of a profiling data set which is being unloaded.
*/
static void save_info(struct gcov_node *node, struct gcov_info *info)
{
if (node->unloaded_info)
gcov_info_add(node->unloaded_info, info);
else {
node->unloaded_info = gcov_info_dup(info);
if (!node->unloaded_info) {
pr_warning("could not save data for '%s' "
"(out of memory)\n", info->filename);
}
}
}

/*
* Disassociate a profiling data set from a node. Needs to be called with
* node_lock held.
*/
static void remove_info(struct gcov_node *node, struct gcov_info *info)
{
int i;

i = get_info_index(node, info);
if (i < 0) {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
return;
}
if (gcov_persist)
save_info(node, info);
/* Shrink array. */
node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
node->num_loaded--;
if (node->num_loaded > 0)
return;
/* Last loaded data set was removed. */
kfree(node->loaded_info);
node->loaded_info = NULL;
node->num_loaded = 0;
if (!node->unloaded_info)
remove_node(node);
}

/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
node = get_node_by_name(info->filename);
switch (action) {
case GCOV_ADD:
/* Add new node or revive ghost. */
if (!node) {
if (node)
add_info(node, info);
else
add_node(info);
break;
}
if (gcov_persist)
revive_node(node, info);
else {
pr_warning("could not add '%s' (already exists)\n",
info->filename);
}
break;
case GCOV_REMOVE:
/* Remove node or turn into ghost. */
if (!node) {
if (node)
remove_info(node, info);
else {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
break;
}
if (gcov_persist) {
if (!ghost_node(node))
break;
}
remove_node(node);
break;
}
mutex_unlock(&node_lock);
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
int cmp = grp - GROUP_AT(group_info, mid);
if (cmp > 0)
if (grp > GROUP_AT(group_info, mid))
left = mid + 1;
else if (cmp < 0)
else if (grp < GROUP_AT(group_info, mid))
right = mid;
else
return 1;

@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
struct hrtimer_clock_base *base;
unsigned long flags;
ktime_t rem;

base = lock_hrtimer_base(timer, &flags);
lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
@@ -36,15 +36,6 @@
# include <asm/mutex.h>
#endif

/***
* mutex_init - initialize the mutex
* @lock: the mutex to be initialized
* @key: the lock_class_key for the class; used by mutex lock debugging
*
* Initialize the mutex to unlocked state.
*
* It is not allowed to initialize an already locked mutex.
*/
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
/**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
/**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
* mutex_lock_interruptible - acquire the mutex, interruptable
/**
* mutex_lock_interruptible - acquire the mutex, interruptible
* @lock: the mutex to be acquired
*
* Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
return prev == 1;
}

/***
* mutex_trylock - try acquire the mutex, without waiting
/**
* mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
* it is negated to the down_trylock() return values! Be careful
* it is negated from the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);

/**
* pm_qos_add_request - inserts new qos request into the list
* @pm_qos_class: identifies which list of qos request to us
* @dep: pointer to a preallocated handle
* @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
* for the pm_qos_class of parameters, and returns the pm_qos_request list
* element as a handle for use in updating and removal. Call needs to save
* this handle for later use.
* for the pm_qos_class of parameters and initializes the pm_qos_request_list
* handle. Caller needs to save this handle for later use in updates and
* removal.
*/

void pm_qos_add_request(struct pm_qos_request_list *dep,
int pm_qos_class, s32 value)
{
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)

pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;

@@ -387,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
} else if (count == 11) { /* len('0x12345678/0') */
if (copy_from_user(ascii_value, buf, 11))
return -EFAULT;
if (strlen(ascii_value) != 10)
return -EINVAL;
x = sscanf(ascii_value, "%x", &value);
if (x != 1)
return -EINVAL;
pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
} else
return -EINVAL;
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
goto Close;

suspend_console();
hibernation_freeze_swap();
saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
error = dpm_suspend_start(PMSG_FREEZE);
if (error)

@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)

static DECLARE_WORK(poweroff_work, do_poweroff);

static void handle_poweroff(int key, struct tty_struct *tty)
static void handle_poweroff(int key)
{
/* run sysrq poweroff on boot cpu */
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);

@@ -1086,7 +1086,6 @@ void swsusp_free(void)
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
hibernation_thaw_swap();
}

/* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages)
static unsigned long preallocate_image_memory(unsigned long nr_pages,
unsigned long avail_normal)
{
return preallocate_image_pages(nr_pages, GFP_IMAGE);
unsigned long alloc;

if (avail_normal <= alloc_normal)
return 0;

alloc = avail_normal - alloc_normal;
if (nr_pages < alloc)
alloc = nr_pages;

return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
*/
static void free_unnecessary_pages(void)
{
unsigned long save_highmem, to_free_normal, to_free_highmem;
unsigned long save, to_free_normal, to_free_highmem;

to_free_normal = alloc_normal - count_data_pages();
save_highmem = count_highmem_pages();
if (alloc_highmem > save_highmem) {
to_free_highmem = alloc_highmem - save_highmem;
save = count_data_pages();
if (alloc_normal >= save) {
to_free_normal = alloc_normal - save;
save = 0;
} else {
to_free_normal = 0;
save -= alloc_normal;
}
save += count_highmem_pages();
if (alloc_highmem >= save) {
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
to_free_normal -= save_highmem - alloc_highmem;
to_free_normal -= save - alloc_highmem;
}

memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
{
struct zone *zone;
unsigned long saveable, size, max_size, count, highmem, pages = 0;
unsigned long alloc, save_highmem, pages_highmem;
unsigned long alloc, save_highmem, pages_highmem, avail_normal;
struct timeval start, stop;
int error;

@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
avail_normal = count;
count += highmem;
count -= totalreserve_pages;

@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
pages += preallocate_image_memory(saveable - pages);
pages += preallocate_image_memory(saveable - pages, avail_normal);
goto out;
}

/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
/*
* To avoid excessive pressure on the normal zone, leave room in it to
* accommodate an image of the minimum size (unless it's already too
* small, in which case don't preallocate pages from it at all).
*/
if (avail_normal > pages)
avail_normal -= pages;
else
avail_normal = 0;
if (size < pages)
size = min_t(unsigned long, pages, max_size);

@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = (count - max_size) - pages_highmem;
pages = preallocate_image_memory(alloc);
if (pages < alloc)
goto err_out;
size = max_size - size;
alloc = size;
size = preallocate_highmem_fraction(size, highmem, count);
pages_highmem += size;
alloc -= size;
pages += preallocate_image_memory(alloc);
pages += pages_highmem;
pages = preallocate_image_memory(alloc, avail_normal);
if (pages < alloc) {
/* We have exhausted non-highmem pages, try highmem. */
alloc -= pages;
pages += pages_highmem;
pages_highmem = preallocate_image_highmem(alloc);
if (pages_highmem < alloc)
goto err_out;
pages += pages_highmem;
/*
* size is the desired number of saveable pages to leave in
* memory, so try to preallocate (all memory - size) pages.
*/
alloc = (count - pages) - size;
pages += preallocate_image_highmem(alloc);
} else {
/*
* There are approximately max_size saveable pages at this point
* and we want to reduce this number down to size.
*/
alloc = max_size - size;
size = preallocate_highmem_fraction(alloc, highmem, count);
pages_highmem += size;
alloc -= size;
size = preallocate_image_memory(alloc, avail_normal);
pages_highmem += preallocate_image_highmem(alloc - size);
pages += pages_highmem + size;
}

/*
* We only need as many page frames for the image as there are saveable
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;

offset = swp_offset(get_swap_for_hibernation(swap));
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free_for_hibernation(swp_entry(swap, offset));
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
swap_free_for_hibernation(swp_entry(swap, offset));
swap_free(swp_entry(swap, offset));

kfree(ext);
}
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}

static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

#if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)

this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}

sched_avg_update(this_rq);
}

static void update_cpu_load_active(struct rq *this_rq)

@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
* Minimal preemption granularity for CPU-bound tasks:
* (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_min_granularity = 2000000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
static unsigned int sched_nr_latency = 3;
static unsigned int sched_nr_latency = 8;

/*
* After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int load_idx)
{
struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
struct sched_group *idlest = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int imbalance = 100 + (sd->imbalance_pct-100)/2;

@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,

if (local_group) {
this_load = avg_load;
this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
struct rq *rq = cpu_rq(cpu);
u64 total, available;

sched_avg_update(rq);

total = sched_avg_period() + (rq->clock - rq->age_stamp);
available = total - rq->rt_avg;

@@ -3752,6 +3749,8 @@ static void task_fork_fair(struct task_struct *p)

raw_spin_lock_irqsave(&rq->lock, flags);

update_rq_clock(rq);

if (unlikely(task_cpu(p) != this_cpu))
__set_task_cpu(p, this_cpu);
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
rcu_read_lock();

/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
return err;
}

@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
{
sysctl_set_parent(NULL, root_table);
#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
{
int err;
err = sysctl_check_table(current->nsproxy, root_table);
}
sysctl_check_table(current->nsproxy, root_table);
#endif
return 0;
}
@@ -1358,24 +1358,29 @@ enum {
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
struct ftrace_page *pg;
int hidx;
int idx;
unsigned flags;
struct trace_parser parser;
loff_t pos;
loff_t func_pos;
struct ftrace_page *pg;
struct dyn_ftrace *func;
struct ftrace_func_probe *probe;
struct trace_parser parser;
int hidx;
int idx;
unsigned flags;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
t_hash_next(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
struct hlist_node *hnd = v;
struct hlist_node *hnd = NULL;
struct hlist_head *hhd;

WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

(*pos)++;
iter->pos = *pos;

if (iter->probe)
hnd = &iter->probe->node;
retry:
if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
return NULL;
@@ -1398,7 +1403,12 @@ t_hash_next(struct seq_file *m, void *v, loff_t *pos)
}
}

return hnd;
if (WARN_ON_ONCE(!hnd))
return NULL;

iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

return iter;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
@@ -1407,26 +1417,32 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
void *p = NULL;
loff_t l;

if (!(iter->flags & FTRACE_ITER_HASH))
*pos = 0;

iter->flags |= FTRACE_ITER_HASH;
if (iter->func_pos > *pos)
return NULL;

iter->hidx = 0;
for (l = 0; l <= *pos; ) {
p = t_hash_next(m, p, &l);
for (l = 0; l <= (*pos - iter->func_pos); ) {
p = t_hash_next(m, &l);
if (!p)
break;
}
return p;
if (!p)
return NULL;

/* Only set this if we have an item */
iter->flags |= FTRACE_ITER_HASH;

return iter;
}

static int t_hash_show(struct seq_file *m, void *v)
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
struct ftrace_func_probe *rec;
struct hlist_node *hnd = v;

rec = hlist_entry(hnd, struct ftrace_func_probe, node);
rec = iter->probe;
if (WARN_ON_ONCE(!rec))
return -EIO;

if (rec->ops->print)
return rec->ops->print(m, rec->ip, rec->ops, rec->data);
@@ -1447,12 +1463,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
struct dyn_ftrace *rec = NULL;

if (iter->flags & FTRACE_ITER_HASH)
return t_hash_next(m, v, pos);
return t_hash_next(m, pos);

(*pos)++;
iter->pos = *pos;

if (iter->flags & FTRACE_ITER_PRINTALL)
return NULL;
return t_hash_start(m, pos);

retry:
if (iter->idx >= iter->pg->index) {
@@ -1481,7 +1498,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
}
}

return rec;
if (!rec)
return t_hash_start(m, pos);

iter->func_pos = *pos;
iter->func = rec;

return iter;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
iter->pos = 0;
iter->func_pos = 0;
iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
}

static void *t_start(struct seq_file *m, loff_t *pos)
@@ -1491,6 +1521,12 @@ static void *t_start(struct seq_file *m, loff_t *pos)
loff_t l;

mutex_lock(&ftrace_lock);
/*
* If an lseek was done, then reset and start from beginning.
*/
if (*pos < iter->pos)
reset_iter_read(iter);

/*
* For set_ftrace_filter reading, if we have the filter
* off, we can short cut and just print out that all
@@ -1500,12 +1536,19 @@ static void *t_start(struct seq_file *m, loff_t *pos)
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
/* reset in case of seek/pread */
iter->flags &= ~FTRACE_ITER_HASH;
return iter;
}

if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);

/*
* Unfortunately, we need to restart at ftrace_pages_start
* every time we let go of the ftrace_mutex. This is because
* those pointers can change without the lock.
*/
iter->pg = ftrace_pages_start;
iter->idx = 0;
for (l = 0; l <= *pos; ) {
@@ -1514,10 +1557,14 @@ static void *t_start(struct seq_file *m, loff_t *pos)
break;
}

if (!p && iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);
if (!p) {
if (iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);

return p;
return NULL;
}

return iter;
}

static void t_stop(struct seq_file *m, void *p)
@@ -1528,16 +1575,18 @@ static void t_stop(struct seq_file *m, void *p)
static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
struct dyn_ftrace *rec;

if (iter->flags & FTRACE_ITER_HASH)
return t_hash_show(m, v);
return t_hash_show(m, iter);

if (iter->flags & FTRACE_ITER_PRINTALL) {
seq_printf(m, "#### all functions enabled ####\n");
return 0;
}

rec = iter->func;

if (!rec)
return 0;

@@ -2406,7 +2455,7 @@ static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
.llseek = no_llseek,
.llseek = ftrace_regex_lseek,
.release = ftrace_filter_release,
};
@@ -2994,13 +2994,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
unsigned length;

cpu_buffer = iter->cpu_buffer;
buffer = cpu_buffer->buffer;

/*
* Check if we are at the end of the buffer.
@@ -90,7 +90,8 @@ enum {
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Set during initialization and read-only afterwards.
* I: Modifiable by initialization/destruction paths and read-only for
* everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp))
#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask) free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
struct list_head *worklist;
unsigned int work_flags;
unsigned long flags;

debug_work_activate(work);

if (WARN_ON_ONCE(wq->flags & WQ_DYING))
return;

/* determine gcwq to use */
if (!(wq->flags & WQ_UNBOUND)) {
struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
BUG_ON(!list_empty(&work->entry));

cwq->nr_in_flight[cwq->work_color]++;
work_flags = work_color_to_flags(cwq->work_color);

if (likely(cwq->nr_active < cwq->max_active)) {
cwq->nr_active++;
worklist = gcwq_determine_ins_pos(gcwq, cwq);
} else
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works;
}

insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
insert_work(cwq, work, worklist, work_flags);

spin_unlock_irqrestore(&gcwq->lock, flags);
}
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
* bound), %false if offline.
*/
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
struct global_cwq *gcwq = worker->gcwq;
struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
* otherwise.
*/
static bool maybe_create_worker(struct global_cwq *gcwq)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
if (!need_to_create_worker(gcwq))
return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

move_linked_works(work, pos, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++;
}

@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
* @color: color of work which left the queue
* @delayed: for a delayed work
*
* A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
bool delayed)
{
/* ignore uncolored works */
if (color == WORK_NO_COLOR)
return;

cwq->nr_in_flight[color]--;
cwq->nr_active--;

if (!list_empty(&cwq->delayed_works)) {
/* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
if (!delayed) {
cwq->nr_active--;
if (!list_empty(&cwq->delayed_works)) {
/* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
}
}

/* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color);
cwq_dec_nr_in_flight(cwq, work_color, false);
}

/**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
debug_work_deactivate(work);
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work),
get_work_color(work));
get_work_color(work),
*work_data_bits(work) & WORK_STRUCT_DELAYED);
ret = 1;
}
}
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
if (IS_ERR(rescuer->task))
goto err;

wq->rescuer = rescuer;
rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
{
unsigned int cpu;

wq->flags |= WQ_DYING;
flush_workqueue(wq);

/*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
if (wq->flags & WQ_RESCUER) {
kthread_stop(wq->rescuer->task);
free_mayday_mask(wq->mayday_mask);
kfree(wq->rescuer);
}

free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
* multiple times. To be used by cpu_callback.
*/
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
if (!(gcwq->trustee_state == state ||
gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
spin_lock_init(&gcwq->lock);
INIT_LIST_HEAD(&gcwq->worklist);
gcwq->cpu = cpu;
if (cpu == WORK_CPU_UNBOUND)
gcwq->flags |= GCWQ_DISASSOCIATED;
gcwq->flags |= GCWQ_DISASSOCIATED;

INIT_LIST_HEAD(&gcwq->idle_list);
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker *worker;

if (cpu != WORK_CPU_UNBOUND)
gcwq->flags &= ~GCWQ_DISASSOCIATED;
worker = create_worker(gcwq, true);
BUG_ON(!worker);
spin_lock_irq(&gcwq->lock);