Merge branch 'sched/migrate-disable'
@@ -1602,7 +1602,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.name			= "ap:online",
 	},
 	/*
-	 * Handled on controll processor until the plugged processor manages
+	 * Handled on control processor until the plugged processor manages
 	 * this itself.
 	 */
 	[CPUHP_TEARDOWN_CPU] = {
@@ -1611,6 +1611,13 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.teardown.single	= takedown_cpu,
 		.cant_stop		= true,
 	},
+
+	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
+		.name			= "sched:waitempty",
+		.startup.single		= NULL,
+		.teardown.single	= sched_cpu_wait_empty,
+	},
+
 	/* Handle smpboot threads park/unpark */
 	[CPUHP_AP_SMPBOOT_THREADS] = {
 		.name			= "smpboot/threads:online",
File diff suppressed because it is too large
@@ -120,7 +120,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	const struct sched_dl_entity *dl_se = &p->dl;
 
 	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
+	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
 		unsigned long cap, max_cap = 0;
 		int cpu, max_cpu = -1;
 
@@ -151,7 +151,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 
 		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
-		if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
+		if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
 		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 			if (later_mask)
 				cpumask_set_cpu(best_cpu, later_mask);
 
@@ -97,11 +97,11 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
 	if (skip)
 		return 0;
 
-	if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
+	if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
 		return 0;
 
 	if (lowest_mask) {
-		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
 
 		/*
 		 * We have to ensure that we have at least one bit
 
@@ -559,7 +559,7 @@ static int push_dl_task(struct rq *rq);
 
 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 {
-	return dl_task(prev);
+	return rq->online && dl_task(prev);
 }
 
 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
@@ -1931,7 +1931,7 @@ static void task_fork_dl(struct task_struct *p)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, p->cpus_ptr))
+	    cpumask_test_cpu(cpu, &p->cpus_mask))
 		return 1;
 	return 0;
 }
@@ -2021,8 +2021,8 @@ static int find_later_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(later_mask,
-						     sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(later_mask,
+							      sched_domain_span(sd));
 			/*
 			 * Last chance: if a CPU being in both later_mask
 			 * and current sd span is valid, that becomes our
@@ -2044,7 +2044,7 @@ static int find_later_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(later_mask);
+	cpu = cpumask_any_distribute(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
@@ -2081,7 +2081,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
+				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
 				     task_running(rq, task) ||
 				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
@@ -2148,6 +2148,9 @@ static int push_dl_task(struct rq *rq)
 		return 0;
 
 retry:
+	if (is_migration_disabled(next_task))
+		return 0;
+
 	if (WARN_ON(next_task == rq->curr))
 		return 0;
 
@@ -2225,7 +2228,7 @@ static void push_dl_tasks(struct rq *rq)
 static void pull_dl_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, cpu;
-	struct task_struct *p;
+	struct task_struct *p, *push_task;
 	bool resched = false;
 	struct rq *src_rq;
 	u64 dmin = LONG_MAX;
@@ -2255,6 +2258,7 @@ static void pull_dl_task(struct rq *this_rq)
 			continue;
 
 		/* Might drop this_rq->lock */
+		push_task = NULL;
 		double_lock_balance(this_rq, src_rq);
 
 		/*
@@ -2286,17 +2290,27 @@ static void pull_dl_task(struct rq *this_rq)
 					   src_rq->curr->dl.deadline))
 				goto skip;
 
-			resched = true;
-
-			deactivate_task(src_rq, p, 0);
-			set_task_cpu(p, this_cpu);
-			activate_task(this_rq, p, 0);
-			dmin = p->dl.deadline;
+			if (is_migration_disabled(p)) {
+				push_task = get_push_task(src_rq);
+			} else {
+				deactivate_task(src_rq, p, 0);
+				set_task_cpu(p, this_cpu);
+				activate_task(this_rq, p, 0);
+				dmin = p->dl.deadline;
+				resched = true;
+			}
 
 			/* Is there any other task even earlier? */
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
+
+		if (push_task) {
+			raw_spin_unlock(&this_rq->lock);
+			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+					    push_task, &src_rq->push_work);
+			raw_spin_lock(&this_rq->lock);
+		}
 	}
 
 	if (resched)
@@ -2320,7 +2334,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask)
+				const struct cpumask *new_mask,
+				u32 flags)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2349,7 +2364,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask);
+	set_cpus_allowed_common(p, new_mask, flags);
 }
 
 /* Assumes rq->lock is held */
@@ -2542,6 +2557,7 @@ DEFINE_SCHED_CLASS(dl) = {
 	.rq_online		= rq_online_dl,
 	.rq_offline		= rq_offline_dl,
 	.task_woken		= task_woken_dl,
+	.find_lock_rq		= find_lock_later_rq,
 #endif
 
 	.task_tick		= task_tick_dl,
@@ -265,7 +265,7 @@ static void pull_rt_task(struct rq *this_rq);
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	return rq->rt.highest_prio.curr > prev->prio;
+	return rq->online && rq->rt.highest_prio.curr > prev->prio;
 }
 
 static inline int rt_overloaded(struct rq *rq)
@@ -1660,7 +1660,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, p->cpus_ptr))
+	    cpumask_test_cpu(cpu, &p->cpus_mask))
 		return 1;
 
 	return 0;
@@ -1754,8 +1754,8 @@ static int find_lowest_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(lowest_mask,
-						     sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(lowest_mask,
+							      sched_domain_span(sd));
 			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
@@ -1772,7 +1772,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(lowest_mask);
+	cpu = cpumask_any_distribute(lowest_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
@@ -1813,7 +1813,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
+				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
 				     task_running(rq, task) ||
 				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
@@ -1861,7 +1861,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
  * running task can migrate over to a CPU that is running a task
  * of lesser priority.
  */
-static int push_rt_task(struct rq *rq)
+static int push_rt_task(struct rq *rq, bool pull)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
@@ -1875,6 +1875,34 @@ static int push_rt_task(struct rq *rq)
 		return 0;
 
 retry:
+	if (is_migration_disabled(next_task)) {
+		struct task_struct *push_task = NULL;
+		int cpu;
+
+		if (!pull || rq->push_busy)
+			return 0;
+
+		cpu = find_lowest_rq(rq->curr);
+		if (cpu == -1 || cpu == rq->cpu)
+			return 0;
+
+		/*
+		 * Given we found a CPU with lower priority than @next_task,
+		 * therefore it should be running. However we cannot migrate it
+		 * to this other CPU, instead attempt to push the current
+		 * running task on this CPU away.
+		 */
+		push_task = get_push_task(rq);
+		if (push_task) {
+			raw_spin_unlock(&rq->lock);
+			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
+					    push_task, &rq->push_work);
+			raw_spin_lock(&rq->lock);
+		}
+
+		return 0;
+	}
+
 	if (WARN_ON(next_task == rq->curr))
 		return 0;
 
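The retry block above captures the migrate-disable rule on the RT side: a task inside a migrate_disable() section is never moved off its CPU; when called from the pull path, the balancer instead asks the stopper on that CPU to push the CPU's currently running task somewhere else, so the pinned task can run there sooner. The standalone C program below is only a sketch of that decision, not part of the commit; struct task, try_push() and the printed messages are hypothetical stand-ins for the kernel machinery (get_push_task(), stop_one_cpu_nowait(), push_cpu_stop()).

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified task model: only the fields the decision needs. */
struct task {
	const char *name;
	bool migration_disabled;	/* inside a migrate_disable() section */
	int nr_cpus_allowed;
};

/*
 * Mirror of the new rule: a migration-disabled task is never migrated
 * directly. If a lower-priority CPU exists for the current task, the
 * current task is pushed away instead, freeing this CPU for the pinned task.
 */
static void try_push(struct task *next_task, struct task *curr,
		     int this_cpu, int lower_prio_cpu)
{
	if (next_task->migration_disabled) {
		if (lower_prio_cpu < 0 || lower_prio_cpu == this_cpu ||
		    curr->nr_cpus_allowed == 1) {
			printf("give up: nothing to push on CPU%d\n", this_cpu);
			return;
		}
		/* stand-in for get_push_task() + stop_one_cpu_nowait(push_cpu_stop) */
		printf("defer: push '%s' off CPU%d instead of moving '%s'\n",
		       curr->name, this_cpu, next_task->name);
		return;
	}
	printf("migrate '%s': CPU%d -> CPU%d\n",
	       next_task->name, this_cpu, lower_prio_cpu);
}

int main(void)
{
	struct task curr    = { "rt-curr",    false, 4 };
	struct task pinned  = { "rt-pinned",  true,  4 };
	struct task movable = { "rt-movable", false, 4 };

	try_push(&movable, &curr, 0, 2);	/* normal push path */
	try_push(&pinned,  &curr, 0, 2);	/* migrate-disable fallback */
	return 0;
}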
@@ -1929,12 +1957,10 @@ retry:
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
+	resched_curr(lowest_rq);
 	ret = 1;
 
-	resched_curr(lowest_rq);
-
 	double_unlock_balance(rq, lowest_rq);
-
 out:
 	put_task_struct(next_task);
 
@@ -1944,7 +1970,7 @@ out:
 static void push_rt_tasks(struct rq *rq)
 {
 	/* push_rt_task will return true if it moved an RT */
-	while (push_rt_task(rq))
+	while (push_rt_task(rq, false))
 		;
 }
 
@@ -2097,7 +2123,8 @@ void rto_push_irq_work_func(struct irq_work *work)
 	 */
 	if (has_pushable_tasks(rq)) {
 		raw_spin_lock(&rq->lock);
-		push_rt_tasks(rq);
+		while (push_rt_task(rq, true))
+			;
 		raw_spin_unlock(&rq->lock);
 	}
 
@@ -2122,7 +2149,7 @@ static void pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, cpu;
 	bool resched = false;
-	struct task_struct *p;
+	struct task_struct *p, *push_task;
 	struct rq *src_rq;
 	int rt_overload_count = rt_overloaded(this_rq);
 
@@ -2169,6 +2196,7 @@ static void pull_rt_task(struct rq *this_rq)
 		 * double_lock_balance, and another CPU could
 		 * alter this_rq
 		 */
+		push_task = NULL;
 		double_lock_balance(this_rq, src_rq);
 
 		/*
@@ -2196,11 +2224,14 @@ static void pull_rt_task(struct rq *this_rq)
 			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
-			resched = true;
-
-			deactivate_task(src_rq, p, 0);
-			set_task_cpu(p, this_cpu);
-			activate_task(this_rq, p, 0);
+			if (is_migration_disabled(p)) {
+				push_task = get_push_task(src_rq);
+			} else {
+				deactivate_task(src_rq, p, 0);
+				set_task_cpu(p, this_cpu);
+				activate_task(this_rq, p, 0);
+				resched = true;
+			}
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
@@ -2210,6 +2241,13 @@ static void pull_rt_task(struct rq *this_rq)
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
+
+		if (push_task) {
+			raw_spin_unlock(&this_rq->lock);
+			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
+					    push_task, &src_rq->push_work);
+			raw_spin_lock(&this_rq->lock);
+		}
 	}
 
 	if (resched)
@@ -2451,6 +2489,7 @@ DEFINE_SCHED_CLASS(rt) = {
 	.rq_offline		= rq_offline_rt,
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
+	.find_lock_rq		= find_lock_lowest_rq,
 #endif
 
 	.task_tick		= task_tick_rt,
@@ -975,6 +975,7 @@ struct rq {
 	unsigned long		cpu_capacity_orig;
 
 	struct callback_head	*balance_callback;
+	unsigned char		balance_flags;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;
@@ -1005,6 +1006,10 @@ struct rq {
 
 	/* This is used to determine avg_idle's max value */
 	u64			max_idle_balance_cost;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	struct rcuwait		hotplug_wait;
+#endif
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1050,6 +1055,12 @@ struct rq {
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state	*idle_state;
 #endif
+
+#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
+	unsigned int		nr_pinned;
+#endif
+	unsigned int		push_busy;
+	struct cpu_stop_work	push_work;
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1077,6 +1088,16 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define MDF_PUSH		0x01
+
+static inline bool is_migration_disabled(struct task_struct *p)
+{
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
+	return p->migration_disabled;
+#else
+	return false;
+#endif
+}
+
 #ifdef CONFIG_SCHED_SMT
 extern void __update_idle_core(struct rq *rq);
@@ -1223,6 +1244,9 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
 	rf->clock_update_flags = 0;
 #endif
+#ifdef CONFIG_SMP
+	SCHED_WARN_ON(rq->balance_callback);
+#endif
 }
 
 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
@@ -1384,6 +1408,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 
+#define BALANCE_WORK	0x01
+#define BALANCE_PUSH	0x02
+
 static inline void
 queue_balance_callback(struct rq *rq,
 		       struct callback_head *head,
@@ -1391,12 +1418,13 @@ queue_balance_callback(struct rq *rq,
 {
 	lockdep_assert_held(&rq->lock);
 
-	if (unlikely(head->next))
+	if (unlikely(head->next || (rq->balance_flags & BALANCE_PUSH)))
 		return;
 
 	head->func = (void (*)(struct callback_head *))func;
 	head->next = rq->balance_callback;
 	rq->balance_callback = head;
+	rq->balance_flags |= BALANCE_WORK;
 }
 
 #define rcu_dereference_check_sched_domain(p) \
@@ -1804,10 +1832,13 @@ struct sched_class {
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
+				 const struct cpumask *newmask,
+				 u32 flags);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
+
+	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
 #endif
 
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
@@ -1905,13 +1936,35 @@ static inline bool sched_fair_runnable(struct rq *rq)
 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
 extern struct task_struct *pick_next_task_idle(struct rq *rq);
 
+#define SCA_CHECK		0x01
+#define SCA_MIGRATE_DISABLE	0x02
+#define SCA_MIGRATE_ENABLE	0x04
+
 #ifdef CONFIG_SMP
 
 extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+
+static inline struct task_struct *get_push_task(struct rq *rq)
+{
+	struct task_struct *p = rq->curr;
+
+	lockdep_assert_held(&rq->lock);
+
+	if (rq->push_busy)
+		return NULL;
+
+	if (p->nr_cpus_allowed == 1)
+		return NULL;
+
+	rq->push_busy = true;
+	return get_task_struct(p);
+}
+
+extern int push_cpu_stop(void *arg);
 
 #endif
 
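get_push_task() above acts as a single-flight guard for deferred pushes: it hands out rq->curr with a reference taken and marks the runqueue push_busy, so at most one push_cpu_stop work is queued per runqueue at a time (the clearing side is not visible in the hunks shown here). Below is a trimmed-down userspace model of just that guard; it is not part of the commit, and the struct rq fields and names are hypothetical simplifications of the kernel ones.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified runqueue: only what the guard logic touches. */
struct rq {
	const char *curr;		/* name of the task currently running */
	int curr_nr_cpus_allowed;
	bool push_busy;			/* a push work is already queued */
};

static const char *get_push_task(struct rq *rq)
{
	if (rq->push_busy)
		return NULL;		/* one deferred push at a time */
	if (rq->curr_nr_cpus_allowed == 1)
		return NULL;		/* current task cannot go anywhere else */
	rq->push_busy = true;		/* assumed to be cleared once the push ran */
	return rq->curr;
}

int main(void)
{
	struct rq rq = { "rt-curr", 4, false };
	const char *first  = get_push_task(&rq);
	const char *second = get_push_task(&rq);	/* push_busy now set */

	printf("first:  %s\n", first  ? first  : "(null)");
	printf("second: %s\n", second ? second : "(null)");
	return 0;
}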
@@ -42,11 +42,27 @@ struct cpu_stopper {
 	struct list_head	works;		/* list of pending works */
 
 	struct cpu_stop_work	stop_work;	/* for stop_cpus */
+	unsigned long		caller;
+	cpu_stop_fn_t		fn;
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 static bool stop_machine_initialized = false;
 
+void print_stop_info(const char *log_lvl, struct task_struct *task)
+{
+	/*
+	 * If @task is a stopper task, it cannot migrate and task_cpu() is
+	 * stable.
+	 */
+	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
+
+	if (task != stopper->thread)
+		return;
+
+	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
+}
+
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
 static bool stop_cpus_in_progress;
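The caller and fn fields added above let print_stop_info() attribute a busy stopper: each queued work records its queueing site (via _RET_IP_ in the hunks that follow), the stopper publishes fn and caller while the callback runs, and clears them afterwards. The program below is only a rough userspace sketch of that bookkeeping, not the kernel implementation; the types, demo_fn() and the use of __builtin_return_address(0) in place of _RET_IP_ are hypothetical.

#include <stdio.h>

typedef int (*cpu_stop_fn_t)(void *arg);

struct cpu_stop_work {
	cpu_stop_fn_t	fn;
	void		*arg;
	unsigned long	caller;		/* stands in for _RET_IP_ */
};

struct cpu_stopper {
	cpu_stop_fn_t	fn;
	unsigned long	caller;
};

static struct cpu_stopper stopper;

/* Print which callback is running and who queued it, like print_stop_info(). */
static void print_stop_info(void)
{
	printf("Stopper: %#lx <- %#lx\n",
	       (unsigned long)stopper.fn, stopper.caller);
}

static int demo_fn(void *arg)
{
	(void)arg;
	print_stop_info();	/* attributes the currently running work */
	return 0;
}

/* Publish fn/caller around the callback, clear them when it is done. */
static void run_one(struct cpu_stop_work *work)
{
	stopper.fn = work->fn;
	stopper.caller = work->caller;
	work->fn(work->arg);
	stopper.fn = NULL;
	stopper.caller = 0;
}

int main(void)
{
	struct cpu_stop_work work = {
		.fn = demo_fn,
		.arg = NULL,
		.caller = (unsigned long)__builtin_return_address(0),
	};
	run_one(&work);
	return 0;
}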
@@ -123,7 +139,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 {
 	struct cpu_stop_done done;
-	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
+	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
 
 	cpu_stop_init_done(&done, 1);
 	if (!cpu_stop_queue_work(cpu, &work))
@@ -331,7 +347,8 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 	work1 = work2 = (struct cpu_stop_work){
 		.fn = multi_cpu_stop,
 		.arg = &msdata,
-		.done = &done
+		.done = &done,
+		.caller = _RET_IP_,
 	};
 
 	cpu_stop_init_done(&done, 2);
@@ -367,7 +384,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			 struct cpu_stop_work *work_buf)
 {
-	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
+	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
 	return cpu_stop_queue_work(cpu, work_buf);
 }
 
@@ -487,6 +504,8 @@ repeat:
 		int ret;
 
 		/* cpu stop callbacks must not sleep, make in_atomic() == T */
+		stopper->caller = work->caller;
+		stopper->fn = fn;
 		preempt_count_inc();
 		ret = fn(arg);
 		if (done) {
@@ -495,6 +514,8 @@ repeat:
 			cpu_stop_signal_done(done);
 		}
 		preempt_count_dec();
+		stopper->fn = NULL;
+		stopper->caller = 0;
 		WARN_ONCE(preempt_count(),
 			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
 		goto repeat;
@@ -4908,6 +4908,10 @@ static void unbind_workers(int cpu)
 		pool->flags |= POOL_DISASSOCIATED;
 
 		raw_spin_unlock_irq(&pool->lock);
+
+		for_each_pool_worker(worker, pool)
+			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+
 		mutex_unlock(&wq_pool_attach_mutex);
 
 		/*