Merge commit 'v2.6.27-rc3' into core/urgent
@@ -55,4 +55,4 @@ config HZ
 	default 1000 if HZ_1000

 config SCHED_HRTICK
-	def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS
+	def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS)
@@ -349,6 +349,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));

+	cpu_set(cpu, cpu_active_map);
+
 	/* Now call notifier in preparation. */
 	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

@@ -367,7 +369,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	if (!cpu_isset(cpu, cpu_possible_map)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
 		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
 #endif
@@ -383,9 +385,6 @@ int __cpuinit cpu_up(unsigned int cpu)

 	err = _cpu_up(cpu, 0);

-	if (cpu_online(cpu))
-		cpu_set(cpu, cpu_active_map);
-
 out:
 	cpu_maps_update_done();
 	return err;
@@ -92,7 +92,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

 /**
- * Try to allocate memory from the per-device coherent area.
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
  *
  * @dev:	device from which we allocate memory
  * @size:	size of requested memory area
@@ -100,11 +100,11 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @ret:	This pointer will be filled with the virtual address
  *		to allocated area.
  *
- * This function should be only called from per-arch %dma_alloc_coherent()
+ * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
  *
  * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
  */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
@@ -126,7 +126,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 }

 /**
- * Try to free the memory allocated from per-device coherent memory pool.
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
  * @dev:	device from which the memory was allocated
  * @order:	the order of pages allocated
  * @vaddr:	virtual address of allocated pages
@@ -135,7 +135,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
  * coherent memory pool and if so, releases that memory.
  *
  * Returns 1 if we correctly released the memory, or 0 if
- * %dma_release_coherent() should proceed with releasing memory from
+ * dma_release_coherent() should proceed with releasing memory from
  * generic pools.
  */
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
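The two comment hunks above only reshape the headers into kernel-doc form, which expects the block to open with "function_name() - short description" and to mark parameters with "@name:" (and to avoid %-highlighting on function names). A minimal sketch of that layout over a hypothetical helper (the function and its parameters here are invented purely to show the comment format):

#include <linux/types.h>
#include <linux/device.h>

/**
 * example_alloc_buffer() - try to allocate a buffer from a private pool
 * @dev:	device the allocation is made for
 * @size:	number of bytes requested
 * @ret:	filled with the virtual address of the allocated area
 *
 * Returns 0 if the caller should fall back to the generic allocator,
 * or !0 if @ret holds a valid buffer.
 */
static int example_alloc_buffer(struct device *dev, size_t size, void **ret)
{
	*ret = NULL;	/* sketch only: there is no pool behind this */
	return 0;
}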
@@ -8,6 +8,7 @@

 #include <linux/irq.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/interrupt.h>

 #include "internals.h"
@@ -16,23 +17,18 @@ static struct proc_dir_entry *root_irq_dir;

 #ifdef CONFIG_SMP

-static int irq_affinity_read_proc(char *page, char **start, off_t off,
-				  int count, int *eof, void *data)
+static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
-	struct irq_desc *desc = irq_desc + (long)data;
+	struct irq_desc *desc = irq_desc + (long)m->private;
 	cpumask_t *mask = &desc->affinity;
-	int len;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
 		mask = &desc->pending_mask;
 #endif
-	len = cpumask_scnprintf(page, count, *mask);
-
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	seq_cpumask(m, mask);
+	seq_putc(m, '\n');
+	return 0;
 }

 #ifndef is_affinity_mask_valid
@@ -40,11 +36,12 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off,
 #endif

 int no_irq_affinity;
-static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
-				   unsigned long count, void *data)
+static ssize_t irq_affinity_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
 {
-	unsigned int irq = (int)(long)data, full_count = count, err;
+	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
 	cpumask_t new_value;
+	int err;

 	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
@@ -65,28 +62,38 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
 	if (!cpus_intersects(new_value, cpu_online_map))
 		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
-		return irq_select_affinity(irq) ? -EINVAL : full_count;
+		return irq_select_affinity(irq) ? -EINVAL : count;

 	irq_set_affinity(irq, new_value);

-	return full_count;
+	return count;
 }

-static int default_affinity_read(char *page, char **start, off_t off,
-				 int count, int *eof, void *data)
+static int irq_affinity_proc_open(struct inode *inode, struct file *file)
 {
-	int len = cpumask_scnprintf(page, count, irq_default_affinity);
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
 }

-static int default_affinity_write(struct file *file, const char __user *buffer,
-				  unsigned long count, void *data)
+static const struct file_operations irq_affinity_proc_fops = {
+	.open		= irq_affinity_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= irq_affinity_proc_write,
+};
+
+static int default_affinity_show(struct seq_file *m, void *v)
 {
-	unsigned int full_count = count, err;
+	seq_cpumask(m, &irq_default_affinity);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static ssize_t default_affinity_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *ppos)
+{
 	cpumask_t new_value;
+	int err;

 	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
@@ -105,8 +112,21 @@ static int default_affinity_write(struct file *file, const char __user *buffer,

 	irq_default_affinity = new_value;

-	return full_count;
+	return count;
 }
+
+static int default_affinity_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, default_affinity_show, NULL);
+}
+
+static const struct file_operations default_affinity_proc_fops = {
+	.open		= default_affinity_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= default_affinity_write,
+};
 #endif

 static int irq_spurious_read(char *page, char **start, off_t off,
@@ -178,16 +198,9 @@ void register_irq_proc(unsigned int irq)
 	irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);

 #ifdef CONFIG_SMP
-	{
-		/* create /proc/irq/<irq>/smp_affinity */
-		entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
-
-		if (entry) {
-			entry->data = (void *)(long)irq;
-			entry->read_proc = irq_affinity_read_proc;
-			entry->write_proc = irq_affinity_write_proc;
-		}
-	}
+	/* create /proc/irq/<irq>/smp_affinity */
+	proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
+			 &irq_affinity_proc_fops, (void *)(long)irq);
 #endif

 	entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
@@ -208,15 +221,8 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
-	struct proc_dir_entry *entry;
-
-	/* create /proc/irq/default_smp_affinity */
-	entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir);
-	if (entry) {
-		entry->data = NULL;
-		entry->read_proc = default_affinity_read;
-		entry->write_proc = default_affinity_write;
-	}
+	proc_create("irq/default_smp_affinity", 0600, NULL,
+		    &default_affinity_proc_fops);
 #endif
 }
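The hunks above replace the ad-hoc read_proc/write_proc handlers in what appears to be kernel/irq/proc.c with the seq_file single_open() pattern registered through proc_create_data(). A stripped-down sketch of that same pattern for a hypothetical read-only /proc entry (module name, entry name and the "42" cookie are invented for illustration; error handling is minimal and this is not the code from the diff):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	/* m->private is whatever pointer was passed to proc_create_data() */
	long cookie = (long)m->private;

	seq_printf(m, "%ld\n", cookie);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* hand the per-entry data through to the show() callback */
	return single_open(file, example_proc_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	/* /proc/example, read-only, with 42 as the private cookie */
	proc_create_data("example", 0444, NULL, &example_proc_fops, (void *)42L);
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Writable entries follow the same shape: add a .write handler (as irq_affinity_proc_write does above) and recover the entry's data from the inode's proc_dir_entry.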
@@ -2288,7 +2288,7 @@ sys_init_module(void __user *umod,

 	/* Start the module */
 	if (mod->init != NULL)
-		ret = mod->init();
+		ret = do_one_initcall(mod->init);
 	if (ret < 0) {
 		/* Init routine failed: abort.  Try to protect us from
		   buggy refcounters. */
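The module.c hunk routes a module's init function through do_one_initcall() instead of calling mod->init() directly, so module init runs through the same wrapper as built-in initcalls (which is also where things like initcall debugging and timing can hook in). The general shape of such a wrapper, reduced to a standalone sketch with invented names (this is not the kernel's do_one_initcall()):

#include <stdio.h>

typedef int (*initcall_t)(void);

/* One central place to trace and sanity-check every init call. */
static int run_one_initcall(initcall_t fn)
{
	int ret;

	printf("calling initcall %p\n", (void *)fn);
	ret = fn();
	printf("initcall %p returned %d\n", (void *)fn, ret);

	return ret;
}

static int demo_init(void)
{
	return 0;
}

int main(void)
{
	return run_one_initcall(demo_init) < 0 ? 1 : 0;
}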
@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info)
 		else
 			schedule_next_timer(timr);

-		info->si_overrun = timr->it_overrun_last;
+		info->si_overrun += timr->it_overrun_last;
 	}

 	if (timr)
 		unlock_timer(timr, flags);
 }

-int posix_timer_event(struct k_itimer *timr,int si_private)
+int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-	memset(&timr->sigq->info, 0, sizeof(siginfo_t));
+	/*
+	 * FIXME: if ->sigq is queued we can race with
+	 * dequeue_signal()->do_schedule_next_timer().
+	 *
+	 * If dequeue_signal() sees the "right" value of
+	 * si_sys_private it calls do_schedule_next_timer().
+	 * We re-queue ->sigq and drop ->it_lock().
+	 * do_schedule_next_timer() locks the timer
+	 * and re-schedules it while ->sigq is pending.
+	 * Not really bad, but not that we want.
+	 */
 	timr->sigq->info.si_sys_private = si_private;
 	/* Send signal to the process that owns this timer.*/

 	timr->sigq->info.si_signo = timr->it_sigev_signo;
 	timr->sigq->info.si_errno = 0;
 	timr->sigq->info.si_code = SI_TIMER;
 	timr->sigq->info.si_tid = timr->it_id;
 	timr->sigq->info.si_value = timr->it_sigev_value;
@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void)
 		kmem_cache_free(posix_timers_cache, tmr);
 		tmr = NULL;
 	}
+	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
 	return tmr;
 }
@@ -833,7 +833,7 @@ static inline u64 global_rt_period(void)

 static inline u64 global_rt_runtime(void)
 {
-	if (sysctl_sched_rt_period < 0)
+	if (sysctl_sched_rt_runtime < 0)
 		return RUNTIME_INF;

 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
@@ -32,14 +32,20 @@
 #include <linux/ktime.h>
 #include <linux/module.h>

+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+}
+
+static __read_mostly int sched_clock_running;
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
-
 struct sched_clock_data {
 	/*
	 * Raw spinlock - this is a special case: this might be called
@@ -49,14 +55,9 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;

 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
-	s64			multi;
-#ifdef CONFIG_NO_HZ
-	int			check_max;
-#endif
 };

 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -71,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }

-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
@@ -84,90 +83,39 @@ void sched_clock_init(void)

 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
-		scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-		scd->check_max = 1;
-#endif
 	}

 	sched_clock_running = 1;
 }

-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
-	return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-	return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * update the percpu scd from the raw @now value
  *
  *  - filter out backward motion
  *  - use jiffies to generate a min,max window to clip the raw values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
 	unsigned long now_jiffies = jiffies;
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;

 	WARN_ON_ONCE(!irqs_disabled());

-	/*
-	 * At schedule tick the clock can be just under the gtod. We don't
-	 * want to push it too prematurely.
-	 */
-	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-	if (min_clock > TICK_NSEC)
-		min_clock -= TICK_NSEC / 2;
+	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;

 	if (unlikely(delta < 0)) {
 		clock++;
 		goto out;
 	}

-	/*
-	 * The clock must stay within a jiffie of the gtod.
-	 * But since we may be at the start of a jiffy or the end of one
-	 * we add another jiffy buffer.
-	 */
-	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
-
-	delta *= scd->multi;
-	delta >>= MULTI_SHIFT;
+	max_clock = min_clock + TICK_NSEC;

-	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+	if (unlikely(clock + delta > max_clock)) {
 		if (clock < max_clock)
 			clock = max_clock;
 		else
@@ -180,12 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim
 	if (unlikely(clock < min_clock))
 		clock = min_clock;

-	if (time)
-		*time = clock;
-	else {
-		scd->prev_raw = now;
-		scd->clock = clock;
-	}
+	scd->tick_jiffies = now_jiffies;
+	scd->clock = clock;
+
+	return clock;
 }

 static void lock_double_clock(struct sched_clock_data *data1,
@@ -203,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd = cpu_sdc(cpu);
-	u64 now, clock;
+	u64 now, clock, this_clock, remote_clock;

 	if (unlikely(!sched_clock_running))
 		return 0ull;
@@ -212,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
 	now = sched_clock();

 	if (cpu != raw_smp_processor_id()) {
-		/*
-		 * in order to update a remote cpu's clock based on our
-		 * unstable raw time rebase it against:
-		 *	tick_raw	(offset between raw counters)
-		 *	tick_gotd	(tick offset between cpus)
-		 */
 		struct sched_clock_data *my_scd = this_scd();

 		lock_double_clock(scd, my_scd);

-		now -= my_scd->tick_raw;
-		now += scd->tick_raw;
+		this_clock = __update_sched_clock(my_scd, now);
+		remote_clock = scd->clock;

-		now += my_scd->tick_gtod;
-		now -= scd->tick_gtod;
+		/*
+		 * Use the opportunity that we have both locks
+		 * taken to couple the two clocks: we take the
+		 * larger time as the latest time for both
+		 * runqueues. (this creates monotonic movement)
+		 */
+		if (likely(remote_clock < this_clock)) {
+			clock = this_clock;
+			scd->clock = clock;
+		} else {
+			/*
+			 * Should be rare, but possible:
+			 */
+			clock = remote_clock;
+			my_scd->clock = remote_clock;
+		}

 		__raw_spin_unlock(&my_scd->lock);
-
-		__update_sched_clock(scd, now, &clock);
-
-		__raw_spin_unlock(&scd->lock);
-
 	} else {
 		__raw_spin_lock(&scd->lock);
-		__update_sched_clock(scd, now, NULL);
-		clock = scd->clock;
-		__raw_spin_unlock(&scd->lock);
+		clock = __update_sched_clock(scd, now);
 	}

+	__raw_spin_unlock(&scd->lock);
+
 	return clock;
 }

 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
-	unsigned long now_jiffies = jiffies;
-	s64 mult, delta_gtod, delta_raw;
 	u64 now, now_gtod;

 	if (unlikely(!sched_clock_running))
@@ -260,29 +207,14 @@ void sched_clock_tick(void)
 	now = sched_clock();

 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now, NULL);
+	__update_sched_clock(scd, now);
 	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock 2 jiffies.
	 */
-	delta_gtod = now_gtod - scd->tick_gtod;
-	delta_raw = now - scd->tick_raw;
-
-	if ((long)delta_raw > 0) {
-		mult = delta_gtod << MULTI_SHIFT;
-		do_div(mult, delta_raw);
-		scd->multi = mult;
-		if (scd->multi > MAX_MULTI)
-			scd->multi = MAX_MULTI;
-		else if (scd->multi < MIN_MULTI)
-			scd->multi = MIN_MULTI;
-	} else
-		scd->multi = 1 << MULTI_SHIFT;
-
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	scd->tick_jiffies = now_jiffies;
 	__raw_spin_unlock(&scd->lock);
 }
@@ -301,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 	struct sched_clock_data *scd = this_scd();
-	u64 now = sched_clock();

 	/*
	 * Override the previous timestamp and ignore all
@@ -310,27 +241,30 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
	 * rq clock:
	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
-	scd->multi = 1 << MULTI_SHIFT;
 	__raw_spin_unlock(&scd->lock);

 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

-#endif
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
+void sched_clock_init(void)
 {
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+	sched_clock_running = 1;
 }

+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
+#endif

 unsigned long long cpu_clock(int cpu)
 {
 	unsigned long long clock;
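The sched_clock.c hunks above restore the weak, jiffies-based sched_clock() as the default and gate callers on a sched_clock_running flag. The weak-symbol override mechanism itself can be shown with a small standalone (GCC/Clang, userspace) sketch; the names below are invented and only mimic the kernel's pattern, they are not kernel code:

/* weak_clock_demo.c -- build with: gcc -O2 weak_clock_demo.c -o demo */
#include <stdio.h>

#define TICK_NS 1000000ULL		/* pretend 1000 HZ: 1 ms per tick */

static unsigned long long ticks;	/* stands in for 'jiffies' */

/*
 * Default, low-resolution clock: tick counter scaled to nanoseconds.
 * Because it is declared weak, a strong definition of demo_clock() in
 * another object file silently overrides it at link time -- the same
 * trick the kernel uses for its per-architecture sched_clock().
 */
__attribute__((weak)) unsigned long long demo_clock(void)
{
	return ticks * TICK_NS;
}

int main(void)
{
	ticks = 5;
	printf("clock = %llu ns\n", demo_clock());
	return 0;
}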
@@ -899,7 +899,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
 		if (rq->curr != p)
-			delta = max(10000LL, delta);
+			delta = max_t(s64, 10000LL, delta);

 		hrtick_start(rq, delta);
 	}
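The hrtick_start_fair() hunk swaps max() for max_t(s64, ...). The kernel's max() insists that both operands have exactly the same type, and an s64 is not necessarily the same type as a 10000LL literal on every architecture, while max_t() casts both sides to one named type first. A simplified, hedged userspace re-creation of max_t (not the kernel's exact macro) applied to the same clamp:

#include <stdio.h>

typedef long long s64;

/*
 * Simplified stand-in for the kernel's max_t(): force both operands to a
 * single named type before comparing, side-stepping any type mismatch.
 */
#define max_t(type, a, b)  ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	s64 slice = 5000, ran = 20000;
	s64 delta = slice - ran;	/* negative: task already over its slice */

	/* Clamp the timer delay to a 10 us floor, as hrtick_start_fair() does. */
	delta = max_t(s64, 10000LL, delta);

	printf("delta = %lld ns\n", delta);
	return 0;
}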
@@ -1442,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 	struct task_struct *p = NULL;
 	struct sched_entity *se;

-	while (next != &cfs_rq->tasks) {
+	if (next == &cfs_rq->tasks)
+		return NULL;
+
+	/* Skip over entities that are not tasks */
+	do {
 		se = list_entry(next, struct sched_entity, group_node);
 		next = next->next;
+	} while (next != &cfs_rq->tasks && !entity_is_task(se));

-		/* Skip over entities that are not tasks */
-		if (entity_is_task(se)) {
-			p = task_of(se);
-			break;
-		}
-	}
+	if (next == &cfs_rq->tasks)
+		return NULL;

 	cfs_rq->balance_iterator = next;

+	if (entity_is_task(se))
+		p = task_of(se);
+
 	return p;
 }
@@ -1304,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 		q->info.si_overrun++;
 		goto out;
 	}
+	q->info.si_overrun = 0;

 	signalfd_notify(t, sig);
 	pending = group ? &t->signal->shared_pending : &t->pending;
@@ -65,7 +65,6 @@ static void ack_state(void)
 static int stop_cpu(struct stop_machine_data *smdata)
 {
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
-	int uninitialized_var(ret);

 	/* Simple state machine */
 	do {
@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 			ts->tick_stopped = 1;
 			ts->idle_jiffies = last_jiffies;
 			rcu_enter_nohz();
-			sched_clock_tick_stop(cpu);
 		}

 		/*
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
 	select_nohz_load_balancer(0);
 	now = ktime_get();
 	tick_do_update_jiffies64(now);
-	sched_clock_tick_start(cpu);
 	cpu_clear(cpu, nohz_cpu_mask);

 	/*