Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf_counter: Report the cloning task as parent on perf_counter_fork()
  perf_counter: Fix an ipi-deadlock
  perf: Rework/fix the whole read vs group stuff
  perf_counter: Fix swcounter context invariance
  perf report: Don't show unresolved DSOs and symbols when -S/-d is used
  perf tools: Add a general option to enable raw sample records
  perf tools: Add a per tracepoint counter attribute to get raw sample
  perf_counter: Provide hw_perf_counter_setup_online() APIs
  perf list: Fix large list output by using the pager
  perf_counter, x86: Fix/improve apic fallback
  perf record: Add missing -C option support for specifying profile cpu
  perf tools: Fix dso__new handle() to handle deleted DSOs
  perf tools: Fix fallback to cplus_demangle() when bfd_demangle() is not available
  perf report: Show the tid too in -D
  perf record: Fix .tid and .pid fill-in when synthesizing events
  perf_counter, x86: Fix generic cache events on P6-mobile CPUs
  perf_counter, x86: Fix lapic printk message
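The headline change here is "perf: Rework/fix the whole read vs group stuff": PERF_SAMPLE_GROUP is replaced by PERF_SAMPLE_READ, and both read() and sample records now emit one common layout driven by attr.read_format. As a reading aid, the layout implied by the new perf_counter_read_one()/perf_counter_read_group() helpers in the hunks below can be sketched like this (pseudo-declarations for illustration, not literal structs from the patch):

	/*
	 * read_format without PERF_FORMAT_GROUP:
	 *
	 *	struct read_format {
	 *		u64 value;
	 *		{ u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
	 *		{ u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
	 *		{ u64 id;           }	&& PERF_FORMAT_ID
	 *	};
	 *
	 * read_format with PERF_FORMAT_GROUP:
	 *
	 *	struct read_format {
	 *		u64 nr;
	 *		{ u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
	 *		{ u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
	 *		struct {
	 *			u64 value;
	 *			{ u64 id;   }	&& PERF_FORMAT_ID
	 *		} cntr[nr];
	 *	};
	 */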
@@ -88,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void) { barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu) { barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
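hw_perf_counter_setup_online() is a new __weak hook, so an architecture that needs late, per-CPU PMU initialization can override it. A hypothetical arch-side override might look like this (only the signature comes from the hunk above; the file path and body are illustrative):

	/* arch/<arch>/kernel/perf_counter.c -- hypothetical override */
	void hw_perf_counter_setup_online(int cpu)
	{
		/* e.g. allocate or enable per-CPU PMU state now that
		 * the CPU is fully online */
	}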
@@ -306,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1691,7 +1696,32 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static u64 perf_counter_read_tree(struct perf_counter *counter)
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
 {
 	struct perf_counter *child;
 	u64 total = 0;
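A worked example of what perf_counter_read_size() returns, with assumed inputs: a group leader with two siblings and read_format = PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID:

	entry = sizeof(u64) + sizeof(u64)	/* value + id        */ = 16
	size  = sizeof(u64)			/* time_enabled      */ =  8
	nr    = 1 + 2				/* leader + siblings */ =  3
	size += sizeof(u64)			/* the 'nr' field    */ -> 16
	size += entry * nr			/* 3 entries         */ -> 64 bytes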
@@ -1703,14 +1733,96 @@ static u64 perf_counter_read_tree(struct perf_counter *counter)
 	return total;
 }
 
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(counter, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
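From userspace, a counter opened with PERF_FORMAT_GROUP can now be read in one go. A minimal parsing sketch, assuming read_format = PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID and a group_fd obtained from the perf counter syscall (the helper name and the 16-entry cap are illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct read_entry { uint64_t value, id; };	/* one per counter */

	static void print_group_counts(int group_fd)
	{
		/* must be at least perf_counter_read_size() bytes,
		 * otherwise the kernel now returns -ENOSPC (see the
		 * next hunk) */
		uint64_t buf[3 + 2 * 16], nr, i;
		struct read_entry *e;

		if (read(group_fd, buf, sizeof(buf)) < 0)
			return;

		nr = buf[0];			/* 1 + nr_siblings      */
						/* buf[1]: time_enabled */
						/* buf[2]: time_running */
		e = (struct read_entry *)&buf[3];
		for (i = 0; i < nr && i < 16; i++)
			printf("id %llu: count %llu\n",
			       (unsigned long long)e[i].id,
			       (unsigned long long)e[i].value);
	}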
@@ -1720,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read_tree(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
@@ -2245,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2630,7 +2732,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+				   struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 		struct perf_sample_data *data)
 {
 	int ret;
@@ -2641,10 +2816,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
@@ -2699,10 +2870,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2759,26 +2928,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2817,8 +2968,6 @@ struct perf_read_event {
 
 	u32 pid;
 	u32 tid;
-	u64 value;
-	u64 format[3];
 };
 
 static void
@@ -2830,34 +2979,20 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
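After this change a PERF_EVENT_READ record is the fixed part followed by the common read_format blob, instead of an inline value plus format[3] array. The resulting wire layout (a sketch, not a literal kernel struct):

	struct perf_event_header header;	/* .size includes the blob */
	u32 pid, tid;
	/* read_format blob, exactly as perf_output_read() emits it */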
@@ -2893,10 +3028,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
 		return;
 
 	task_event->event.pid = perf_counter_pid(counter, task);
-	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
 	task_event->event.tid = perf_counter_tid(counter, task);
-	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
 	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
@@ -3443,40 +3578,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
 	/*
 	 * The counter is active, we're good!
 	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
 	/*
 	 * The counter is off/error, not counting.
 	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU.  We want to count in
-	 * the first case but not the second.  If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case.  If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
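The simplified perf_swcounter_is_counting() above boils down to this decision table (restated from the code for clarity):

	counter state   ctx->is_active   counting?
	ACTIVE          -                yes
	OFF / ERROR     -                no
	INACTIVE        yes              no  (group didn't make it onto the PMU)
	INACTIVE        no               yes (task scheduled out; we still see
	                                      events such as migrations)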
@@ -3928,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -4592,6 +4719,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
@@ -4616,6 +4748,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
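Taken together, the last two hunks give the boot CPU the same treatment as hotplugged CPUs. The implied bootstrap sequence (an illustrative call trace, not code from the patch):

	perf_counter_init()
	    perf_cpu_notify(..., CPU_UP_PREPARE, boot cpu)
	        perf_counter_init_cpu(cpu)
	    perf_cpu_notify(..., CPU_ONLINE, boot cpu)
	        hw_perf_counter_setup_online(cpu)
	    register_cpu_notifier(&perf_cpu_nb)	/* covers later hotplug */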