Patches contributed by Eötvös Lorand University
commit 17c0e7107bed3d578864e6519f7f4e4c324c8f58
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jul 3 08:29:25 2009 -0500
x86: Mark atomic irq ops raw for 32bit legacy
The atomic ops emulation for 32bit legacy CPUs floods the tracer with
irq off/on entries. The irq disabled regions are short and therefore
not interesting when chasing long irq disabled latencies. Mark them
raw and keep them out of the trace.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8baaa719fa7f..8f8217b9bdac 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -187,10 +187,10 @@ static inline int atomic_add_return(int i, atomic_t *v)
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
- local_irq_save(flags);
+ raw_local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return i + __i;
#endif
}
commit 8e7672cdb413af859086ffceaed68f7e1e8ea4c2
Author: Ingo Molnar <mingo@elte.hu>
Date: Tue Feb 9 06:11:00 2010 +0100
nmi_watchdog: Only enable on x86 for now
It won't even build on other platforms just yet - so restrict it
to x86 for now.
Cc: Don Zickus <dzickus@redhat.com>
Cc: gorcunov@gmail.com
Cc: aris@redhat.com
Cc: peterz@infradead.org
LKML-Reference: <1265424425-31562-4-git-send-email-dzickus@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f80b67e72aa0..acef88239e15 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -173,6 +173,7 @@ config DETECT_SOFTLOCKUP
config NMI_WATCHDOG
bool "Detect Hard Lockups with an NMI Watchdog"
depends on DEBUG_KERNEL && PERF_EVENTS
+ depends on X86
default y
help
Say Y here to enable the kernel to use the NMI as a watchdog
commit 6d3e0907b8b239d16720d144e2675ecf10d3bc3b
Merge: 23577256953c 50200df46202
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Feb 8 08:55:43 2010 +0100
Merge branch 'sched/urgent' into sched/core
Merge reason: Merge dependent fix, update to latest -rc.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 2161db969313cb94ffd9377a525fb75c3fee9eeb
Author: Ingo Molnar <mingo@elte.hu>
Date: Thu Feb 4 10:22:01 2010 +0100
perf tools: Fix session init on non-modular kernels
perf top and perf record refuse to initialize on non-modular kernels:
$ perf top -v
map_groups__set_modules_path_dir: cannot open /lib/modules/2.6.33-rc6-tip-00586-g398dde3-dirty/
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1265223128-11786-1-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a60ba2ba1044..6882e9fec2d6 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1937,7 +1937,7 @@ int map_groups__create_kernel_maps(struct map_groups *self,
return -1;
if (symbol_conf.use_modules && map_groups__create_modules(self) < 0)
- return -1;
+ return 0;
/*
* Now that we have all the maps created, just set the ->end of them:
*/
commit 59f411b62c9282891274e721fea29026b0eda3cc
Author: Ingo Molnar <mingo@elte.hu>
Date: Sun Jan 31 08:27:58 2010 +0100
perf lock: Clean up various details
Fix up a few small stylistic details:
- use consistent vertical spacing/alignment
- remove line80 artifacts
- group some global variables better
- remove dead code
Plus rename 'prof' to 'report' to make it more in line with other
tools, and remove the line/file keying as we really want to use
IPs like the other tools do.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1264851813-8413-12-git-send-email-mitake@dcl.info.waseda.ac.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 2b5f88754c26..fb9ab2ad3f92 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -32,37 +32,37 @@ static struct list_head lockhash_table[LOCKHASH_SIZE];
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
-#define LOCK_STATE_UNLOCKED 0 /* initial state */
-#define LOCK_STATE_LOCKED 1
+#define LOCK_STATE_UNLOCKED 0 /* initial state */
+#define LOCK_STATE_LOCKED 1
struct lock_stat {
- struct list_head hash_entry;
- struct rb_node rb; /* used for sorting */
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */
- /* FIXME: raw_field_value() returns unsigned long long,
+ /*
+ * FIXME: raw_field_value() returns unsigned long long,
* so address of lockdep_map should be dealed as 64bit.
- * Is there more better solution? */
- void *addr; /* address of lockdep_map, used as ID */
- char *name; /* for strcpy(), we cannot use const */
- char *file;
- unsigned int line;
+ * Is there more better solution?
+ */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
- int state;
- u64 prev_event_time; /* timestamp of previous event */
+ int state;
+ u64 prev_event_time; /* timestamp of previous event */
- unsigned int nr_acquired;
- unsigned int nr_acquire;
- unsigned int nr_contended;
- unsigned int nr_release;
+ unsigned int nr_acquired;
+ unsigned int nr_acquire;
+ unsigned int nr_contended;
+ unsigned int nr_release;
/* these times are in nano sec. */
- u64 wait_time_total;
- u64 wait_time_min;
- u64 wait_time_max;
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
};
/* build simple key function one is bigger than two */
-#define SINGLE_KEY(member) \
+#define SINGLE_KEY(member) \
static int lock_stat_key_ ## member(struct lock_stat *one, \
struct lock_stat *two) \
{ \
@@ -81,12 +81,15 @@ struct lock_key {
* this should be simpler than raw name of member
* e.g. nr_acquired -> acquired, wait_time_total -> wait_total
*/
- const char *name;
- int (*key)(struct lock_stat*, struct lock_stat*);
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
};
-static const char *sort_key = "acquired";
-static int (*compare)(struct lock_stat *, struct lock_stat *);
+static const char *sort_key = "acquired";
+
+static int (*compare)(struct lock_stat *, struct lock_stat *);
+
+static struct rb_root result; /* place to store sorted data */
#define DEF_KEY_LOCK(name, fn_suffix) \
{ #name, lock_stat_key_ ## fn_suffix }
@@ -116,11 +119,8 @@ static void select_key(void)
die("Unknown compare key:%s\n", sort_key);
}
-static struct rb_root result; /* place to store sorted data */
-
static void insert_to_result(struct lock_stat *st,
- int (*bigger)(struct lock_stat *,
- struct lock_stat *))
+ int (*bigger)(struct lock_stat *, struct lock_stat *))
{
struct rb_node **rb = &result.rb_node;
struct rb_node *parent = NULL;
@@ -155,8 +155,7 @@ static struct lock_stat *pop_from_result(void)
return container_of(node, struct lock_stat, rb);
}
-static struct lock_stat *lock_stat_findnew(void *addr, const char *name,
- const char *file, unsigned int line)
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
struct list_head *entry = lockhashentry(addr);
struct lock_stat *ret, *new;
@@ -175,11 +174,6 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name,
if (!new->name)
goto alloc_failed;
strcpy(new->name, name);
- new->file = zalloc(sizeof(char) * strlen(file) + 1);
- if (!new->file)
- goto alloc_failed;
- strcpy(new->file, file);
- new->line = line;
/* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
new->state = LOCK_STATE_UNLOCKED;
@@ -197,36 +191,28 @@ static char const *input_name = "perf.data";
static int profile_cpu = -1;
struct raw_event_sample {
- u32 size;
- char data[0];
+ u32 size;
+ char data[0];
};
struct trace_acquire_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};
struct trace_acquired_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};
struct trace_contended_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};
struct trace_release_event {
- void *addr;
- const char *name;
- const char *file;
- unsigned int line;
+ void *addr;
+ const char *name;
};
struct trace_lock_handler {
@@ -255,7 +241,8 @@ struct trace_lock_handler {
struct thread *thread);
};
-static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
+static void
+report_lock_acquire_event(struct trace_acquire_event *acquire_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -263,8 +250,7 @@ static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
{
struct lock_stat *st;
- st = lock_stat_findnew(acquire_event->addr, acquire_event->name,
- acquire_event->file, acquire_event->line);
+ st = lock_stat_findnew(acquire_event->addr, acquire_event->name);
switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -279,7 +265,8 @@ static void prof_lock_acquire_event(struct trace_acquire_event *acquire_event,
st->prev_event_time = timestamp;
}
-static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event,
+static void
+report_lock_acquired_event(struct trace_acquired_event *acquired_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -287,8 +274,7 @@ static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event
{
struct lock_stat *st;
- st = lock_stat_findnew(acquired_event->addr, acquired_event->name,
- acquired_event->file, acquired_event->line);
+ st = lock_stat_findnew(acquired_event->addr, acquired_event->name);
switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -305,7 +291,8 @@ static void prof_lock_acquired_event(struct trace_acquired_event *acquired_event
st->prev_event_time = timestamp;
}
-static void prof_lock_contended_event(struct trace_contended_event *contended_event,
+static void
+report_lock_contended_event(struct trace_contended_event *contended_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -313,8 +300,7 @@ static void prof_lock_contended_event(struct trace_contended_event *contended_ev
{
struct lock_stat *st;
- st = lock_stat_findnew(contended_event->addr, contended_event->name,
- contended_event->file, contended_event->line);
+ st = lock_stat_findnew(contended_event->addr, contended_event->name);
switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -330,7 +316,8 @@ static void prof_lock_contended_event(struct trace_contended_event *contended_ev
st->prev_event_time = timestamp;
}
-static void prof_lock_release_event(struct trace_release_event *release_event,
+static void
+report_lock_release_event(struct trace_release_event *release_event,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -339,8 +326,7 @@ static void prof_lock_release_event(struct trace_release_event *release_event,
struct lock_stat *st;
u64 hold_time;
- st = lock_stat_findnew(release_event->addr, release_event->name,
- release_event->file, release_event->line);
+ st = lock_stat_findnew(release_event->addr, release_event->name);
switch (st->state) {
case LOCK_STATE_UNLOCKED:
@@ -373,11 +359,11 @@ static void prof_lock_release_event(struct trace_release_event *release_event,
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
-static struct trace_lock_handler prof_lock_ops = {
- .acquire_event = prof_lock_acquire_event,
- .acquired_event = prof_lock_acquired_event,
- .contended_event = prof_lock_contended_event,
- .release_event = prof_lock_release_event,
+static struct trace_lock_handler report_lock_ops = {
+ .acquire_event = report_lock_acquire_event,
+ .acquired_event = report_lock_acquired_event,
+ .contended_event = report_lock_contended_event,
+ .release_event = report_lock_release_event,
};
static struct trace_lock_handler *trace_handler;
@@ -395,14 +381,9 @@ process_lock_acquire_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&acquire_event.addr, &tmp, sizeof(void *));
acquire_event.name = (char *)raw_field_ptr(event, "name", data);
- acquire_event.file = (char *)raw_field_ptr(event, "file", data);
- acquire_event.line =
- (unsigned int)raw_field_value(event, "line", data);
- if (trace_handler->acquire_event) {
- trace_handler->acquire_event(&acquire_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}
static void
@@ -418,14 +399,9 @@ process_lock_acquired_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&acquired_event.addr, &tmp, sizeof(void *));
acquired_event.name = (char *)raw_field_ptr(event, "name", data);
- acquired_event.file = (char *)raw_field_ptr(event, "file", data);
- acquired_event.line =
- (unsigned int)raw_field_value(event, "line", data);
- if (trace_handler->acquire_event) {
- trace_handler->acquired_event(&acquired_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}
static void
@@ -441,14 +417,9 @@ process_lock_contended_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&contended_event.addr, &tmp, sizeof(void *));
contended_event.name = (char *)raw_field_ptr(event, "name", data);
- contended_event.file = (char *)raw_field_ptr(event, "file", data);
- contended_event.line =
- (unsigned int)raw_field_value(event, "line", data);
- if (trace_handler->acquire_event) {
- trace_handler->contended_event(&contended_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}
static void
@@ -464,14 +435,9 @@ process_lock_release_event(void *data,
tmp = raw_field_value(event, "lockdep_addr", data);
memcpy(&release_event.addr, &tmp, sizeof(void *));
release_event.name = (char *)raw_field_ptr(event, "name", data);
- release_event.file = (char *)raw_field_ptr(event, "file", data);
- release_event.line =
- (unsigned int)raw_field_value(event, "line", data);
- if (trace_handler->acquire_event) {
- trace_handler->release_event(&release_event,
- event, cpu, timestamp, thread);
- }
+ if (trace_handler->acquire_event)
+ trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}
static void
@@ -503,14 +469,6 @@ static int process_sample_event(event_t *event, struct perf_session *session)
event__parse_sample(event, session->sample_type, &data);
thread = perf_session__findnew(session, data.pid);
- /*
- * FIXME: this causes warn on 32bit environment
- * because of (void *)data.ip (type of data.ip is u64)
- */
-/* dump_printf("(IP, %d): %d/%d: %p period: %llu\n", */
-/* event->header.misc, */
-/* data.pid, data.tid, (void *)data.ip, data.period); */
-
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -580,15 +538,14 @@ static void dump_map(void)
for (i = 0; i < LOCKHASH_SIZE; i++) {
list_for_each_entry(st, &lockhash_table[i], hash_entry) {
- printf("%p: %s (src: %s, line: %u)\n",
- st->addr, st->name, st->file, st->line);
+ printf("%p: %s\n", st->addr, st->name);
}
}
}
static struct perf_event_ops eops = {
- .sample = process_sample_event,
- .comm = event__process_comm,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
};
static struct perf_session *session;
@@ -614,7 +571,7 @@ static void sort_result(void)
}
}
-static void __cmd_prof(void)
+static void __cmd_report(void)
{
setup_pager();
select_key();
@@ -623,12 +580,12 @@ static void __cmd_prof(void)
print_result();
}
-static const char * const prof_usage[] = {
- "perf sched prof [<options>]",
+static const char * const report_usage[] = {
+ "perf lock report [<options>]",
NULL
};
-static const struct option prof_options[] = {
+static const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
"key for sorting"),
/* TODO: type */
@@ -636,17 +593,14 @@ static const struct option prof_options[] = {
};
static const char * const lock_usage[] = {
- "perf lock [<options>] {record|trace|prof}",
+ "perf lock [<options>] {record|trace|report}",
NULL
};
static const struct option lock_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
- OPT_BOOLEAN('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
+ OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_END()
};
@@ -698,21 +652,21 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
- } else if (!strncmp(argv[0], "prof", 4)) {
- trace_handler = &prof_lock_ops;
+ } else if (!strncmp(argv[0], "report", 6)) {
+ trace_handler = &report_lock_ops;
if (argc) {
argc = parse_options(argc, argv,
- prof_options, prof_usage, 0);
+ report_options, report_usage, 0);
if (argc)
- usage_with_options(prof_usage, prof_options);
+ usage_with_options(report_usage, report_options);
}
- __cmd_prof();
+ __cmd_report();
} else if (!strcmp(argv[0], "trace")) {
/* Aliased to 'perf trace' */
return cmd_trace(argc, argv, prefix);
} else if (!strcmp(argv[0], "map")) {
- /* recycling prof_lock_ops */
- trace_handler = &prof_lock_ops;
+ /* recycling report_lock_ops */
+ trace_handler = &report_lock_ops;
setup_pager();
read_events();
dump_map();
commit ae7f6711d6231c9ba54feb5ba9856c3775e482f8
Merge: 64abebf731df b23ff0e9330e
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri Jan 29 09:24:57 2010 +0100
Merge branch 'perf/urgent' into perf/core
Merge reason: We want to queue up a dependent patch. Also update to
later -rc's.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 184f412c3341cd24fbd26604634a5800b83dbdc3
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 27 08:39:39 2010 +0100
perf, x86: Clean up event constraints code a bit
- Remove stray debug code
- Improve ugly macros a bit
- Remove some whitespace damage
- (Also fix up some accumulated damage in perf_event.h)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Stephane Eranian <eranian@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 66de282ad2fb..fdbe24842271 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -93,24 +93,19 @@ struct cpu_hw_events {
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};
-#define EVENT_CONSTRAINT(c, n, m) { \
+#define EVENT_CONSTRAINT(c, n, m) { \
{ .idxmsk64[0] = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = HWEIGHT64((u64)(n)), \
}
-#define INTEL_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define INTEL_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define FIXED_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
-#define FIXED_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
+#define EVENT_CONSTRAINT_END EVENT_CONSTRAINT(0, 0, 0)
-#define EVENT_CONSTRAINT_END \
- EVENT_CONSTRAINT(0, 0, 0)
-
-#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->cmask; (e)++)
+#define for_each_event_constraint(e, c) for ((e) = (c); (e)->cmask; (e)++)
/*
* struct x86_pmu - generic x86 pmu
@@ -1276,14 +1271,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (test_bit(hwc->idx, used_mask))
break;
-#if 0
- pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
- smp_processor_id(),
- hwc->config,
- hwc->idx,
- assign ? 'y' : 'n');
-#endif
-
set_bit(hwc->idx, used_mask);
if (assign)
assign[i] = hwc->idx;
@@ -1333,14 +1320,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (j == X86_PMC_IDX_MAX)
break;
-#if 0
- pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
- smp_processor_id(),
- hwc->config,
- j,
- assign ? 'y' : 'n');
-#endif
-
set_bit(j, used_mask);
if (assign)
@@ -2596,9 +2575,9 @@ static const struct pmu pmu = {
* validate a single event group
*
* validation include:
- * - check events are compatible which each other
- * - events do not compete for the same counter
- * - number of events <= number of counters
+ * - check events are compatible which each other
+ * - events do not compete for the same counter
+ * - number of events <= number of counters
*
* validation ensures the group can be loaded onto the
* PMU if it was the only group available.
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 72b2615600d8..953c17731e0d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -290,7 +290,7 @@ struct perf_event_mmap_page {
};
#define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
#define PERF_RECORD_MISC_USER (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
@@ -356,8 +356,8 @@ enum perf_event_type {
* u64 stream_id;
* };
*/
- PERF_RECORD_THROTTLE = 5,
- PERF_RECORD_UNTHROTTLE = 6,
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
/*
* struct {
@@ -371,10 +371,10 @@ enum perf_event_type {
/*
* struct {
- * struct perf_event_header header;
- * u32 pid, tid;
+ * struct perf_event_header header;
+ * u32 pid, tid;
*
- * struct read_format values;
+ * struct read_format values;
* };
*/
PERF_RECORD_READ = 8,
@@ -412,7 +412,7 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
* };
*/
- PERF_RECORD_SAMPLE = 9,
+ PERF_RECORD_SAMPLE = 9,
PERF_RECORD_MAX, /* non-ABI */
};
@@ -752,8 +752,7 @@ extern int perf_max_events;
extern const struct pmu *hw_perf_event_init(struct perf_event *event);
extern void perf_event_task_sched_in(struct task_struct *task);
-extern void perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next);
+extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
@@ -853,8 +852,7 @@ extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count,
- void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
@@ -895,13 +893,13 @@ static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr) { }
static inline void
-perf_bp_event(struct perf_event *event, void *data) { }
+perf_bp_event(struct perf_event *event, void *data) { }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
-static inline int perf_swevent_get_recursion_context(void) { return -1; }
+static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
commit e0b5f80dd4226a920257c91a3b9070e81149060b
Merge: b7a0afb0b464 a02b11937a6e
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 27 11:04:40 2010 +0100
Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
commit b7a0afb0b4649d9b43488d5f5b1660bc9cffcff6
Merge: da482474b839 e83e452b0692
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Jan 27 10:52:36 2010 +0100
Merge branch 'urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into x86/urgent
commit f426a7e02918e2e992b28adeb02e5a0ab39a7a25
Merge: 231e36f4d2e6 329c0e012b99
Author: Ingo Molnar <mingo@elte.hu>
Date: Mon Jan 18 08:56:41 2010 +0100
Merge branch 'perf/scheduling' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core