Patches contributed by Eötvös Loránd University
commit c04f5e5d7b523f90ee3cdd70a68c4002aaecd3fa
Author: Ingo Molnar <mingo@elte.hu>
Date: Fri May 29 09:10:54 2009 +0200
perf_counter tools: Clean up builtin-stat.c's do_perfstat()
[ Impact: cleanup ]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
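Background for the hunk below: with scaling enabled, each counter read() returns three __u64 values — the raw count, the time the counter was enabled, and the time it actually ran — and the count is extrapolated when the counter was multiplexed. A standalone sketch of that extrapolation (illustrative only, not part of the patch; the helper name is made up):

	#include <stdio.h>

	typedef unsigned long long u64;

	/* Extrapolate a raw count when the counter ran for only part of the time. */
	static u64 scale_count(u64 raw, u64 enabled, u64 running)
	{
		if (running == 0)
			return 0;		/* the "<not counted>" case */
		if (running >= enabled)
			return raw;		/* counter ran the whole time */
		return (u64)((double)raw * enabled / running + 0.5);
	}

	int main(void)
	{
		/* Counter active for half of a 2 ms window: the estimate doubles the raw count. */
		printf("%llu\n", scale_count(1000, 2000000, 1000000));	/* prints 2000 */
		return 0;
	}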
diff --git a/Documentation/perf_counter/builtin-stat.c b/Documentation/perf_counter/builtin-stat.c
index ac14086d9a70..6a2936150f28 100644
--- a/Documentation/perf_counter/builtin-stat.c
+++ b/Documentation/perf_counter/builtin-stat.c
@@ -109,11 +109,75 @@ static void create_perfstat_counter(int counter)
}
}
+/*
+ * Does the counter have nsecs as a unit?
+ */
+static inline int nsec_counter(int counter)
+{
+ if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK))
+ return 1;
+ if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Print out the results of a single counter:
+ */
+static void print_counter(int counter)
+{
+ __u64 count[3], single_count[3];
+ ssize_t res;
+ int cpu, nv;
+ int scaled;
+
+ count[0] = count[1] = count[2] = 0;
+ nv = scale ? 3 : 1;
+ for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ res = read(fd[cpu][counter], single_count, nv * sizeof(__u64));
+ assert(res == nv * sizeof(__u64));
+
+ count[0] += single_count[0];
+ if (scale) {
+ count[1] += single_count[1];
+ count[2] += single_count[2];
+ }
+ }
+
+ scaled = 0;
+ if (scale) {
+ if (count[2] == 0) {
+ fprintf(stderr, " %14s %-20s\n",
+ "<not counted>", event_name(counter));
+ return;
+ }
+ if (count[2] < count[1]) {
+ scaled = 1;
+ count[0] = (unsigned long long)
+ ((double)count[0] * count[1] / count[2] + 0.5);
+ }
+ }
+
+ if (nsec_counter(counter)) {
+ double msecs = (double)count[0] / 1000000;
+
+ fprintf(stderr, " %14.6f %-20s (msecs)",
+ msecs, event_name(counter));
+ } else {
+ fprintf(stderr, " %14Ld %-20s (events)",
+ count[0], event_name(counter));
+ }
+ if (scaled)
+ fprintf(stderr, " (scaled from %.2f%%)",
+ (double) count[2] / count[1] * 100);
+ fprintf(stderr, "\n");
+}
+
static int do_perfstat(int argc, const char **argv)
{
unsigned long long t0, t1;
int counter;
- ssize_t res;
int status;
int pid;
@@ -149,55 +213,10 @@ static int do_perfstat(int argc, const char **argv)
argv[0]);
fprintf(stderr, "\n");
- for (counter = 0; counter < nr_counters; counter++) {
- int cpu, nv;
- __u64 count[3], single_count[3];
- int scaled;
-
- count[0] = count[1] = count[2] = 0;
- nv = scale ? 3 : 1;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
- res = read(fd[cpu][counter],
- single_count, nv * sizeof(__u64));
- assert(res == nv * sizeof(__u64));
-
- count[0] += single_count[0];
- if (scale) {
- count[1] += single_count[1];
- count[2] += single_count[2];
- }
- }
-
- scaled = 0;
- if (scale) {
- if (count[2] == 0) {
- fprintf(stderr, " %14s %-20s\n",
- "<not counted>", event_name(counter));
- continue;
- }
- if (count[2] < count[1]) {
- scaled = 1;
- count[0] = (unsigned long long)
- ((double)count[0] * count[1] / count[2] + 0.5);
- }
- }
-
- if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK) ||
- event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) {
+ for (counter = 0; counter < nr_counters; counter++)
+ print_counter(counter);
- double msecs = (double)count[0] / 1000000;
- fprintf(stderr, " %14.6f %-20s (msecs)",
- msecs, event_name(counter));
- } else {
- fprintf(stderr, " %14Ld %-20s (events)",
- count[0], event_name(counter));
- }
- if (scaled)
- fprintf(stderr, " (scaled from %.2f%%)",
- (double) count[2] / count[1] * 100);
- fprintf(stderr, "\n");
- }
fprintf(stderr, "\n");
fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n",
(double)(t1-t0)/1e6);
@@ -212,7 +231,6 @@ static void skip_signal(int signo)
static const char * const stat_usage[] = {
"perf stat [<options>] <command>",
- "perf stat [<options>] -- <command> [<options>]",
NULL
};
commit b659294b779565c60f5e12ef505328e2b974eb62
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:27 2009 +0200
x86, mce: print number of MCE banks
The number of MCE banks supported by a CPU is useful to know, so print it
out during CPU initialization.
[ Impact: add printout ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
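For reference, the printed bank count is simply the low eight bits of the IA32_MCG_CAP MSR, which mce_cap_init() reads with rdmsrl() in the hunk below. A user-space sketch of the same extraction (illustrative only; the MSR value here is a made-up example):

	#include <stdio.h>

	int main(void)
	{
		/* Example IA32_MCG_CAP value: MCG_CTL_P (bit 8) set, 6 banks reported. */
		unsigned long long cap = (1ULL << 8) | 0x06;
		int banks = cap & 0xff;		/* bits 0-7 encode the number of MCE banks */

		printf("mce: CPU supports %d MCE banks\n", banks);
		return 0;
	}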
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ba8dd41a10dc..49c74222359d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -570,6 +570,8 @@ static int mce_cap_init(void)
rdmsrl(MSR_IA32_MCG_CAP, cap);
b = cap & 0xff;
+ printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
+
if (b > MAX_NR_BANKS) {
printk(KERN_WARNING
"MCE: Using only %u machine check banks out of %u\n",
@@ -1287,6 +1289,7 @@ void mcheck_init(struct cpuinfo_x86 *c)
default:
break;
}
+ printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks);
}
static int __init mcheck_disable(char *str)
commit cb491fca55e5282f0a95ef39c55352e00d6ca75e
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:17 2009 +0200
x86, mce: Rename sysfs variables
Shorten variable names. This also compacts the code a bit.
device_mce => mce_dev
mce_device_initialized => mce_dev_initialized
mce_attribute => mce_attrs
[ Impact: cleanup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 8488210b866f..b9972a6bc2a1 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -90,7 +90,7 @@ extern int mce_disabled;
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
-DECLARE_PER_CPU(struct sys_device, device_mce);
+DECLARE_PER_CPU(struct sys_device, mce_dev);
extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2e2c3d2e9586..ba8dd41a10dc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -957,7 +957,7 @@ static struct sysdev_class mce_sysclass = {
.name = "machinecheck",
};
-DEFINE_PER_CPU(struct sys_device, device_mce);
+DEFINE_PER_CPU(struct sys_device, mce_dev);
__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
@@ -1039,12 +1039,12 @@ static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
ACCESSOR(check_interval, check_interval, mce_restart())
-static struct sysdev_attribute *mce_attributes[] = {
+static struct sysdev_attribute *mce_attrs[] = {
&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
NULL
};
-static cpumask_var_t mce_device_initialized;
+static cpumask_var_t mce_dev_initialized;
/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
@@ -1055,40 +1055,36 @@ static __cpuinit int mce_create_device(unsigned int cpu)
if (!mce_available(&boot_cpu_data))
return -EIO;
- memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
- per_cpu(device_mce, cpu).id = cpu;
- per_cpu(device_mce, cpu).cls = &mce_sysclass;
+ memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
+ per_cpu(mce_dev, cpu).id = cpu;
+ per_cpu(mce_dev, cpu).cls = &mce_sysclass;
- err = sysdev_register(&per_cpu(device_mce, cpu));
+ err = sysdev_register(&per_cpu(mce_dev, cpu));
if (err)
return err;
- for (i = 0; mce_attributes[i]; i++) {
- err = sysdev_create_file(&per_cpu(device_mce, cpu),
- mce_attributes[i]);
+ for (i = 0; mce_attrs[i]; i++) {
+ err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
if (err)
goto error;
}
for (i = 0; i < banks; i++) {
- err = sysdev_create_file(&per_cpu(device_mce, cpu),
+ err = sysdev_create_file(&per_cpu(mce_dev, cpu),
&bank_attrs[i]);
if (err)
goto error2;
}
- cpumask_set_cpu(cpu, mce_device_initialized);
+ cpumask_set_cpu(cpu, mce_dev_initialized);
return 0;
error2:
- while (--i >= 0) {
- sysdev_remove_file(&per_cpu(device_mce, cpu),
- &bank_attrs[i]);
- }
+ while (--i >= 0)
+ sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
error:
- while (--i >= 0) {
- sysdev_remove_file(&per_cpu(device_mce, cpu),
- mce_attributes[i]);
- }
- sysdev_unregister(&per_cpu(device_mce, cpu));
+ while (--i >= 0)
+ sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
+
+ sysdev_unregister(&per_cpu(mce_dev, cpu));
return err;
}
@@ -1097,24 +1093,24 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
{
int i;
- if (!cpumask_test_cpu(cpu, mce_device_initialized))
+ if (!cpumask_test_cpu(cpu, mce_dev_initialized))
return;
- for (i = 0; mce_attributes[i]; i++)
- sysdev_remove_file(&per_cpu(device_mce, cpu),
- mce_attributes[i]);
+ for (i = 0; mce_attrs[i]; i++)
+ sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
+
for (i = 0; i < banks; i++)
- sysdev_remove_file(&per_cpu(device_mce, cpu),
- &bank_attrs[i]);
- sysdev_unregister(&per_cpu(device_mce, cpu));
- cpumask_clear_cpu(cpu, mce_device_initialized);
+ sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
+
+ sysdev_unregister(&per_cpu(mce_dev, cpu));
+ cpumask_clear_cpu(cpu, mce_dev_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void *h)
{
- int i;
unsigned long action = *(unsigned long *)h;
+ int i;
if (!mce_available(&current_cpu_data))
return;
@@ -1221,7 +1217,7 @@ static __init int mce_init_device(void)
if (!mce_available(&boot_cpu_data))
return -EIO;
- alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+ alloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);
err = mce_init_banks();
if (err)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 4d90ec3eb51d..083f270251fa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -517,7 +517,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (!b)
goto out;
- err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
+ err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
b->kobj, name);
if (err)
goto out;
@@ -540,7 +540,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
- b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
+ b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
if (!b->kobj)
goto out_free;
@@ -560,7 +560,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (i == cpu)
continue;
- err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
+ err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
b->kobj, name);
if (err)
goto out;
@@ -638,7 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
#ifdef CONFIG_SMP
/* sibling symlink */
if (shared_bank[bank] && b->blocks->cpu != cpu) {
- sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
+ sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = NULL;
return;
@@ -650,7 +650,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
if (i == cpu)
continue;
- sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
+ sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
per_cpu(threshold_banks, i)[bank] = NULL;
}
commit dba3725d44f5dfb5711fd509fca10b5b828c43b7
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:26 2009 +0200
x86, mce: unify
Move mce_64.c => mce.c and glue it up in the Makefile.
Remove mce_32.c.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 6def76942bf2..55f01b39a105 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,4 +1,4 @@
-obj-y = mce_$(BITS).o therm_throt.o
+obj-y = mce.o therm_throt.o
obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
obj-$(CONFIG_X86_MCE_P4THERMAL) += mce_intel.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce.c
similarity index 100%
rename from arch/x86/kernel/cpu/mcheck/mce_64.c
rename to arch/x86/kernel/cpu/mcheck/mce.c
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
deleted file mode 100644
index 05979e7eff12..000000000000
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * mce.c - x86 Machine Check Exception Reporting
- * (c) 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>, Dave Jones <davej@redhat.com>
- */
-#include <linux/thread_info.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/mce.h>
-
-#include "mce.h"
-
-int mce_disabled;
-
-int nr_mce_banks;
-EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
-
-/* Handle unconfigured int18 (should never happen) */
-static void unexpected_machine_check(struct pt_regs *regs, long error_code)
-{
- printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
- smp_processor_id());
-}
-
-/* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
- unexpected_machine_check;
-
-/* This has to be run for each processor */
-void mcheck_init(struct cpuinfo_x86 *c)
-{
- if (mce_disabled == 1)
- return;
-
- switch (c->x86_vendor) {
- case X86_VENDOR_AMD:
- amd_mcheck_init(c);
- break;
-
- case X86_VENDOR_INTEL:
- if (c->x86 == 5)
- intel_p5_mcheck_init(c);
- if (c->x86 == 6)
- intel_p6_mcheck_init(c);
- if (c->x86 == 15)
- intel_p4_mcheck_init(c);
- break;
-
- case X86_VENDOR_CENTAUR:
- if (c->x86 == 5)
- winchip_mcheck_init(c);
- break;
-
- default:
- break;
- }
-}
-
-static int __init mcheck_disable(char *str)
-{
- mce_disabled = 1;
- return 1;
-}
-
-static int __init mcheck_enable(char *str)
-{
- mce_disabled = -1;
- return 1;
-}
-
-__setup("nomce", mcheck_disable);
-__setup("mce", mcheck_enable);
commit 711c2e481c9d1113650d09de10f61ee88ab56fda
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:26 2009 +0200
x86, mce: unify, prepare for 32-bit v2
Prepare the 64-bit mce_64.c code to be built on 32-bit.
[ includes ifdef relocation by Andi Kleen ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@firstfloor.org>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h
index 2d1a54bdadfc..cd6cffcc2de0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.h
+++ b/arch/x86/kernel/cpu/mcheck/mce.h
@@ -1,14 +1,14 @@
#include <linux/init.h>
#include <asm/mce.h>
-#ifdef CONFIG_X86_32
-
void amd_mcheck_init(struct cpuinfo_x86 *c);
void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
+#ifdef CONFIG_X86_32
+
/* Call the installed machine check handler for this CPU setup. */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index ce48ae75e1dc..2e2c3d2e9586 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -37,6 +37,10 @@
#include <asm/msr.h>
#include <asm/smp.h>
+#include "mce.h"
+
+#ifdef CONFIG_X86_64
+
#define MISC_MCELOG_MINOR 227
atomic_t mce_entry;
@@ -1241,7 +1245,7 @@ static __init int mce_init_device(void)
device_initcall(mce_init_device);
-#ifdef CONFIG_X86_32
+#else /* CONFIG_X86_32: */
int mce_disabled;
commit a988d334ae8213c0e0e62327222f6e5e6e52bcf1
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:25 2009 +0200
x86, mce: unify, prepare codes
Move current 32-bit mce_32.c code into mce_64.c.
[ Remove unused artifact stop/restart_mce, pointed out by Andi Kleen ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@firstfloor.org>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 1491246c4d69..ce48ae75e1dc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -1240,3 +1240,68 @@ static __init int mce_init_device(void)
}
device_initcall(mce_init_device);
+
+#ifdef CONFIG_X86_32
+
+int mce_disabled;
+
+int nr_mce_banks;
+EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
+
+/* Handle unconfigured int18 (should never happen) */
+static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+{
+ printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
+ smp_processor_id());
+}
+
+/* Call the installed machine check handler for this CPU setup. */
+void (*machine_check_vector)(struct pt_regs *, long error_code) =
+ unexpected_machine_check;
+
+/* This has to be run for each processor */
+void mcheck_init(struct cpuinfo_x86 *c)
+{
+ if (mce_disabled == 1)
+ return;
+
+ switch (c->x86_vendor) {
+ case X86_VENDOR_AMD:
+ amd_mcheck_init(c);
+ break;
+
+ case X86_VENDOR_INTEL:
+ if (c->x86 == 5)
+ intel_p5_mcheck_init(c);
+ if (c->x86 == 6)
+ intel_p6_mcheck_init(c);
+ if (c->x86 == 15)
+ intel_p4_mcheck_init(c);
+ break;
+
+ case X86_VENDOR_CENTAUR:
+ if (c->x86 == 5)
+ winchip_mcheck_init(c);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int __init mcheck_disable(char *str)
+{
+ mce_disabled = 1;
+ return 1;
+}
+
+static int __init mcheck_enable(char *str)
+{
+ mce_disabled = -1;
+ return 1;
+}
+
+__setup("nomce", mcheck_disable);
+__setup("mce", mcheck_enable);
+
+#endif /* CONFIG_X86_32 */
commit 06b851d98266b812b2fa23d007cdf53f41194bbb
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:25 2009 +0200
x86, mce: unify, prepare 64bit in mce.h
Prepare mce.h for unification, so that it will build on 32-bit x86
kernels too.
[ Impact: cleanup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
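One point worth noting about the hunk below: the 1UL → 1ULL changes matter on 32-bit builds, where unsigned long is only 32 bits wide and therefore cannot express MSR bit masks above bit 31. A small standalone sketch of the difference (illustrative only, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/*
		 * MCI_STATUS_PCC is bit 57 of a 64-bit MSR.  On x86-32,
		 * (1UL << 57) cannot represent that bit because unsigned long
		 * is 32 bits there; (1ULL << 57) always can.
		 */
		uint64_t mci_status_pcc = 1ULL << 57;

		printf("sizeof(unsigned long)      = %zu bytes\n", sizeof(unsigned long));
		printf("sizeof(unsigned long long) = %zu bytes\n", sizeof(unsigned long long));
		printf("MCI_STATUS_PCC             = %#llx\n",
		       (unsigned long long)mci_status_pcc);
		return 0;
	}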
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4f8c199584e7..8488210b866f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -1,8 +1,6 @@
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H
-#ifdef __x86_64__
-
#include <linux/types.h>
#include <asm/ioctls.h>
@@ -10,21 +8,21 @@
* Machine Check support for x86
*/
-#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
+#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
-#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1UL<<2) /* machine check in progress */
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
-#define MCI_STATUS_VAL (1UL<<63) /* valid error */
-#define MCI_STATUS_OVER (1UL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1UL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1UL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1UL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1UL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1UL<<57) /* processor context corrupt */
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
/* Fields are zero when not available */
struct mce {
@@ -82,13 +80,11 @@ struct mce_log {
#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9)
#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0)
-#endif /* __x86_64__ */
-
#ifdef __KERNEL__
#ifdef CONFIG_X86_32
extern int mce_disabled;
-#else /* CONFIG_X86_32 */
+#endif
#include <asm/atomic.h>
@@ -143,8 +139,6 @@ extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
extern int mce_notify_user(void);
-#endif /* !CONFIG_X86_32 */
-
#ifdef CONFIG_X86_MCE
extern void mcheck_init(struct cpuinfo_x86 *c);
#else
commit 1cb2a8e1767ab60370ecce90654c0f281c602d95
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:18 2009 +0200
x86, mce: clean up mce_amd_64.c
Make the coding style match that of the rest of the x86 arch code.
[ Impact: cleanup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 56dde9c4bc96..4d90ec3eb51d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -13,22 +13,22 @@
*
* All MC4_MISCi registers are shared between multi-cores
*/
-
-#include <linux/cpu.h>
-#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/kobject.h>
#include <linux/notifier.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
+#include <linux/kobject.h>
#include <linux/sysdev.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+
+#include <asm/percpu.h>
#include <asm/apic.h>
+#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>
-#include <asm/percpu.h>
-#include <asm/idle.h>
#define PFX "mce_threshold: "
#define VERSION "version 1.1.1"
@@ -48,26 +48,26 @@
#define MCG_XBLK_ADDR 0xC0000400
struct threshold_block {
- unsigned int block;
- unsigned int bank;
- unsigned int cpu;
- u32 address;
- u16 interrupt_enable;
- u16 threshold_limit;
- struct kobject kobj;
- struct list_head miscj;
+ unsigned int block;
+ unsigned int bank;
+ unsigned int cpu;
+ u32 address;
+ u16 interrupt_enable;
+ u16 threshold_limit;
+ struct kobject kobj;
+ struct list_head miscj;
};
/* defaults used early on boot */
static struct threshold_block threshold_defaults = {
- .interrupt_enable = 0,
- .threshold_limit = THRESHOLD_MAX,
+ .interrupt_enable = 0,
+ .threshold_limit = THRESHOLD_MAX,
};
struct threshold_bank {
- struct kobject *kobj;
- struct threshold_block *blocks;
- cpumask_var_t cpus;
+ struct kobject *kobj;
+ struct threshold_block *blocks;
+ cpumask_var_t cpus;
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
@@ -86,9 +86,9 @@ static void amd_threshold_interrupt(void);
*/
struct thresh_restart {
- struct threshold_block *b;
- int reset;
- u16 old_limit;
+ struct threshold_block *b;
+ int reset;
+ u16 old_limit;
};
/* must be called with correct cpu affinity */
@@ -110,6 +110,7 @@ static void threshold_restart_bank(void *_tr)
} else if (tr->old_limit) { /* change limit w/o reset */
int new_count = (mci_misc_hi & THRESHOLD_MAX) +
(tr->old_limit - tr->b->threshold_limit);
+
mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
(new_count & THRESHOLD_MAX);
}
@@ -125,11 +126,11 @@ static void threshold_restart_bank(void *_tr)
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
- unsigned int bank, block;
unsigned int cpu = smp_processor_id();
- u8 lvt_off;
u32 low = 0, high = 0, address = 0;
+ unsigned int bank, block;
struct thresh_restart tr;
+ u8 lvt_off;
for (bank = 0; bank < NR_BANKS; ++bank) {
for (block = 0; block < NR_BLOCKS; ++block) {
@@ -140,8 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
if (!address)
break;
address += MCG_XBLK_ADDR;
- }
- else
+ } else
++address;
if (rdmsr_safe(address, &low, &high))
@@ -193,9 +193,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
*/
static void amd_threshold_interrupt(void)
{
+ u32 low = 0, high = 0, address = 0;
unsigned int bank, block;
struct mce m;
- u32 low = 0, high = 0, address = 0;
mce_setup(&m);
@@ -204,16 +204,16 @@ static void amd_threshold_interrupt(void)
if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
continue;
for (block = 0; block < NR_BLOCKS; ++block) {
- if (block == 0)
+ if (block == 0) {
address = MSR_IA32_MC0_MISC + bank * 4;
- else if (block == 1) {
+ } else if (block == 1) {
address = (low & MASK_BLKPTR_LO) >> 21;
if (!address)
break;
address += MCG_XBLK_ADDR;
- }
- else
+ } else {
++address;
+ }
if (rdmsr_safe(address, &low, &high))
break;
@@ -229,8 +229,10 @@ static void amd_threshold_interrupt(void)
(high & MASK_LOCKED_HI))
continue;
- /* Log the machine check that caused the threshold
- event. */
+ /*
+ * Log the machine check that caused the threshold
+ * event.
+ */
machine_check_poll(MCP_TIMESTAMP,
&__get_cpu_var(mce_poll_banks));
@@ -254,48 +256,56 @@ static void amd_threshold_interrupt(void)
struct threshold_attr {
struct attribute attr;
- ssize_t(*show) (struct threshold_block *, char *);
- ssize_t(*store) (struct threshold_block *, const char *, size_t count);
+ ssize_t (*show) (struct threshold_block *, char *);
+ ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
-#define SHOW_FIELDS(name) \
-static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
-{ \
- return sprintf(buf, "%lx\n", (unsigned long) b->name); \
+#define SHOW_FIELDS(name) \
+static ssize_t show_ ## name(struct threshold_block *b, char *buf) \
+{ \
+ return sprintf(buf, "%lx\n", (unsigned long) b->name); \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
-static ssize_t store_interrupt_enable(struct threshold_block *b,
- const char *buf, size_t count)
+static ssize_t
+store_interrupt_enable(struct threshold_block *b, const char *buf, size_t count)
{
- char *end;
struct thresh_restart tr;
- unsigned long new = simple_strtoul(buf, &end, 0);
+ unsigned long new;
+ char *end;
+
+ new = simple_strtoul(buf, &end, 0);
if (end == buf)
return -EINVAL;
+
b->interrupt_enable = !!new;
- tr.b = b;
- tr.reset = 0;
- tr.old_limit = 0;
+ tr.b = b;
+ tr.reset = 0;
+ tr.old_limit = 0;
+
smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
return end - buf;
}
-static ssize_t store_threshold_limit(struct threshold_block *b,
- const char *buf, size_t count)
+static ssize_t
+store_threshold_limit(struct threshold_block *b, const char *buf, size_t count)
{
- char *end;
struct thresh_restart tr;
- unsigned long new = simple_strtoul(buf, &end, 0);
+ unsigned long new;
+ char *end;
+
+ new = simple_strtoul(buf, &end, 0);
if (end == buf)
return -EINVAL;
+
if (new > THRESHOLD_MAX)
new = THRESHOLD_MAX;
if (new < 1)
new = 1;
+
tr.old_limit = b->threshold_limit;
b->threshold_limit = new;
tr.b = b;
@@ -307,8 +317,8 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
}
struct threshold_block_cross_cpu {
- struct threshold_block *tb;
- long retval;
+ struct threshold_block *tb;
+ long retval;
};
static void local_error_count_handler(void *_tbcc)
@@ -338,15 +348,16 @@ static ssize_t store_error_count(struct threshold_block *b,
return 1;
}
-#define THRESHOLD_ATTR(_name,_mode,_show,_store) { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
+#define THRESHOLD_ATTR(_name, _mode, _show, _store) \
+{ \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
};
-#define RW_ATTR(name) \
-static struct threshold_attr name = \
- THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
+#define RW_ATTR(name) \
+static struct threshold_attr name = \
+ THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
@@ -359,15 +370,17 @@ static struct attribute *default_attrs[] = {
NULL
};
-#define to_block(k) container_of(k, struct threshold_block, kobj)
-#define to_attr(a) container_of(a, struct threshold_attr, attr)
+#define to_block(k) container_of(k, struct threshold_block, kobj)
+#define to_attr(a) container_of(a, struct threshold_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct threshold_block *b = to_block(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
+
ret = a->show ? a->show(b, buf) : -EIO;
+
return ret;
}
@@ -377,18 +390,20 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct threshold_block *b = to_block(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
+
ret = a->store ? a->store(b, buf, count) : -EIO;
+
return ret;
}
static struct sysfs_ops threshold_ops = {
- .show = show,
- .store = store,
+ .show = show,
+ .store = store,
};
static struct kobj_type threshold_ktype = {
- .sysfs_ops = &threshold_ops,
- .default_attrs = default_attrs,
+ .sysfs_ops = &threshold_ops,
+ .default_attrs = default_attrs,
};
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
@@ -396,9 +411,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
unsigned int block,
u32 address)
{
- int err;
- u32 low, high;
struct threshold_block *b = NULL;
+ u32 low, high;
+ int err;
if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
return 0;
@@ -421,20 +436,21 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
if (!b)
return -ENOMEM;
- b->block = block;
- b->bank = bank;
- b->cpu = cpu;
- b->address = address;
- b->interrupt_enable = 0;
- b->threshold_limit = THRESHOLD_MAX;
+ b->block = block;
+ b->bank = bank;
+ b->cpu = cpu;
+ b->address = address;
+ b->interrupt_enable = 0;
+ b->threshold_limit = THRESHOLD_MAX;
INIT_LIST_HEAD(&b->miscj);
- if (per_cpu(threshold_banks, cpu)[bank]->blocks)
+ if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
list_add(&b->miscj,
&per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
- else
+ } else {
per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+ }
err = kobject_init_and_add(&b->kobj, &threshold_ktype,
per_cpu(threshold_banks, cpu)[bank]->kobj,
@@ -447,8 +463,9 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
if (!address)
return 0;
address += MCG_XBLK_ADDR;
- } else
+ } else {
++address;
+ }
err = allocate_threshold_blocks(cpu, bank, ++block, address);
if (err)
@@ -507,6 +524,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
cpumask_copy(b->cpus, cpu_core_mask(cpu));
per_cpu(threshold_banks, cpu)[bank] = b;
+
goto out;
}
#endif
@@ -605,15 +623,13 @@ static void deallocate_threshold_block(unsigned int cpu,
static void threshold_remove_bank(unsigned int cpu, int bank)
{
- int i = 0;
struct threshold_bank *b;
char name[32];
+ int i = 0;
b = per_cpu(threshold_banks, cpu)[bank];
-
if (!b)
return;
-
if (!b->blocks)
goto free_out;
@@ -624,6 +640,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
if (shared_bank[bank] && b->blocks->cpu != cpu) {
sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = NULL;
+
return;
}
#endif
@@ -659,8 +676,8 @@ static void threshold_remove_device(unsigned int cpu)
}
/* get notified when a cpu comes on/off */
-static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
- unsigned int cpu)
+static void __cpuinit
+amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
if (cpu >= NR_CPUS)
return;
@@ -686,11 +703,12 @@ static __init int threshold_init_device(void)
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
int err = threshold_create_device(lcpu);
+
if (err)
return err;
}
threshold_cpu_callback = amd_64_threshold_cpu_callback;
+
return 0;
}
-
device_initcall(threshold_init_device);
commit cb6f3c155b0afabc48667efb9e7b1ce92ccfcab4
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:19 2009 +0200
x86, mce: clean up therm_throt.c
Make the coding style match that of the rest of the x86 arch code.
[ Impact: cleanup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d5ae2243f0b9..a2b5d7ddb19f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -1,7 +1,7 @@
/*
- *
* Thermal throttle event support code (such as syslog messaging and rate
* limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
+ *
* This allows consistent reporting of CPU thermal throttle events.
*
* Maintains a counter in /sys that keeps track of the number of thermal
@@ -13,43 +13,44 @@
* Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
* Inspired by Ross Biro's and Al Borchers' counter code.
*/
-
+#include <linux/notifier.h>
+#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
-#include <asm/cpu.h>
-#include <linux/notifier.h>
-#include <linux/jiffies.h>
+
#include <asm/therm_throt.h>
+#include <asm/cpu.h>
/* How long to wait between reporting thermal events */
-#define CHECK_INTERVAL (300 * HZ)
+#define CHECK_INTERVAL (300 * HZ)
static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
-atomic_t therm_throt_en = ATOMIC_INIT(0);
+
+atomic_t therm_throt_en = ATOMIC_INIT(0);
#ifdef CONFIG_SYSFS
-#define define_therm_throt_sysdev_one_ro(_name) \
- static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
-
-#define define_therm_throt_sysdev_show_func(name) \
-static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- ssize_t ret; \
- \
- preempt_disable(); /* CPU hotplug */ \
- if (cpu_online(cpu)) \
- ret = sprintf(buf, "%lu\n", \
- per_cpu(thermal_throttle_##name, cpu)); \
- else \
- ret = 0; \
- preempt_enable(); \
- \
- return ret; \
+#define define_therm_throt_sysdev_one_ro(_name) \
+ static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
+
+#define define_therm_throt_sysdev_show_func(name) \
+static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned int cpu = dev->id; \
+ ssize_t ret; \
+ \
+ preempt_disable(); /* CPU hotplug */ \
+ if (cpu_online(cpu)) \
+ ret = sprintf(buf, "%lu\n", \
+ per_cpu(thermal_throttle_##name, cpu)); \
+ else \
+ ret = 0; \
+ preempt_enable(); \
+ \
+ return ret; \
}
define_therm_throt_sysdev_show_func(count);
@@ -61,8 +62,8 @@ static struct attribute *thermal_throttle_attrs[] = {
};
static struct attribute_group thermal_throttle_attr_group = {
- .attrs = thermal_throttle_attrs,
- .name = "thermal_throttle"
+ .attrs = thermal_throttle_attrs,
+ .name = "thermal_throttle"
};
#endif /* CONFIG_SYSFS */
@@ -110,10 +111,11 @@ int therm_throt_process(int curr)
}
#ifdef CONFIG_SYSFS
-/* Add/Remove thermal_throttle interface for CPU device */
+/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
- return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
+ return sysfs_create_group(&sys_dev->kobj,
+ &thermal_throttle_attr_group);
}
static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
@@ -121,19 +123,21 @@ static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}
-/* Mutex protecting device creation against CPU hotplug */
+/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static __cpuinit int
+thermal_throttle_cpu_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
int err = 0;
sys_dev = get_cpu_sysdev(cpu);
+
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
commit bdbfbdd5e8f0efb9bfef2e597f8ac673c36317ab
Author: Ingo Molnar <mingo@elte.hu>
Date: Wed Apr 8 12:31:20 2009 +0200
x86, mce: clean up non-fatal.c
Make the coding style match that of the rest of the x86 arch code.
[ Impact: cleanup ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index a74af128efc9..70b710420f74 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -6,15 +6,14 @@
* This file contains routines to check for non-fatal MCEs every 15s
*
*/
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
-#include <linux/smp.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -22,9 +21,9 @@
#include "mce.h"
-static int firstbank;
+static int firstbank;
-#define MCE_RATE 15*HZ /* timer rate is 15s */
+#define MCE_RATE (15*HZ) /* timer rate is 15s */
static void mce_checkregs(void *info)
{
@@ -34,23 +33,24 @@ static void mce_checkregs(void *info)
for (i = firstbank; i < nr_mce_banks; i++) {
rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
- if (high & (1<<31)) {
- printk(KERN_INFO "MCE: The hardware reports a non "
- "fatal, correctable incident occurred on "
- "CPU %d.\n",
+ if (!(high & (1<<31)))
+ continue;
+
+ printk(KERN_INFO "MCE: The hardware reports a non fatal, "
+ "correctable incident occurred on CPU %d.\n",
smp_processor_id());
- printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
-
- /*
- * Scrub the error so we don't pick it up in MCE_RATE
- * seconds time.
- */
- wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
-
- /* Serialize */
- wmb();
- add_taint(TAINT_MACHINE_CHECK);
- }
+
+ printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
+
+ /*
+ * Scrub the error so we don't pick it up in MCE_RATE
+ * seconds time:
+ */
+ wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
+
+ /* Serialize: */
+ wmb();
+ add_taint(TAINT_MACHINE_CHECK);
}
}
@@ -77,16 +77,17 @@ static int __init init_nonfatal_mce_checker(void)
/* Some Athlons misbehave when we frob bank 0 */
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 == 6)
- firstbank = 1;
+ boot_cpu_data.x86 == 6)
+ firstbank = 1;
else
- firstbank = 0;
+ firstbank = 0;
/*
* Check for non-fatal errors every MCE_RATE s
*/
schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
printk(KERN_INFO "Machine check exception polling timer started.\n");
+
return 0;
}
module_init(init_nonfatal_mce_checker);