[CRIU] [PATCH 08/12] cpuinfo: x86 -- Rework cpuinfo features fetching

Cyrill Gorcunov gorcunov at openvz.org
Tue Sep 30 10:18:51 PDT 2014


Instead of parsing procfs, let's use the native cpuid() instruction;
it's way faster. The downside is that the kernel may disable some of
the features via boot command line options even if they are present
in hardware, but that's fine for us -- we will be testing the
hardware CPU for features anyway.

The X86_FEATURE_ bits are gathered from two sources: the Linux kernel
and the CPU specifications.
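
In case it helps review: the cpuid()/cpuid_count() helpers used below
are just thin wrappers around the CPUID instruction. A minimal sketch
of the usual inline-assembly form follows (illustration only -- the
real helpers are the ones declared in arch/x86/include/asm/cpu.h and
may differ in detail):

    /* Illustration only; CRIU's actual helpers live in asm/cpu.h. */
    static inline void cpuid_count(unsigned int op, unsigned int count,
                                   unsigned int *eax, unsigned int *ebx,
                                   unsigned int *ecx, unsigned int *edx)
    {
            /* EAX selects the leaf, ECX the sub-leaf; results come
             * back in EAX/EBX/ECX/EDX. */
            *eax = op;
            *ecx = count;
            asm volatile("cpuid"
                         : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                         : "0" (*eax), "2" (*ecx)
                         : "memory");
    }

    /* Leaf-only variant: sub-leaf 0. */
    static inline void cpuid(unsigned int op, unsigned int *eax,
                             unsigned int *ebx, unsigned int *ecx,
                             unsigned int *edx)
    {
            cpuid_count(op, 0, eax, ebx, ecx, edx);
    }

The cpuid_eax()/cpuid_ecx()/cpuid_edx() helpers then simply return the
respective register for sub-leaf 0.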

Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
---
 arch/x86/cpu.c             | 178 ++++++++++++++++++++++++++++++++++++++++-----
 arch/x86/include/asm/cpu.h |  21 +++++-
 2 files changed, 179 insertions(+), 20 deletions(-)

diff --git a/arch/x86/cpu.c b/arch/x86/cpu.c
index 82d8e13647d1..12943e46927c 100644
--- a/arch/x86/cpu.c
+++ b/arch/x86/cpu.c
@@ -22,41 +22,181 @@
 #undef	LOG_PREFIX
 #define LOG_PREFIX "cpu: "
 
-const char * const x86_cap_flags[NCAPINTS_BITS] = {
-	[X86_FEATURE_FPU]                = "fpu",
-	[X86_FEATURE_FXSR]               = "fxsr",
-	[X86_FEATURE_XSAVE]              = "xsave",
-};
+static struct cpuinfo_x86 rt_cpu_info;
 
-static DECLARE_BITMAP(cpu_features, NCAPINTS_BITS);
+static void set_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
+{
+	if (likely(feature < NCAPINTS_BITS))
+		set_bit(feature, (unsigned long *)c->x86_capability);
+}
 
-static void cpu_set_feature(unsigned int feature)
+static void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
 {
 	if (likely(feature < NCAPINTS_BITS))
-		set_bit(feature, cpu_features);
+		clear_bit(feature, (unsigned long *)c->x86_capability);
 }
 
-bool cpu_has_feature(unsigned int feature)
+static int test_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
 {
 	if (likely(feature < NCAPINTS_BITS))
-		return test_bit(feature, cpu_features);
-	return false;
+		return test_bit(feature, (unsigned long *)c->x86_capability);
+	return 0;
 }
 
-static int proc_cpuinfo_match(char *tok)
+bool cpu_has_feature(unsigned int feature)
 {
-	if (!strcmp(tok, x86_cap_flags[X86_FEATURE_FXSR]))
-		cpu_set_feature(X86_FEATURE_FXSR);
-	else if (!strcmp(tok, x86_cap_flags[X86_FEATURE_XSAVE]))
-		cpu_set_feature(X86_FEATURE_XSAVE);
-	else if (!strcmp(tok, x86_cap_flags[X86_FEATURE_FPU]))
-		cpu_set_feature(X86_FEATURE_FPU);
+	return test_cpu_cap(&rt_cpu_info, feature);
+}
+
+static int cpu_init_cpuid(struct cpuinfo_x86 *c)
+{
+	/*
+	 * See cpu_detect() in the kernel; also
+	 * read the CPUID documentation, not only
+	 * the general SDM but the extended
+	 * instruction set reference as well.
+	 */
+
+	/* Get vendor name */
+	cpuid(0x00000000,
+	      (unsigned int *)&c->cpuid_level,
+	      (unsigned int *)&c->x86_vendor_id[0],
+	      (unsigned int *)&c->x86_vendor_id[8],
+	      (unsigned int *)&c->x86_vendor_id[4]);
+
+	if (!strcmp(c->x86_vendor_id, "GenuineIntel")) {
+		c->x86_vendor = X86_VENDOR_INTEL;
+	} else if (!strcmp(c->x86_vendor_id, "AuthenticAMD")) {
+		c->x86_vendor = X86_VENDOR_AMD;
+	} else {
+		pr_err("Unsupported CPU vendor %s\n",
+		       c->x86_vendor_id);
+		return -1;
+	}
+
+	c->x86_family = 4;
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+		c->x86_family = (eax >> 8) & 0xf;
+		c->x86_model = (eax >> 4) & 0xf;
+		c->x86_mask = eax & 0xf;
+
+		if (c->x86_family == 0xf)
+			c->x86_family += (eax >> 20) & 0xff;
+		if (c->x86_family >= 0x6)
+			c->x86_model += ((eax >> 16) & 0xf) << 4;
+
+		c->x86_capability[0] = edx;
+		c->x86_capability[4] = ecx;
+	}
+
+	/* Additional Intel-defined flags: level 0x00000007 */
+	if (c->cpuid_level >= 0x00000007) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[9] = ebx;
+		c->x86_capability[11] = ecx;
+	}
+
+	/* Extended state features: level 0x0000000d */
+	if (c->cpuid_level >= 0x0000000d) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[10] = eax;
+	}
+
+	/* AMD-defined flags: level 0x80000001 */
+	c->extended_cpuid_level = cpuid_eax(0x80000000);
+
+	if ((c->extended_cpuid_level & 0xffff0000) == 0x80000000) {
+		if (c->extended_cpuid_level >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+
+	/*
+	 * We don't care about scattered features for now,
+	 * otherwise look into init_scattered_cpuid_features()
+	 * in the kernel.
+	 */
+
+	if (c->extended_cpuid_level >= 0x80000004) {
+		unsigned int *v;
+		char *p, *q;
+		v = (unsigned int *)c->x86_model_id;
+		cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+		cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+		cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+		c->x86_model_id[48] = 0;
+
+		/*
+		 * Intel chips right-justify this string for some dumb reason;
+		 * undo that brain damage:
+		 */
+		p = q = &c->x86_model_id[0];
+		while (*p == ' ')
+			p++;
+		if (p != q) {
+			while (*p)
+				*q++ = *p++;
+			while (q <= &c->x86_model_id[48])
+				*q++ = '\0';	/* Zero-pad the rest */
+		}
+	}
+
+	/* On x86-64 NOPL is always present */
+	set_cpu_cap(c, X86_FEATURE_NOPL);
+
+	switch (c->x86_vendor) {
+	case X86_VENDOR_INTEL:
+		/*
+		 * Strictly speaking we need to read MSR_IA32_MISC_ENABLE
+		 * here, but that's impossible from ring 3.
+		 */
+		if (c->x86_family == 15) {
+			clear_cpu_cap(c, X86_FEATURE_REP_GOOD);
+			clear_cpu_cap(c, X86_FEATURE_ERMS);
+		} else if (c->x86_family == 6) {
+			/* On x86-64 rep is fine */
+			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+		}
+
+		/* See filter_cpuid_features in kernel */
+		if ((s32)c->cpuid_level < (s32)0x0000000d)
+			clear_cpu_cap(c, X86_FEATURE_XSAVE);
+		break;
+	case X86_VENDOR_AMD:
+		/*
+		 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+		 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+		 */
+		clear_cpu_cap(c, 0 * 32 + 31);
+		if (c->x86_family >= 0x10)
+			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+		if (c->x86_family == 0xf) {
+			u32 level;
+
+			/* On C+ stepping K8 rep microcode works well for copy/memset */
+			level = cpuid_eax(1);
+			if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+				set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+		}
+		break;
+	}
+
 	return 0;
 }
 
 int cpu_init(void)
 {
-	if (parse_cpuinfo_features(proc_cpuinfo_match))
+	if (cpu_init_cpuid(&rt_cpu_info))
 		return -1;
 
 	BUILD_BUG_ON(sizeof(struct xsave_struct) != XSAVE_SIZE);
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 5914c5c2b417..4d5f3ed84d09 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -176,7 +176,26 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
-extern const char * const x86_cap_flags[NCAPINTS_BITS];
+#define X86_FEATURE_VERSION		1
+
+enum {
+	X86_VENDOR_INTEL	= 0,
+	X86_VENDOR_AMD		= 1,
+
+	X86_VENDOR_MAX
+};
+
+struct cpuinfo_x86 {
+	u8			x86_family;
+	u8			x86_vendor;
+	u8			x86_model;
+	u8			x86_mask;
+	u32			x86_capability[NCAPINTS];
+	u32			extended_cpuid_level;
+	int			cpuid_level;
+	char			x86_vendor_id[16];
+	char			x86_model_id[64];
+};
 
 extern bool cpu_has_feature(unsigned int feature);
 extern int cpu_init(void);
-- 
1.9.3


