{ CEF_XOP, 1 << 11, false }
};
-static bool has_feature(int feature, uint32_t ecx, uint32_t edx)
-{
- int i;
-
- for (i = 0; i < sizeof(features) / sizeof(features[0]); ++i) {
- if (features[i].feature == feature) {
- if (features[i].use_edx)
- return (edx & features[i].mask);
- else
- return (ecx & features[i].mask);
- }
- }
-
- return false;
-}
-
bool cpuid_is_supported(void)
{
- int ret = 0;
+ /* The following assembly code uses EAX as the return value,
+ * but we store the value of EAX into ret since GCC uses EAX
+ * as the return register for every C function. That's a double
+ * operation, but there's no other way to do this unless doing this
+ * function entirely in assembly.
+ *
+ * The following assembly code has been shamelessly stolen from:
+ * http://wiki.osdev.org/CPUID
+ * and converted to work with AT&T syntax.
+ *
+ * This check is to make sure that the compiler is actually compiling
+ * for 64-bit.
+ *
+ * The compiler can be 32-bit and the system 64-bit so the
+ * following would be true:
+ * #if defined(__x86_64) ...
+ */
+
+#if UINTPTR_MAX == 0xffffffffffffffff
+#define ASM_PUSHF "pushfq\n\t"
+#define ASM_POPF "popfq\n\t"
+#define ASM_PUSHEAX "pushq %%rax\n\t"
+#define ASM_POPEAX "popq %%rax\n\t"
+#define ASM_PUSHECX "pushq %%rcx\n\t"
+#elif UINTPTR_MAX == 0xffffffff
+#define ASM_PUSHF "pushfl\n\t"
+#define ASM_POPF "popfl\n\t"
+#define ASM_PUSHEAX "pushl %%eax\n\t"
+#define ASM_POPEAX "popl %%eax\n\t"
+#define ASM_PUSHECX "pushl %%ecx\n\t"
+#endif
+ int ret = 0;
asm volatile(
- "pushfl\n\t"
- "popl %%eax\n\t"
+ ASM_PUSHF
+ ASM_POPEAX
"movl %%eax, %%ecx\n\t"
"xorl $0x200000, %%eax\n\t"
- "pushl %%eax\n\t"
- "popfl\n\t"
-
- "pushfl\n\t"
- "popl %%eax\n\t"
+ ASM_PUSHEAX
+ ASM_POPF
+ ASM_PUSHF
+ ASM_POPEAX
"xorl %%ecx, %%eax\n\t"
"shrl $21, %%eax\n\t"
"andl $1, %%eax\n\t"
- "pushl %%ecx\n\t"
- "popfl\n\t"
-
+ ASM_PUSHECX
+ ASM_POPF
: "=a" (ret)
);
+#undef ASM_PUSHF
+#undef ASM_POPF
+#undef ASM_PUSHEAX
+#undef ASM_POPEAX
+#undef ASM_PUSHECX
+
return !!ret;
}
bool cpuid_has_feature(int feature, bool extended)
{
- uint32_t eax, ebx, ecx, edx;
+ uint32_t eax, ebx, ecx, edx, i;
if (!extended)
___cpuid(CPU_PROCINFO_AND_FEATUREBITS, &eax, &ebx, &ecx, &edx);
else
___cpuid(CPU_EXTENDED_PROC_INFO_FEATURE_BITS, &eax, &ebx, &ecx, &edx);
- return has_feature(feature, ecx, edx);
+ for (i = 0; i < sizeof(features) / sizeof(features[0]); ++i) {
+ if (features[i].feature == feature) {
+ if (features[i].use_edx)
+ return (edx & features[i].mask);
+ else
+ return (ecx & features[i].mask);
+ }
+ }
+ return false;
}
static const char *const cpuids[] = {
uint32_t i;
___cpuid(CPU_VENDORID, &i, &u.bufu32[0], &u.bufu32[2], &u.bufu32[1]);
- u.buf[12] = '\0';
-
for (i = 0; i < sizeof(cpuids) / sizeof(cpuids[0]); ++i) {
if (strncmp(cpuids[i], u.buf, 12) == 0) {
cputype = (cputype_t)i;
return;
if (info == CPU_PROC_BRAND_STRING) {
- ___cpuid(CPU_PROC_BRAND_STRING, &buf[0], &buf[1], &buf[2], &buf[3]);
- ___cpuid(CPU_PROC_BRAND_STRING_INTERNAL0, &buf[4], &buf[5], &buf[6], &buf[7]);
- ___cpuid(CPU_PROC_BRAND_STRING_INTERNAL1, &buf[8], &buf[9], &buf[10], &buf[11]);
+ static char cached[48] = { 0 };
+ if (cached[0] == '\0') {
+ ___cpuid(CPU_PROC_BRAND_STRING, &buf[0], &buf[1], &buf[2], &buf[3]);
+ ___cpuid(CPU_PROC_BRAND_STRING_INTERNAL0, &buf[4], &buf[5], &buf[6], &buf[7]);
+ ___cpuid(CPU_PROC_BRAND_STRING_INTERNAL1, &buf[8], &buf[9], &buf[10], &buf[11]);
+
+ memcpy(cached, buf, sizeof cached);
+ } else
+ memcpy(buf, cached, sizeof cached);
+
return;
} else if (info == CPU_HIGHEST_EXTENDED_FUNCTION_SUPPORTED) {
*buf = cpuid_highest_ext_func_supported();
}
#endif
-