1 /**
2  * x86 CPU Identification tool
3  *
4  * This was initially used internally, so it's pretty unfriendly.
5  *
6  * The best way to use this module would be:
7  * ---
8  * CPUINFO cpu;          // CPUINFO.init or memset
9  * ddcpuid_leaves(cpu);  // Get maximum CPUID leaves (mandatory step before info)
10  * ddcpuid_cpuinfo(cpu); // Fill CPUINFO structure (optional)
11  * ---
12  *
13  * Then checking the corresponding field:
14  * ---
15  * if (cpu.amx_xfd) {
16  *   // Intel AMX with AMX_XFD is available
17  * }
18  * ---
19  *
20  * See the CPUINFO structure for available fields.
21  *
 * To further understand these fields, consult the vendor's technical manuals.
23  *
24  * Authors: dd86k (dd@dax.moe)
25  * Copyright: © 2016-2022 dd86k
26  * License: MIT
27  */
28 module ddcpuid;
29 
30 // NOTE: GAS syntax crash course
//       While ';' and '\n\t' are accepted as statement separators, GNU
//       typically recommends the latter for readability of the generated
//       assembly listing. (It should not affect the produced binary.)
33 //       syntax:
34 //       asm { "statement\n\t"
35 //             : "constraint" (output), ...
36 //             : "constraint" (input), ...
37 //             : "clobbers", ... }
38 //       constraints:
39 //       - "r": register
40 //       - "a","b","c","d": AX, BX, CX, or DX respectively
41 //       - "m": memory
42 //       - "i": immediate (known at compile-time)
43 //       - "0",...: Use same constraint as operand n
44 //       constraint modifiers (for output):
45 //       - "=": Write-only
46 //       - "+": Read-write
47 
48 @system:
49 extern (C):
50 
51 version (X86)
52 	enum DDCPUID_PLATFORM = "i686"; /// Target platform
53 else version (X86_64)
54 	enum DDCPUID_PLATFORM = "amd64"; /// Target platform
55 else static assert(0, "Unsupported platform");
56 
57 version (DigitalMars) {
58 	version = DMD;	// DMD compiler
59 	version = DMDLDC;	// DMD or LDC compilers
60 } else version (GNU) {
61 	version = GDC;	// GDC compiler
62 } else version (LDC) {
63 	version = DMDLDC;	// DMD or LDC compilers
64 } else static assert(0, "Unsupported compiler");
65 
66 enum DDCPUID_VERSION   = "0.21.0";	/// Library version
67 private enum CACHE_LEVELS = 6;	/// For buffer
68 private enum CACHE_MAX_LEVEL = CACHE_LEVELS - 1;
69 
70 version (PrintInfo) {
71 	pragma(msg, "CPUINFO.sizeof\t", CPUINFO.sizeof);
72 	pragma(msg, "CACHE.sizeof\t", CACHEINFO.sizeof);
73 }
74 
75 /// Make a bit mask of one bit at n position
76 private
77 template BIT(int n) if (n <= 31) { enum uint BIT = 1 << n; }
78 
79 /// Vendor ID template
80 // Little-endian only, unless x86 gets any crazier
81 private
82 template ID(char[4] c) {
83 	enum uint ID = c[0] | c[1] << 8 | c[2] << 16 | c[3] << 24;
84 }
85 
86 /// Vendor ID.
87 enum Vendor {
88 	Other = 0,
89 	Intel = ID!"Genu",	/// `"GenuineIntel"`: Intel
90 	AMD   = ID!"Auth",	/// `"AuthenticAMD"`: AMD
91 	VIA   = ID!"VIA ",	/// `"VIA VIA VIA "`: VIA
92 }
93 
94 /// Virtual Vendor ID, used as the interface type.
// NOTE: bhyve does not emit CPUID bits within 0x40000000, so it is not supported.
96 enum VirtVendor {
97 	Other = 0,
98 	KVM        = ID!"KVMK",	/// `"KVMKVMKVM\0\0\0"`: KVM
99 	HyperV     = ID!"Micr",	/// `"Microsoft Hv"`: Hyper-V interface
100 	VBoxHyperV = ID!"VBox",	/// `"VBoxVBoxVBox"`: VirtualBox's Hyper-V interface
101 	VBoxMin    = 0,	/// Unset: VirtualBox minimal interface
102 }
103 
104 /// Registers structure used with the ddcpuid function.
105 struct REGISTERS {
106 	union {
107 		uint eax;
108 		ushort ax;
109 		struct { ubyte al, ah; }
110 	}
111 	union {
112 		uint ebx;
113 		ushort bx;
114 		struct { ubyte bl, bh; }
115 	}
116 	union {
117 		uint ecx;
118 		ushort cx;
119 		struct { ubyte cl, ch; }
120 	}
121 	union {
122 		uint edx;
123 		ushort dx;
124 		struct { ubyte dl, dh; }
125 	}
126 }
127 ///
128 @system unittest {
129 	REGISTERS regs = void;
130 	regs.eax = 0xaabbccdd;
131 	assert(regs.eax == 0xaabbccdd);
132 	assert(regs.ax  == 0xccdd);
133 	assert(regs.al  == 0xdd);
134 	assert(regs.ah  == 0xcc);
135 }
136 
137 /// Cache entry.
138 struct CACHEINFO { align(1):
139 	this(ubyte level_, char type_, uint kbsize_, ushort shared_,
140 		ushort ways_, ushort parts_, ushort lineSize_, uint sets_) {
141 		level = level_;
142 		type = type_;
143 		size = kbsize_;
144 		sharedCores = shared_;
145 		ways = ways_;
146 		partitions = parts_;
147 		lineSize = lineSize_;
148 		sets = sets_;
149 		features = 0;
150 	}
151 	//TODO: Sort fields (totalSize, coresShared, ways, partitions, lineSize, sets)
152 	ushort lineSize;	/// Size of the line in bytes.
153 	union {
154 		ushort partitions;	/// Number of partitions.
155 		ushort lines;	/// Legacy name of partitions.
156 	}
	ushort ways;	/// Number of ways of associativity.
158 	uint sets; /// Number of cache sets. (Entries)
159 	/// Cache size in kilobytes.
160 	// (Ways + 1) * (Partitions + 1) * (LineSize + 1) * (Sets + 1)
161 	// (EBX[31:22] + 1) * (EBX[21:12] + 1) * (EBX[11:0] + 1) * (ECX + 1)
162 	uint size;
163 	/// Number of CPU cores sharing this cache.
164 	ushort sharedCores;
165 	/// Cache feature, bit flags.
166 	/// - Bit 0: Self Initializing cache
167 	/// - Bit 1: Fully Associative cache
168 	/// - Bit 2: No Write-Back Invalidation (toggle)
	/// - Bit 3: Cache Inclusiveness (toggle)
170 	/// - Bit 4: Complex Cache Indexing (toggle)
171 	ushort features;
172 	ubyte level;	/// Cache level: L1, L2, etc.
173 	char type = 0;	/// Type entry character: 'D'=Data, 'I'=Instructions, 'U'=Unified
174 }
175 
176 /// Vendor string structure.
177 struct VendorString { align(1):
178 	union {
179 		struct { uint ebx, edx, ecx; }
180 		char[12] string_;
181 	}
182 	Vendor id;	/// Validated vendor ID
183 }
184 
185 @system unittest {
186 	VendorString s;
187 	s.string_ = "AuthenticAMD";
188 	assert(s.ebx == ID!"Auth");
189 	assert(s.edx == ID!"enti");
190 	assert(s.ecx == ID!"cAMD");
191 }
192 
193 /// Virtualization vendor string structure.
194 struct VirtVendorString { align(1):
195 	union {
196 		struct { uint ebx, ecx, edx; }
197 		char[12] string_;
198 	}
199 	VirtVendor id;	/// Validated vendor ID
200 }
201 
202 @system unittest {
203 	VirtVendorString s;
204 	s.string_ = "AuthenticAMD";
205 	assert(s.ebx == ID!"Auth");
206 	assert(s.ecx == ID!"enti");
207 	assert(s.edx == ID!"cAMD");
208 }
209 
210 /// CPU information structure
211 struct CPUINFO { align(1):
212 	uint maxLeaf;	/// Highest cpuid leaf
213 	uint maxLeafVirt;	/// Highest cpuid virtualization leaf
214 	uint maxLeafExtended;	/// Highest cpuid extended leaf
215 	
216 	// Vendor/brand strings
217 	
218 	VendorString vendor;	/// Vendor string and id
219 	
220 //	const(char) *microArchitecture;	/// Microarchitecture name string
221 	
222 	union {
223 		private uint[12] brand32;	// For init only
224 		char[48] brandString;	/// Processor Brand String
225 	}
226 	ubyte brandIndex;	/// Brand string index
227 	private ubyte __pad;
228 	
	uint cacheLevels;	/// Number of valid entries in the cache array
	CACHEINFO[CACHE_LEVELS] cache;	/// Cache information entries
231 	
232 	ushort logicalCores;	/// Logical cores in this processor
233 	ushort physicalCores;	/// Physical cores in this processor
234 	
235 	// Identifier
236 	
237 	uint identifier;	/// Raw identifier (CPUID.01h.EAX)
238 	ushort family;	/// Effective family identifier
239 	ushort model;	/// Effective model identifier
240 	ubyte familyBase;	/// Base family identifier
241 	ubyte familyExtended;	/// Extended family identifier
242 	ubyte modelBase;	/// Base model identifier
243 	ubyte modelExtended;	/// Extended model identifier
244 	ubyte stepping;	/// Stepping revision
245 	ubyte type;	/// Processor type number
246 	const(char) *typeString;	/// Processor type string.
247 	
248 	//
249 	// Extensions
250 	//
251 	
252 	bool fpu;	/// On-Chip x87 FPU
253 	bool f16c;	/// Float16 Conversions
254 	bool mmx;	/// MMX
255 	bool mmxExtended;	/// MMX Extended
256 	bool _3DNow;	/// 3DNow!
257 	bool _3DNowExtended;	/// 3DNow! Extended
258 	bool aes_ni;	/// Advanced Encryption Standard New Instructions
	bool sha;	/// SHA extensions (SHA-1 and SHA-256)
260 	bool bmi1;	/// BMI1
261 	bool bmi2;	/// BMI2
262 	bool x86_64;	/// 64-bit mode (Long mode)
263 	bool lahf64;	/// LAHF+SAHF in 64-bit mode
264 	bool waitpkg;	/// User Level Monitor Wait (UMWAIT)
265 	bool xop;	/// AMD eXtended OPerations
266 	bool tbm;	/// Trailing Bit Manipulation
267 	bool adx;	/// Multi-precision Add-Carry (ADCX+ADOX)
268 	
269 	//
270 	// SSE
271 	//
272 	
273 	bool sse;	/// Streaming SIMD Extensions
274 	bool sse2;	/// SSE2
275 	bool sse3;	/// SSE3
276 	bool ssse3;	/// SSSE3
277 	bool sse41;	/// SSE4.1
278 	bool sse42;	/// SSE4.2
279 	bool sse4a;	/// SSE4a
280 	bool fma;	/// Fused Multiply-Add (FMA)
281 	bool fma4;	/// FMA4
282 	private bool __pad_2;
283 	
284 	//
285 	// AVX
286 	//
287 	
288 	bool avx;	/// Advanced Vector eXtension
289 	bool avx2;	/// AVX2
	bool avx512f;	/// AVX512F (Foundation)
291 	bool avx512er;	/// AVX512_ER
292 	bool avx512pf;	/// AVX512_PF
293 	bool avx512cd;	/// AVX512_CD
294 	bool avx512dq;	/// AVX512_DQ
295 	bool avx512bw;	/// AVX512_BW
296 	bool avx512vl;	/// AVX512_VL
297 	bool avx512_ifma;	/// AVX512_IFMA
298 	bool avx512_vbmi;	/// AVX512_VBMI
299 	bool avx512_vbmi2;	/// AVX512_VBMI2
300 	bool avx512_gfni;	/// AVX512_GFNI
301 	bool avx512_vaes;	/// AVX512_VAES
302 	bool avx512_vnni;	/// AVX512_VNNI
303 	bool avx512_bitalg;	/// AVX512_BITALG
304 	bool avx512_vpopcntdq;	/// AVX512_VPOPCNTDQ
305 	bool avx512_4vnniw;	/// AVX512_4VNNIW
306 	bool avx512_4fmaps;	/// AVX512_4FMAPS
307 	bool avx512_bf16;	/// AVX512_BF16
308 	bool avx512_vp2intersect;	/// AVX512_VP2INTERSECT
309 	private bool __pad_3;
310 	
311 	//
312 	// AMX
313 	//
314 	
315 	bool amx;	/// Advanced Matrix eXtension
316 	bool amx_bf16;	/// AMX_BF16
317 	bool amx_int8;	/// AMX_INT8
318 	bool amx_xtilecfg;	/// AMX_XTILECFG
319 	bool amx_xtiledata;	/// AMX_XTILEDATA
320 	bool amx_xfd;	/// AMX_XFD
321 	
322 	//
323 	// SGX
324 	//
325 	
326 	bool sgx;	/// If SGX is supported (and enabled)
327 	bool sgx1;	/// SGX1
328 	bool sgx2;	/// SGX2
329 	ubyte sgxMaxSize;	/// 2^n maximum enclave size in non-64-bit
330 	ubyte sgxMaxSize64;	/// 2^n maximum enclave size in 64-bit
331 	private bool __pad_4;
332 	
333 	//
334 	// Additional instructions.
335 	//
336 	
337 	bool pclmulqdq;	/// PCLMULQDQ instruction
338 	bool monitor;	/// MONITOR and MWAIT instructions
339 	ushort mwaitMin;	/// (With MONITOR+MWAIT) MWAIT minimum size in bytes
340 	ushort mwaitMax;	/// (With MONITOR+MWAIT) MWAIT maximum size in bytes
341 	bool cmpxchg8b;	/// CMPXCHG8B
342 	bool cmpxchg16b;	/// CMPXCHG16B instruction
343 	bool movbe;	/// MOVBE instruction
344 	bool rdrand;	/// RDRAND instruction
345 	bool rdseed;	/// RDSEED instruction
	bool rdmsr;	/// RDMSR and WRMSR instructions (MSR support)
347 	bool sysenter;	/// SYSENTER and SYSEXIT instructions
348 	bool rdtsc;	/// RDTSC instruction
349 	bool rdtscDeadline;	/// (With RDTSC) IA32_TSC_DEADLINE MSR
	bool rdtscInvariant;	/// (With RDTSC) Timestamp counter invariant across C/P/T-state transitions
351 	bool rdtscp;	/// RDTSCP instruction
352 	bool rdpid;	/// RDPID instruction
353 	bool cmov;	/// CMOVcc instruction
354 	bool lzcnt;	/// LZCNT instruction
355 	bool popcnt;	/// POPCNT instruction
356 	bool xsave;	/// XSAVE and XRSTOR instructions
357 	bool osxsave;	/// OSXSAVE and XGETBV instructions
358 	bool fxsr;	/// FXSAVE and FXRSTOR instructions
359 	bool pconfig;	/// PCONFIG instruction
360 	bool cldemote;	/// CLDEMOTE instruction
361 	bool movdiri;	/// MOVDIRI instruction
362 	bool movdir64b;	/// MOVDIR64B instruction
363 	bool enqcmd;	/// ENQCMD instruction
364 	bool syscall;	/// SYSCALL and SYSRET instructions
365 	bool monitorx;	/// MONITORX and MWAITX instructions
366 	bool skinit;	/// SKINIT instruction
367 	bool serialize;	/// SERIALIZE instruction
368 	private bool __pad_5;
369 	
370 	// Features.
371 	
372 	bool eist;	/// Intel SpeedStep/AMD PowerNow/AMD Cool'n'Quiet
373 	bool turboboost;	/// Intel TurboBoost/AMD CorePerformanceBoost
374 	bool turboboost30;	/// Intel TurboBoost 3.0
375 	bool smx;	/// Intel TXT
376 	bool htt;	/// (HTT) HyperThreading Technology, or just SMT available
377 	private bool __pad_6;
378 	
379 	/// Cache-related.
380 	
381 	bool clflush;	/// CLFLUSH instruction
382 	ubyte clflushLinesize;	/// Linesize of CLFLUSH in bytes
	bool clflushopt;	/// CLFLUSHOPT instruction
384 	bool cnxtId;	/// L1 Context ID
385 	bool ss;	/// SelfSnoop
386 	bool prefetchw;	/// PREFETCHW instruction
387 	bool invpcid;	/// INVPCID instruction
388 	bool wbnoinvd;	/// WBNOINVD instruction
389 	
390 	/// ACPI information.
391 	
392 	bool apci;	/// ACPI
393 	bool apic;	/// APIC
394 	bool x2apic;	/// x2APIC
395 	bool arat;	/// Always-Running-APIC-Timer
396 	bool tm;	/// Thermal Monitor
397 	bool tm2;	/// Thermal Monitor 2
398 	ubyte apicMaxId;	/// Maximum APIC ID
399 	ubyte apicId;	/// Initial APIC ID (running core where CPUID was called)
400 	
401 	//
402 	// Virtualization features.
403 	//
404 	
405 	bool virtualization;	/// Intel VT-x/AMD-V
406 	ubyte virtVersion;	/// (AMD) Virtualization platform version
	bool vme;	/// Virtual 8086 Mode Enhancements (enhanced vm8086)
408 	bool apicv;	/// (AMD) APICv. Intel's is available via a MSR.
409 	
	VirtVendorString virtVendor;	/// Virtualization vendor string and id
411 	
412 	struct VBox { align(1):
413 		uint tsc_freq_khz;	/// (VBox) Timestamp counter frequency in KHz
		uint apic_freq_khz;	/// (VBox) Paravirtualization API frequency in KHz
415 	}
416 	VBox vbox;
417 	
418 	struct KVM { align(1):
419 		bool feature_clocksource;	/// (KVM) kvmclock interface
420 		bool feature_nop_io_delay;	/// (KVM) No delays required on I/O operations
421 		bool feature_mmu_op;	/// (KVM) Deprecated
422 		bool feature_clocksource2;	/// (KVM) Remapped kvmclock interface
423 		bool feature_async_pf;	/// (KVM) Asynchronous Page Fault
424 		bool feature_steal_time;	/// (KVM) Steal time
425 		bool feature_pv_eoi;	/// (KVM) Paravirtualized End Of the Interrupt handler
426 		bool feature_pv_unhault;	/// (KVM) Paravirtualized spinlock
427 		bool feature_pv_tlb_flush;	/// (KVM) Paravirtualized TLB flush
428 		bool feature_async_pf_vmexit;	/// (KVM) Asynchronous Page Fault at VM exit
		bool feature_pv_send_ipi;	/// (KVM) Paravirtualized send of inter-processor interrupts (IPI)
430 		bool feature_pv_poll_control;	/// (KVM) Host-side polling on HLT
431 		bool feature_pv_sched_yield;	/// (KVM) paravirtualized scheduler yield
		bool feature_clocsource_stable_bit;	/// (KVM) kvmclock stable bit (no guest-side clock warps expected)
433 		bool hint_realtime;	/// (KVM) vCPUs are never preempted for an unlimited amount of time
434 		private bool res2;
435 	}
436 	KVM kvm;
437 	
438 	struct HyperV { align(1):
439 		ushort guest_vendor_id;	/// (Hyper-V) Paravirtualization Guest Vendor ID
440 		ushort guest_build;	/// (Hyper-V) Paravirtualization Guest Build number
441 		ubyte guest_os;	/// (Hyper-V) Paravirtualization Guest OS ID
442 		ubyte guest_major;	/// (Hyper-V) Paravirtualization Guest OS Major version
443 		ubyte guest_minor;	/// (Hyper-V) Paravirtualization Guest OS Minor version
444 		ubyte guest_service;	/// (Hyper-V) Paravirtualization Guest Service ID
445 		bool guest_opensource;	/// (Hyper-V) Paravirtualization Guest additions open-source
446 		bool base_feat_vp_runtime_msr;	/// (Hyper-V) Virtual processor runtime MSR
447 		bool base_feat_part_time_ref_count_msr;	/// (Hyper-V) Partition reference counter MSR
448 		bool base_feat_basic_synic_msrs;	/// (Hyper-V) Basic Synthetic Interrupt Controller MSRs
449 		bool base_feat_stimer_msrs;	/// (Hyper-V) Synthetic Timer MSRs
450 		bool base_feat_apic_access_msrs;	/// (Hyper-V) APIC access MSRs (EOI, ICR, TPR)
451 		bool base_feat_hypercall_msrs;	/// (Hyper-V) Hypercalls API MSRs
452 		bool base_feat_vp_id_msr;	/// (Hyper-V) vCPU index MSR
453 		bool base_feat_virt_sys_reset_msr;	/// (Hyper-V) Virtual system reset MSR
454 		bool base_feat_stat_pages_msr;	/// (Hyper-V) Statistic pages MSRs
455 		bool base_feat_part_ref_tsc_msr;	/// (Hyper-V) Partition reference timestamp counter MSR
456 		bool base_feat_guest_idle_state_msr;	/// (Hyper-V) Virtual guest idle state MSR
457 		bool base_feat_timer_freq_msrs;	/// (Hyper-V) Timer frequency MSRs (TSC and APIC)
458 		bool base_feat_debug_msrs;	/// (Hyper-V) Debug MSRs
459 		bool part_flags_create_part;	/// (Hyper-V) Partitions can be created
460 		bool part_flags_access_part_id;	/// (Hyper-V) Partitions IDs can be accessed
461 		bool part_flags_access_memory_pool;	/// (Hyper-V) Memory pool can be accessed
462 		bool part_flags_adjust_msg_buffers;	/// (Hyper-V) Possible to adjust message buffers
463 		bool part_flags_post_msgs;	/// (Hyper-V) Possible to send messages
464 		bool part_flags_signal_events;	/// (Hyper-V) Possible to signal events
465 		bool part_flags_create_port;	/// (Hyper-V) Possible to create ports
466 		bool part_flags_connect_port;	/// (Hyper-V) Possible to connect to ports
467 		bool part_flags_access_stats;	/// (Hyper-V) Can access statistics
468 		bool part_flags_debugging;	/// (Hyper-V) Debugging features available
469 		bool part_flags_cpu_mgmt;	/// (Hyper-V) Processor management available
470 		bool part_flags_cpu_profiler;	/// (Hyper-V) Processor profiler available
471 		bool part_flags_expanded_stack_walk;	/// (Hyper-V) Extended stack walking available
472 		bool part_flags_access_vsm;	/// (Hyper-V) Virtual system monitor available
473 		bool part_flags_access_vp_regs;	/// (Hyper-V) Virtual private registers available
474 		bool part_flags_extended_hypercalls;	/// (Hyper-V) Extended hypercalls API available
475 		bool part_flags_start_vp;	/// (Hyper-V) Virtual processor has started
476 		bool pm_max_cpu_power_state_c0;	/// (Hyper-V) Processor C0 is maximum state
477 		bool pm_max_cpu_power_state_c1;	/// (Hyper-V) Processor C1 is maximum state
478 		bool pm_max_cpu_power_state_c2;	/// (Hyper-V) Processor C2 is maximum state
479 		bool pm_max_cpu_power_state_c3;	/// (Hyper-V) Processor C3 is maximum state
480 		bool pm_hpet_reqd_for_c3;	/// (Hyper-V) High-precision event timer required for C3 state
481 		bool misc_feat_mwait;	/// (Hyper-V) MWAIT instruction available for guest
482 		bool misc_feat_guest_debugging;	/// (Hyper-V) Guest supports debugging
483 		bool misc_feat_perf_mon;	/// (Hyper-V) Performance monitor support available
		bool misc_feat_pcpu_dyn_part_event;	/// (Hyper-V) Physical CPU dynamic partitioning event available
485 		bool misc_feat_xmm_hypercall_input;	/// (Hyper-V) Hypercalls via XMM registers available
486 		bool misc_feat_guest_idle_state;	/// (Hyper-V) Virtual guest supports idle state
487 		bool misc_feat_hypervisor_sleep_state;	/// (Hyper-V) Hypervisor supports sleep
488 		bool misc_feat_query_numa_distance;	/// (Hyper-V) NUMA distance query available
489 		bool misc_feat_timer_freq;	/// (Hyper-V) Determining timer frequencies available
490 		bool misc_feat_inject_synmc_xcpt;	/// (Hyper-V) Support for injecting synthetic machine checks
491 		bool misc_feat_guest_crash_msrs;	/// (Hyper-V) Guest crash MSR available
492 		bool misc_feat_debug_msrs;	/// (Hyper-V) Debug MSR available
493 		bool misc_feat_npiep1;	/// (Hyper-V) Documentation unavailable
494 		bool misc_feat_disable_hypervisor;	/// (Hyper-V) Hypervisor can be disabled
495 		bool misc_feat_ext_gva_range_for_flush_va_list;	/// (Hyper-V) Extended guest virtual address (GVA) ranges for FlushVirtualAddressList available
496 		bool misc_feat_hypercall_output_xmm;	/// (Hyper-V) Returning hypercall output via XMM registers available
497 		bool misc_feat_sint_polling_mode;	/// (Hyper-V) Synthetic interrupt source polling mode available
		bool misc_feat_hypercall_msr_lock;	/// (Hyper-V) Hypercall MSR lock feature available
499 		bool misc_feat_use_direct_synth_msrs;	/// (Hyper-V) Possible to directly use synthetic MSRs
500 		bool hint_hypercall_for_process_switch;	/// (Hyper-V) Guest should use the Hypercall API for address space switches rather than MOV CR3
501 		bool hint_hypercall_for_tlb_flush;	/// (Hyper-V) Guest should use the Hypercall API for local TLB flushes rather than INVLPG/MOV CR3
502 		bool hint_hypercall_for_tlb_shootdown;	/// (Hyper-V) Guest should use the Hypercall API for inter-CPU TLB flushes rather than inter-processor-interrupts (IPI)
503 		bool hint_msr_for_apic_access;	/// (Hyper-V) Guest should use the MSRs for APIC access (EOI, ICR, TPR) rather than memory-mapped input/output (MMIO)
504 		bool hint_msr_for_sys_reset;	/// (Hyper-V) Guest should use the hypervisor-provided MSR for a system reset instead of traditional methods
		bool hint_relax_time_checks;	/// (Hyper-V) Guest should relax timer-related checks (watchdogs/deadman timeouts) that rely on timely delivery of external interrupts
506 		bool hint_dma_remapping;	/// (Hyper-V) Guest should use the direct memory access (DMA) remapping
507 		bool hint_interrupt_remapping;	/// (Hyper-V) Guest should use the interrupt remapping
508 		bool hint_x2apic_msrs;	/// (Hyper-V) Guest should use the X2APIC MSRs rather than memory mapped input/output (MMIO)
509 		bool hint_deprecate_auto_eoi;	/// (Hyper-V) Guest should deprecate Auto EOI (End Of Interrupt) features
510 		bool hint_synth_cluster_ipi_hypercall;	/// (Hyper-V) Guest should use the SyntheticClusterIpi Hypercall
511 		bool hint_ex_proc_masks_interface;	/// (Hyper-V) Guest should use the newer ExProcessMasks interface over ProcessMasks
512 		bool hint_nested_hyperv;	/// (Hyper-V) Hyper-V instance is nested within a Hyper-V partition
513 		bool hint_int_for_mbec_syscalls;	/// (Hyper-V) Guest should use the INT instruction for Mode Based Execution Control (MBEC) system calls
514 		bool hint_nested_enlightened_vmcs_interface;	/// (Hyper-V) Guest should use enlightened Virtual Machine Control Structure (VMCS) interfaces and nested enlightenment
515 		bool host_feat_avic;	/// (Hyper-V) Hypervisor is using the Advanced Virtual Interrupt Controller (AVIC) overlay
516 		bool host_feat_msr_bitmap;	/// (Hyper-V) Hypervisor is using MSR bitmaps
517 		bool host_feat_perf_counter;	/// (Hyper-V) Hypervisor supports the architectural performance counter
518 		bool host_feat_nested_paging;	/// (Hyper-V) Hypervisor is using nested paging
519 		bool host_feat_dma_remapping;	/// (Hyper-V) Hypervisor is using direct memory access (DMA) remapping
520 		bool host_feat_interrupt_remapping;	/// (Hyper-V) Hypervisor is using interrupt remapping
521 		bool host_feat_mem_patrol_scrubber;	/// (Hyper-V) Hypervisor's memory patrol scrubber is present
522 		bool host_feat_dma_prot_in_use;	/// (Hyper-V) Hypervisor is using direct memory access (DMA) protection
523 		bool host_feat_hpet_requested;	/// (Hyper-V) Hypervisor requires a High Precision Event Timer (HPET)
524 		bool host_feat_stimer_volatile;	/// (Hyper-V) Hypervisor's synthetic timers are volatile
525 	}
526 	HyperV hv;
527 	
528 	//
529 	// Memory features.
530 	//
531 	
532 	bool pae;	/// Physical Address Extension 
533 	bool pse;	/// Page Size Extension
534 	bool pse36;	/// 36-bit PSE
535 	bool page1gb;	/// 1GiB pages in 4-level paging and higher
536 	bool mtrr;	/// Memory Type Range Registers
537 	bool pat;	/// Page Attribute Table
538 	bool pge;	/// Page Global Bit
539 	bool dca;	/// Direct Cache Access
540 	union {
541 		uint tsx;	/// Intel TSX. If set, has one of HLE, RTM, or TSXLDTRK.
542 		struct {
543 			bool hle;	/// (TSX) Hardware Lock Elision
544 			bool rtm;	/// (TSX) Restricted Transactional Memory
545 			bool tsxldtrk;	/// (TSX) Suspend Load Address Tracking
546 		}
547 	}
	bool nx;	/// No-eXecute bit (Intel XD/AMD NX)
549 	bool smep;	/// Supervisor Mode Execution Protection
550 	bool smap;	/// Supervisor Mode Access Protection
	bool pku;	/// Protection Keys for User-mode pages (PKU)
552 	bool _5pl;	/// 5-level paging
553 	bool fsrepmov;	/// Fast Short REP MOVSB optimization
554 	bool lam;	/// Linear Address Masking
555 	ubyte physicalBits;	/// Memory physical bits
556 	ubyte linearBits;	/// Memory linear bits
557 	private bool __pad_8;
558 	
559 	//
560 	// Debugging features.
561 	//
562 	
563 	bool mca;	/// Machine Check Architecture
564 	bool mce;	/// Machine Check Exception
	bool de;	/// Debugging Extensions
566 	bool ds;	/// Debug Store
567 	bool ds_cpl;	/// Debug Store for Current Privilege Level
568 	bool dtes64;	/// 64-bit Debug Store area
569 	bool pdcm;	/// Perfmon And Debug Capability
570 	bool sdbg;	/// Silicon Debug
571 	bool pbe;	/// Pending Break Enable
572 	private bool __pad_9;
573 	
574 	/// Security features and mitigations.
575 	// NOTE: IA32_CORE_CAPABILITIES is currently empty
576 	
577 	bool ia32_arch_capabilities;	/// IA32_ARCH_CAPABILITIES MSR
578 	bool ibpb;	/// Indirect Branch Predictor Barrier
579 	bool ibrs;	/// Indirect Branch Restricted Speculation
580 	bool ibrsAlwaysOn;	/// IBRS always enabled
581 	bool ibrsPreferred;	/// IBRS preferred over software solution
582 	bool stibp;	/// Single Thread Indirect Branch Predictors
583 	bool stibpAlwaysOn;	/// STIBP always enabled
584 	bool ssbd;	/// Speculative Store Bypass Disable
585 	bool l1dFlush;	/// L1D Cache Flush
586 	bool md_clear;	/// MDS mitigation
587 	bool cetIbt;	/// (Control-flow Enforcement Technology) Indirect Branch Tracking 
588 	bool cetSs;	/// (Control-flow Enforcement Technology) Shadow Stack
589 	
590 	/// Miscellaneous features.
591 	
592 	bool psn;	/// Processor Serial Number (Pentium III only)
593 	bool pcid;	/// PCID
594 	bool xtpr;	/// xTPR
595 	bool fsgsbase;	/// FS and GS register base
596 	bool uintr;	/// User Interrupts
597 	private bool __pad_10;
598 }
599 
// The cache-type field (EAX[4:0]) can encode values 0-31, but there aren't
// that many cache types, so the lookup is limited to 0-7.
private enum CACHE_MASK = 7; // Max 31
603 private immutable const(char)* CACHE_TYPE = "?DIU????";
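
// Example of the descriptor type lookup above: cache-type values 1, 2, and 3
// map to Data, Instructions, and Unified respectively.
@system unittest {
	assert(CACHE_TYPE[1 & CACHE_MASK] == 'D');
	assert(CACHE_TYPE[2 & CACHE_MASK] == 'I');
	assert(CACHE_TYPE[3 & CACHE_MASK] == 'U');
}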
604 
605 private
606 immutable const(char)*[4] PROCESSOR_TYPE = [ "Original", "OverDrive", "Dual", "Reserved" ];
607 
608 version (Trace) {
609 	import core.stdc.stdio;
610 	import core.stdc.stdarg;
611 	
612 	private extern (C) int putchar(int);
613 	
614 	/// Trace application
615 	void trace(string func = __FUNCTION__)(const(char) *fmt, ...) {
616 		va_list va;
617 		va_start(va, fmt);
618 		printf("TRACE:%s: ", func.ptr);
619 		vprintf(fmt, va);
620 		putchar('\n');
621 	}
622 }
623 
624 /// Test if a bit is set.
625 /// Params:
626 /// 	val = 32-bit content.
627 /// 	pos = Bit position.
628 /// Returns: True if bit set.
629 pragma(inline, true)
630 private bool bit(uint val, int pos) pure @safe {
631 	return (val & (1 << pos)) != 0;
632 }
633 
634 @safe unittest {
635 	assert( bit(0b10, 1));
636 	assert(!bit(   0, 1));
637 }
638 
// Prevent inlining; GDC and LDC may otherwise inline the assembler code.
640 pragma(inline, false):
641 
642 /// Query processor with CPUID.
643 /// Params:
644 ///   regs = REGISTERS structure
645 ///   level = Leaf (EAX)
646 ///   sublevel = Sub-leaf (ECX)
647 void ddcpuid_id(ref REGISTERS regs, uint level, uint sublevel = 0) {
648 	version (DMD) {
649 		version (X86) asm {
650 			mov EDI, regs;
651 			mov EAX, level;
652 			mov ECX, sublevel;
653 			cpuid;
654 			mov [EDI + regs.eax.offsetof], EAX;
655 			mov [EDI + regs.ebx.offsetof], EBX;
656 			mov [EDI + regs.ecx.offsetof], ECX;
657 			mov [EDI + regs.edx.offsetof], EDX;
658 		} else version (X86_64) asm {
659 			mov RDI, regs;
660 			mov EAX, level;
661 			mov ECX, sublevel;
662 			cpuid;
663 			mov [RDI + regs.eax.offsetof], EAX;
664 			mov [RDI + regs.ebx.offsetof], EBX;
665 			mov [RDI + regs.ecx.offsetof], ECX;
666 			mov [RDI + regs.edx.offsetof], EDX;
667 		}
668 	} else version (GDC) {
669 		asm {
670 			"cpuid"
671 			: "=a" (regs.eax), "=b" (regs.ebx), "=c" (regs.ecx), "=d" (regs.edx)
672 			: "a" (level), "c" (sublevel);
673 		}
674 	} else version (LDC) {
675 		version (X86) asm {
676 			lea EDI, regs;
677 			mov EAX, level;
678 			mov ECX, sublevel;
679 			cpuid;
680 			mov [EDI + regs.eax.offsetof], EAX;
681 			mov [EDI + regs.ebx.offsetof], EBX;
682 			mov [EDI + regs.ecx.offsetof], ECX;
683 			mov [EDI + regs.edx.offsetof], EDX;
684 		} else version (X86_64) asm {
685 			lea RDI, regs;
686 			mov EAX, level;
687 			mov ECX, sublevel;
688 			cpuid;
689 			mov [RDI + regs.eax.offsetof], EAX;
690 			mov [RDI + regs.ebx.offsetof], EBX;
691 			mov [RDI + regs.ecx.offsetof], ECX;
692 			mov [RDI + regs.edx.offsetof], EDX;
693 		}
694 	}
695 	version (Trace) with (regs) trace(
696 		"level=%x sub=%x -> eax=%x ebx=%x ecx=%x edx=%x",
697 		level, sublevel, eax, ebx, ecx, edx);
698 }
/// These tests assume a Pentium 4 or later processor.
700 @system unittest {
701 	REGISTERS regs;
702 	ddcpuid_id(regs, 0);
703 	assert(regs.eax > 0 && regs.eax < 0x4000_0000);
704 	ddcpuid_id(regs, 0x8000_0000);
705 	assert(regs.eax > 0x8000_0000);
706 }
707 
708 private uint ddcpuid_max_leaf() {
709 	version (DMDLDC) asm {
710 		xor EAX,EAX;
711 		cpuid;
712 	} else version (GDC) asm {
713 		"xor %eax,%eax\n\t"~
714 		"cpuid";
715 	}
716 }
717 
718 private uint ddcpuid_max_leaf_virt() {
719 	version (DMDLDC) asm {
720 		mov EAX,0x4000_0000;
721 		cpuid;
722 	} else version (GDC) asm {
723 		"mov $0x40000000,%eax\n\t"~
724 		"cpuid";
725 	}
726 }
727 
728 private uint ddcpuid_max_leaf_ext() {
729 	version (DMDLDC) asm {
730 		mov EAX,0x8000_0000;
731 		cpuid;
732 	} else version (GDC) asm {
733 		"mov $0x80000000,%eax\n\t"~
734 		"cpuid";
735 	}
736 }
737 
738 /// Get CPU leaf levels.
739 /// Params: cpu = CPUINFO structure
740 void ddcpuid_leaves(ref CPUINFO cpu) {
741 	cpu.maxLeaf = ddcpuid_max_leaf;
742 	cpu.maxLeafVirt = ddcpuid_max_leaf_virt;
743 	cpu.maxLeafExtended = ddcpuid_max_leaf_ext;
744 }
745 
746 private
747 void ddcpuid_vendor(ref char[12] string_) {
748 	version (DMD) {
749 		version (X86) asm {
750 			mov EDI, string_;
751 			xor EAX, EAX;
752 			cpuid;
753 			mov [EDI], EBX;
754 			mov [EDI + 4], EDX;
755 			mov [EDI + 8], ECX;
756 		} else asm { // x86-64
757 			mov RDI, string_;
758 			xor EAX,EAX;
759 			cpuid;
760 			mov [RDI], EBX;
761 			mov [RDI + 4], EDX;
762 			mov [RDI + 8], ECX;
763 		}
764 	} else version (GDC) {
765 		version (X86) asm {
766 			"lea %0, %%edi\n\t"~
767 			"xor %%eax, %%eax\n\t"~
768 			"cpuid\n"~
769 			"mov %%ebx, (%%edi)\n\t"~
770 			"mov %%edx, 4(%%edi)\n\t"~
771 			"mov %%ecx, 8(%%edi)"
772 			:
773 			: "m" (string_)
774 			: "edi", "eax", "ebx", "ecx", "edx";
775 		} else asm { // x86-64
776 			"lea %0, %%rdi\n\t"~
777 			"xor %%eax, %%eax\n\t"~
778 			"cpuid\n"~
779 			"mov %%ebx, (%%rdi)\n\t"~
780 			"mov %%edx, 4(%%rdi)\n\t"~
781 			"mov %%ecx, 8(%%rdi)"
782 			:
783 			: "m" (string_)
784 			: "rdi", "rax", "rbx", "rcx", "rdx";
785 		}
786 	} else version (LDC) {
787 		version (X86) asm {
788 			lea EDI, string_;
789 			xor EAX, EAX;
790 			cpuid;
791 			mov [EDI], EBX;
792 			mov [EDI + 4], EDX;
793 			mov [EDI + 8], ECX;
794 		} else asm { // x86-64
795 			lea RDI, string_;
796 			xor EAX, EAX;
797 			cpuid;
798 			mov [RDI], EBX;
799 			mov [RDI + 4], EDX;
800 			mov [RDI + 8], ECX;
801 		}
802 	}
803 }
804 
805 private
806 Vendor ddcpuid_vendor_id(ref VendorString vendor) {
	// Vendor string verification.
	// If the rest of the string does not match, the ID is left as Other.
809 	switch (vendor.ebx) with (Vendor) {
810 	case Intel:	// "GenuineIntel"
811 		if (vendor.edx != ID!("ineI")) break;
812 		if (vendor.ecx != ID!("ntel")) break;
813 		return Vendor.Intel;
814 	case AMD:	// "AuthenticAMD"
815 		if (vendor.edx != ID!("enti")) break;
816 		if (vendor.ecx != ID!("cAMD")) break;
817 		return Vendor.AMD;
818 	case VIA:	// "VIA VIA VIA "
819 		if (vendor.edx != ID!("VIA ")) break;
820 		if (vendor.ecx != ID!("VIA ")) break;
821 		return Vendor.VIA;
822 	default: // Unknown
823 	}
824 	return Vendor.Other;
825 }
826 
827 private
828 void ddcpuid_extended_brand(ref char[48] string_) {
829 	version (DMD) {
830 		version (X86) asm {
831 			mov EDI, string_;
832 			mov EAX, 0x8000_0002;
833 			cpuid;
834 			mov [EDI], EAX;
835 			mov [EDI +  4], EBX;
836 			mov [EDI +  8], ECX;
837 			mov [EDI + 12], EDX;
838 			mov EAX, 0x8000_0003;
839 			cpuid;
840 			mov [EDI + 16], EAX;
841 			mov [EDI + 20], EBX;
842 			mov [EDI + 24], ECX;
843 			mov [EDI + 28], EDX;
844 			mov EAX, 0x8000_0004;
845 			cpuid;
846 			mov [EDI + 32], EAX;
847 			mov [EDI + 36], EBX;
848 			mov [EDI + 40], ECX;
849 			mov [EDI + 44], EDX;
850 		} else version (X86_64) asm {
851 			mov RDI, string_;
852 			mov EAX, 0x8000_0002;
853 			cpuid;
854 			mov [RDI], EAX;
855 			mov [RDI +  4], EBX;
856 			mov [RDI +  8], ECX;
857 			mov [RDI + 12], EDX;
858 			mov EAX, 0x8000_0003;
859 			cpuid;
860 			mov [RDI + 16], EAX;
861 			mov [RDI + 20], EBX;
862 			mov [RDI + 24], ECX;
863 			mov [RDI + 28], EDX;
864 			mov EAX, 0x8000_0004;
865 			cpuid;
866 			mov [RDI + 32], EAX;
867 			mov [RDI + 36], EBX;
868 			mov [RDI + 40], ECX;
869 			mov [RDI + 44], EDX;
870 		}
871 	} else version (GDC) {
872 		version (X86) asm {
873 			"lea %0, %%edi\n\t"~
874 			"mov $0x80000002, %%eax\n\t"~
875 			"cpuid\n\t"~
876 			"mov %%eax, (%%rdi)\n\t"~
877 			"mov %%ebx, 4(%%rdi)\n\t"~
878 			"mov %%ecx, 8(%%rdi)\n\t"~
879 			"mov %%edx, 12(%%rdi)\n\t"~
880 			"mov $0x80000003, %%eax\n\t"~
881 			"cpuid\n\t"~
882 			"mov %%eax, 16(%%rdi)\n\t"~
883 			"mov %%ebx, 20(%%rdi)\n\t"~
884 			"mov %%ecx, 24(%%rdi)\n\t"~
885 			"mov %%edx, 28(%%rdi)\n\t"~
886 			"mov $0x80000004, %%eax\n\t"~
887 			"cpuid\n\t"~
888 			"mov %%eax, 32(%%rdi)\n\t"~
889 			"mov %%ebx, 36(%%rdi)\n\t"~
890 			"mov %%ecx, 40(%%rdi)\n\t"~
891 			"mov %%edx, 44(%%rdi)"
892 			:
893 			: "m" (string_)
894 			: "edi", "eax", "ebx", "ecx", "edx";
895 		} else version (X86_64) asm {
896 			"lea %0, %%rdi\n\t"~
897 			"mov $0x80000002, %%eax\n\t"~
898 			"cpuid\n\t"~
899 			"mov %%eax, (%%rdi)\n\t"~
900 			"mov %%ebx, 4(%%rdi)\n\t"~
901 			"mov %%ecx, 8(%%rdi)\n\t"~
902 			"mov %%edx, 12(%%rdi)\n\t"~
903 			"mov $0x80000003, %%eax\n\t"~
904 			"cpuid\n\t"~
905 			"mov %%eax, 16(%%rdi)\n\t"~
906 			"mov %%ebx, 20(%%rdi)\n\t"~
907 			"mov %%ecx, 24(%%rdi)\n\t"~
908 			"mov %%edx, 28(%%rdi)\n\t"~
909 			"mov $0x80000004, %%eax\n\t"~
910 			"cpuid\n\t"~
911 			"mov %%eax, 32(%%rdi)\n\t"~
912 			"mov %%ebx, 36(%%rdi)\n\t"~
913 			"mov %%ecx, 40(%%rdi)\n\t"~
914 			"mov %%edx, 44(%%rdi)"
915 			:
916 			: "m" (string_)
917 			: "rdi", "rax", "rbx", "rcx", "rdx";
918 		}
919 	} else version (LDC) {
920 		version (X86) asm {
921 			lea EDI, string_;
922 			mov EAX, 0x8000_0002;
923 			cpuid;
924 			mov [EDI], EAX;
925 			mov [EDI +  4], EBX;
926 			mov [EDI +  8], ECX;
927 			mov [EDI + 12], EDX;
928 			mov EAX, 0x8000_0003;
929 			cpuid;
930 			mov [EDI + 16], EAX;
931 			mov [EDI + 20], EBX;
932 			mov [EDI + 24], ECX;
933 			mov [EDI + 28], EDX;
934 			mov EAX, 0x8000_0004;
935 			cpuid;
936 			mov [EDI + 32], EAX;
937 			mov [EDI + 36], EBX;
938 			mov [EDI + 40], ECX;
939 			mov [EDI + 44], EDX;
940 		} else version (X86_64) asm {
941 			lea RDI, string_;
942 			mov EAX, 0x8000_0002;
943 			cpuid;
944 			mov [RDI], EAX;
945 			mov [RDI +  4], EBX;
946 			mov [RDI +  8], ECX;
947 			mov [RDI + 12], EDX;
948 			mov EAX, 0x8000_0003;
949 			cpuid;
950 			mov [RDI + 16], EAX;
951 			mov [RDI + 20], EBX;
952 			mov [RDI + 24], ECX;
953 			mov [RDI + 28], EDX;
954 			mov EAX, 0x8000_0004;
955 			cpuid;
956 			mov [RDI + 32], EAX;
957 			mov [RDI + 36], EBX;
958 			mov [RDI + 40], ECX;
959 			mov [RDI + 44], EDX;
960 		}
961 	}
962 }
963 
// Avoids a dependency on the C runtime for the library.
965 /// Copy brand string
966 /// Params:
967 /// 	dst = Destination buffer
968 /// 	src = Source constant string
969 private
970 void ddcpuid_strcpy48(ref char[48] dst, const(char) *src) {
971 	for (size_t i; i < 48; ++i) {
972 		char c = src[i];
973 		dst[i] = c;
974 		if (c == 0) break;
975 	}
976 }
977 private alias strcpy48 = ddcpuid_strcpy48;
978 
979 @system unittest {
980 	char[48] buffer = void;
981 	strcpy48(buffer, "ea");
982 	assert(buffer[0] == 'e');
983 	assert(buffer[1] == 'a');
984 	assert(buffer[2] == 0);
985 }
986 
987 /// Get the legacy processor brand string.
988 /// These indexes/tables were introduced in Intel's Pentium III.
989 /// AMD does not use them.
/// Params:
/// 	identifier = Raw identifier (CPUID.01h.EAX).
/// 	index = CPUID.01h.BL value.
/// Returns: Static brand string.
993 private
994 const(char)* ddcpuid_intel_brand_index(uint identifier, ubyte index) {
995 	switch (index) {
996 	case 1, 0xA, 0xF, 0x14: return "Intel(R) Celeron(R)";
997 	case 2, 4: return "Intel(R) Pentium(R) III";
998 	case 3:
999 		if (identifier == 0x6b1) goto case 1;
1000 		return "Intel(R) Pentium(R) III Xeon(R)";
1001 	case 6:
1002 		return "Mobile Intel(R) Pentium(R) III";
1003 	case 7, 0x13, 0x17: // Same as Intel(R) Celeron(R) M?
1004 		return "Mobile Intel(R) Celeron(R)";
1005 	case 8, 9:
1006 		return "Intel(R) Pentium(R) 4";
1007 	case 0xB:
1008 		if (identifier == 0xf13) goto case 0xC;
1009 	L_XEON: // Needed to avoid loop with case 0xe
1010 		return "Intel(R) Xeon(R)";
1011 	case 0xC: return "Intel(R) Xeon(R) MP";
1012 	case 0xE:
1013 		if (identifier == 0xf13) goto L_XEON;
1014 		return "Mobile Intel(R) Pentium(R) 4";
1015 	case 0x11, 0x15: // Yes, really.
1016 		return "Mobile Genuine Intel(R)";
1017 	case 0x12: return "Intel(R) Celeron(R) M";
1018 	case 0x16: return "Intel(R) Pentium(R) M";
1019 	default:   return "Unknown";
1020 	}
1021 }
1022 
1023 private
1024 const(char)* ddcpuid_intel_brand_family(ref CPUINFO cpu) {
	// This function exists for processors that do not support the
	// brand name table.
	// It covers at least the Pentium through late Pentium II processors.
1028 	switch (cpu.family) {
1029 	case 5: // i586, Pentium
1030 		if (cpu.model >= 4) return "Intel(R) Pentium(R) MMX";
1031 		return "Intel(R) Pentium(R)";
1032 	case 6: // i686, Pentium Pro
1033 		if (cpu.model >= 3) return "Intel(R) Pentium(R) II";
1034 		return "Intel(R) Pentium(R) Pro";
1035 	default: return "Unknown";
1036 	}
1037 }
1038 
1039 private
1040 void ddcpuid_amd_brand_family(ref CPUINFO cpu) {
	// This function exists for processors that do not support the
	// extended brand string, namely the Am5x86 and the AMD K5 model 0.
	// K5 model 1 supports the extended brand string, so case 5 only covers model 0.
	// AMD has no official names for these.
1045 	switch (cpu.family) {
1046 	case 4:  strcpy48(cpu.brandString, "AMD Am5x86"); return;
1047 	case 5:  strcpy48(cpu.brandString, "AMD K5"); return;
1048 	default: strcpy48(cpu.brandString, "Unknown"); return;
1049 	}
1050 }
1051 
1052 private
1053 void ddcpuid_virt_vendor(ref char[12] string_) {
1054 	version (DMD) {
1055 		version (X86) asm {
1056 			mov EDI, string_;
1057 			mov EAX, 0x40000000;
1058 			cpuid;
1059 			mov [EDI], EBX;
1060 			mov [EDI + 4], ECX;
1061 			mov [EDI + 8], EDX;
1062 		} else asm { // x86-64
1063 			mov RDI, string_;
1064 			mov EAX, 0x40000000;
1065 			cpuid;
1066 			mov [RDI], EBX;
1067 			mov [RDI + 4], ECX;
1068 			mov [RDI + 8], EDX;
1069 		}
1070 	} else version (GDC) {
1071 		version (X86) asm {
1072 			"lea %0, %%edi\n\t"~
1073 			"mov $0x40000000, %%eax\n\t"~
1074 			"cpuid\n"~
1075 			"mov %%ebx, (%%edi)\n\t"~
1076 			"mov %%ecx, 4(%%edi)\n\t"~
1077 			"mov %%edx, 8(%%edi)"
1078 			:
1079 			: "m" (string_)
1080 			: "edi", "eax", "ebx", "ecx", "edx";
1081 		} else asm { // x86-64
1082 			"lea %0, %%rdi\n\t"~
1083 			"mov $0x40000000, %%eax\n\t"~
1084 			"cpuid\n"~
1085 			"mov %%ebx, (%%rdi)\n\t"~
1086 			"mov %%ecx, 4(%%rdi)\n\t"~
1087 			"mov %%edx, 8(%%rdi)"
1088 			:
1089 			: "m" (string_)
1090 			: "rdi", "rax", "rbx", "rcx", "rdx";
1091 		}
1092 	} else version (LDC) {
1093 		version (X86) asm {
1094 			lea EDI, string_;
1095 			mov EAX, 0x40000000;
1096 			cpuid;
1097 			mov [EDI], EBX;
1098 			mov [EDI + 4], ECX;
1099 			mov [EDI + 8], EDX;
1100 		} else asm { // x86-64
1101 			lea RDI, string_;
1102 			mov EAX, 0x40000000;
1103 			cpuid;
1104 			mov [RDI], EBX;
1105 			mov [RDI + 4], ECX;
1106 			mov [RDI + 8], EDX;
1107 		}
1108 	}
1109 }
1110 
1111 private
1112 VirtVendor ddcpuid_virt_vendor_id(ref VirtVendorString vendor) {
	// Paravirtual vendor string verification.
	// If the rest of the string does not match, the ID is left as Other.
1115 	switch (vendor.ebx) {
1116 	case VirtVendor.KVM:	// "KVMKVMKVM\0\0\0"
1117 		if (vendor.ecx != ID!("VMKV")) goto default;
1118 		if (vendor.edx != ID!("M\0\0\0")) goto default;
1119 		return VirtVendor.KVM;
1120 	case VirtVendor.HyperV:	// "Microsoft Hv"
1121 		if (vendor.ecx != ID!("osof")) goto default;
1122 		if (vendor.edx != ID!("t Hv")) goto default;
1123 		return VirtVendor.HyperV;
1124 	case VirtVendor.VBoxHyperV:	// "VBoxVBoxVBox"
1125 		if (vendor.ecx != ID!("VBox")) goto default;
1126 		if (vendor.edx != ID!("VBox")) goto default;
1127 		return VirtVendor.HyperV; // Bug according to VBox
1128 	default:
1129 		return VirtVendor.Other;
1130 	}
1131 }
1132 
1133 @system unittest {
1134 	VirtVendorString vendor;
1135 	vendor.string_ = "KVMKVMKVM\0\0\0";
1136 	assert(ddcpuid_virt_vendor_id(vendor) == VirtVendor.KVM);
1137 }
1138 
1139 private
1140 void ddcpuid_model_string(ref CPUINFO cpu) {
1141 	switch (cpu.vendor.id) with (Vendor) {
1142 	case Intel:
1143 		// Brand string
1144 		if (cpu.maxLeafExtended >= 0x8000_0004)
1145 			ddcpuid_extended_brand(cpu.brandString);
1146 		else if (cpu.brandIndex)
1147 			strcpy48(cpu.brandString,
1148 				ddcpuid_intel_brand_index(cpu.identifier, cpu.brandIndex));
1149 		else
1150 			strcpy48(cpu.brandString, ddcpuid_intel_brand_family(cpu));
1151 		return;
1152 	case AMD, VIA:
1153 		// Brand string
		// NOTE: AMD processors never supported the brand string tables.
1155 		//       Am486DX4 and Am5x86 processors do not support the extended brand string.
1156 		//       K5 model 0 does not support the extended brand string.
1157 		//       K5 model 1, 2, and 3 support the extended brand string.
1158 		if (cpu.maxLeafExtended >= 0x8000_0004)
1159 			ddcpuid_extended_brand(cpu.brandString);
1160 		else
1161 			ddcpuid_amd_brand_family(cpu);
1162 		return;
1163 	default:
1164 		strcpy48(cpu.brandString, "Unknown");
1165 		return;
1166 	}
1167 }
1168 
1169 private
1170 void ddcpuid_leaf1(ref CPUINFO cpu, ref REGISTERS regs) {
1171 	// EAX
1172 	cpu.identifier = regs.eax;
1173 	cpu.stepping   = regs.eax & 15;       // EAX[3:0]
1174 	cpu.modelBase  = regs.eax >>  4 & 15; // EAX[7:4]
1175 	cpu.familyBase = regs.eax >>  8 & 15; // EAX[11:8]
1176 	cpu.type       = regs.eax >> 12 & 3;  // EAX[13:12]
1177 	cpu.typeString = PROCESSOR_TYPE[cpu.type];
1178 	cpu.modelExtended   = regs.eax >> 16 & 15; // EAX[19:16]
1179 	cpu.familyExtended  = cast(ubyte)(regs.eax >> 20); // EAX[27:20]
1180 	
1181 	switch (cpu.vendor.id) with (Vendor) {
1182 	case Intel:
1183 		cpu.family = cpu.familyBase != 15 ?
1184 			cast(ushort)cpu.familyBase :
1185 			cast(ushort)(cpu.familyExtended + cpu.familyBase);
1186 		
		cpu.model = cpu.familyBase == 6 || cpu.familyBase == 15 ?
1188 			cast(ushort)((cpu.modelExtended << 4) + cpu.modelBase) :
1189 			cast(ushort)cpu.modelBase; // DisplayModel = Model_ID;
1190 		
1191 		// ECX
1192 		cpu.dtes64	= bit(regs.ecx, 2);
1193 		cpu.ds_cpl	= bit(regs.ecx, 4);
1194 		cpu.virtualization	= bit(regs.ecx, 5);
1195 		cpu.smx	= bit(regs.ecx, 6);
1196 		cpu.eist	= bit(regs.ecx, 7);
1197 		cpu.tm2	= bit(regs.ecx, 8);
1198 		cpu.cnxtId	= bit(regs.ecx, 10);
1199 		cpu.sdbg	= bit(regs.ecx, 11);
1200 		cpu.xtpr	= bit(regs.ecx, 14);
1201 		cpu.pdcm	= bit(regs.ecx, 15);
1202 		cpu.pcid	= bit(regs.ecx, 17);
		cpu.dca	= bit(regs.ecx, 18);
1204 		cpu.x2apic	= bit(regs.ecx, 21);
1205 		cpu.rdtscDeadline	= bit(regs.ecx, 24);
1206 		
1207 		// EDX
1208 		cpu.psn	= bit(regs.edx, 18);
1209 		cpu.ds	= bit(regs.edx, 21);
1210 		cpu.apci	= bit(regs.edx, 22);
1211 		cpu.ss	= bit(regs.edx, 27);
1212 		cpu.tm	= bit(regs.edx, 29);
		cpu.pbe	= bit(regs.edx, 31);
1214 		break;
1215 	case AMD:
1216 		if (cpu.familyBase < 15) {
1217 			cpu.family = cpu.familyBase;
1218 			cpu.model = cpu.modelBase;
1219 		} else {
1220 			cpu.family = cast(ushort)(cpu.familyExtended + cpu.familyBase);
1221 			cpu.model = cast(ushort)((cpu.modelExtended << 4) + cpu.modelBase);
1222 		}
1223 		break;
1224 	default:
1225 	}
1226 	
1227 	// EBX
1228 	cpu.apicId	= regs.ebx >> 24;
1229 	cpu.apicMaxId	= cast(ubyte)(regs.ebx >> 16);
1230 	cpu.clflushLinesize	= regs.bh;
1231 	cpu.brandIndex	= regs.bl;
1232 	
1233 	// ECX
1234 	cpu.sse3	= bit(regs.ecx, 0);
1235 	cpu.pclmulqdq	= bit(regs.ecx, 1);
1236 	cpu.monitor	= bit(regs.ecx, 3);
1237 	cpu.ssse3	= bit(regs.ecx, 9);
1238 	cpu.fma	= bit(regs.ecx, 12);
1239 	cpu.cmpxchg16b	= bit(regs.ecx, 13);
	cpu.sse41	= bit(regs.ecx, 19);
1241 	cpu.sse42	= bit(regs.ecx, 20);
1242 	cpu.movbe	= bit(regs.ecx, 22);
1243 	cpu.popcnt	= bit(regs.ecx, 23);
1244 	cpu.aes_ni	= bit(regs.ecx, 25);
1245 	cpu.xsave	= bit(regs.ecx, 26);
1246 	cpu.osxsave	= bit(regs.ecx, 27);
1247 	cpu.avx	= bit(regs.ecx, 28);
1248 	cpu.f16c	= bit(regs.ecx, 29);
1249 	cpu.rdrand	= bit(regs.ecx, 30);
1250 	
1251 	// EDX
1252 	cpu.fpu	= bit(regs.edx, 0);
1253 	cpu.vme	= bit(regs.edx, 1);
1254 	cpu.de	= bit(regs.edx, 2);
1255 	cpu.pse	= bit(regs.edx, 3);
1256 	cpu.rdtsc	= bit(regs.edx, 4);
1257 	cpu.rdmsr	= bit(regs.edx, 5);
1258 	cpu.pae	= bit(regs.edx, 6);
1259 	cpu.mce	= bit(regs.edx, 7);
1260 	cpu.cmpxchg8b	= bit(regs.edx, 8);
1261 	cpu.apic	= bit(regs.edx, 9);
1262 	cpu.sysenter	= bit(regs.edx, 11);
1263 	cpu.mtrr	= bit(regs.edx, 12);
1264 	cpu.pge	= bit(regs.edx, 13);
1265 	cpu.mca	= bit(regs.edx, 14);
1266 	cpu.cmov	= bit(regs.edx, 15);
1267 	cpu.pat	= bit(regs.edx, 16);
1268 	cpu.pse36	= bit(regs.edx, 17);
1269 	cpu.clflush	= bit(regs.edx, 19);
1270 	cpu.mmx	= bit(regs.edx, 23);
1271 	cpu.fxsr	= bit(regs.edx, 24);
1272 	cpu.sse	= bit(regs.edx, 25);
1273 	cpu.sse2	= bit(regs.edx, 26);
1274 	cpu.htt	= bit(regs.edx, 28);
1275 }
1276 
// NOTE: Only Intel officially supports CPUID.02h.
//       No dedicated function per cache descriptor, to avoid extra definitions.
1279 private
1280 void ddcpuid_leaf2(ref CPUINFO cpu, ref REGISTERS regs) {
1281 	struct leaf2_t {
1282 		union {
1283 			REGISTERS registers;
1284 			ubyte[16] values;
1285 		}
1286 	}
1287 	leaf2_t data = void;
1288 	
1289 	data.registers = regs;
1290 	
1291 	enum L1I = 0;
1292 	enum L1D = 1;
1293 	enum L2 = 2;
1294 	enum L3 = 3;
1295 	// Skips value in AL
1296 	with (cpu) for (size_t index = 1; index < 16; ++index) {
1297 		ubyte value = data.values[index];
1298 		
		// Cache entries only; the rest is "don't care"
		// (unless TLB lookups get supported one day, which AMD does not provide here).
		// continue: explicitly skip the descriptor; this includes 0x00 (null) and 0x40 (no L2 or L3).
		// break: valid cache descriptor, increment cache level.
1303 		switch (value) {
1304 		case 0x06: // 1st-level instruction cache: 8 KBytes, 4-way set associative, 32 byte line size
1305 			cache[L1I] = CACHEINFO(1, 'I', 8, 1, 4, 1, 32, 64);
1306 			break;
1307 		case 0x08: // 1st-level instruction cache: 16 KBytes, 4-way set associative, 32 byte line size
1308 			cache[L1I] = CACHEINFO(1, 'I', 16, 1, 4, 1, 32, 128);
1309 			break;
1310 		case 0x09: // 1st-level instruction cache: 32 KBytes, 4-way set associative, 64 byte line size
1311 			cache[L1I] = CACHEINFO(1, 'I', 32, 1, 4, 1, 64, 128);
1312 			break;
1313 		case 0x0A: // 1st-level data cache: 8 KBytes, 2-way set associative, 32 byte line size
1314 			cache[L1D] = CACHEINFO(1, 'D', 8, 1, 2, 1, 32, 128);
1315 			break;
1316 		case 0x0C: // 1st-level data cache: 16 KBytes, 4-way set associative, 32 byte line size
1317 			cache[L1D] = CACHEINFO(1, 'D', 16, 1, 4, 1, 32, 128);
1318 			break;
1319 		case 0x0D: // 1st-level data cache: 16 KBytes, 4-way set associative, 64 byte line size (ECC?)
1320 			cache[L1D] = CACHEINFO(1, 'D', 16, 1, 4, 1, 64, 64);
1321 			break;
1322 		case 0x0E: // 1st-level data cache: 24 KBytes, 6-way set associative, 64 byte line size
1323 			cache[L1D] = CACHEINFO(1, 'D', 24, 1, 6, 1, 64, 64);
1324 			break;
1325 		case 0x10: // (sandpile) data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
1326 			cache[L1D] = CACHEINFO(1, 'D', 16, 1, 4, 1, 32, 64);
1327 			break;
1328 		case 0x15: // (sandpile) code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
1329 			cache[L1I] = CACHEINFO(1, 'I', 16, 1, 4, 1, 32, 64);
1330 			break;
1331 		case 0x1a: // (sandpile) code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
1332 			cache[L2] = CACHEINFO(2, 'I', 96, 1, 6, 1, 64, 256);
1333 			break;
1334 		case 0x1D: // 2nd-level cache: 128 KBytes, 2-way set associative, 64 byte line size
1335 			cache[L2] = CACHEINFO(2, 'U', 128, 1, 2, 1, 64, 1024);
1336 			break;
1337 		case 0x21: // 2nd-level cache: 256 KBytes, 8-way set associative, 64 byte line size
1338 			cache[L2] = CACHEINFO(2, 'U', 256, 1, 8, 1, 64, 512);
1339 			break;
1340 		case 0x22: // 3rd-level cache: 512 KBytes, 4-way set associative, 64 byte line size, 2 lines per sector
1341 			cache[L3] = CACHEINFO(3, 'U', 512, 1, 4, 2, 64, 1024);
1342 			break;
1343 		case 0x23: // 3rd-level cache: 1 MBytes, 8-way set associative, 64 byte line size, 2 lines per sector
1344 			cache[L3] = CACHEINFO(3, 'U', 1024, 1, 8, 2, 64, 1024);
1345 			break;
1346 		case 0x24: // 2nd-level cache: 1 MBytes, 16-way set associative, 64 byte line size
1347 			cache[L2] = CACHEINFO(2, 'U', 1024, 1, 16, 1, 64, 1024);
1348 			break;
1349 		case 0x25: // 3rd-level cache: 2 MBytes, 8-way set associative, 64 byte line size, 2 lines per sector
1350 			cache[L3] = CACHEINFO(3, 'U', 2048, 1, 8, 2, 64, 2048);
1351 			break;
1352 		case 0x29: // 3rd-level cache: 4 MBytes, 8-way set associative, 64 byte line size, 2 lines per sector
1353 			cache[L3] = CACHEINFO(3, 'U', 4096, 1, 8, 2, 64, 4096);
1354 			break;
1355 		case 0x2C: // 1st-level data cache: 32 KBytes, 8-way set associative, 64 byte line size
1356 			cache[L1D] = CACHEINFO(1, 'D', 32, 1, 8, 1, 64, 64);
1357 			break;
1358 		case 0x30: // 1st-level instruction cache: 32 KBytes, 8-way set associative, 64 byte line size
1359 			cache[L1I] = CACHEINFO(1, 'I', 32, 1, 8, 1, 64, 64);
1360 			break;
1361 		case 0x39: // (sandpile) code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored (htt?)
1362 			cache[L2] = CACHEINFO(2, 'U', 128, 1, 4, 1, 64, 512);
1363 			break;
1364 		case 0x3A: // (sandpile) code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored (htt?)
1365 			cache[L2] = CACHEINFO(2, 'U', 192, 1, 6, 1, 64, 512);
1366 			break;
1367 		case 0x3B: // (sandpile) code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored (htt?)
1368 			cache[L2] = CACHEINFO(2, 'U', 128, 1, 2, 1, 64, 1024);
1369 			break;
1370 		case 0x3C: // (sandpile) code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
1371 			cache[L2] = CACHEINFO(2, 'U', 256, 1, 4, 1, 64, 1024);
1372 			break;
1373 		case 0x3D: // (sandpile) code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored (htt?)
1374 			cache[L2] = CACHEINFO(2, 'U', 384, 1, 6, 1, 64, 1024);
1375 			break;
1376 		case 0x3E: // (sandpile) code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored (htt?)
1377 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 4, 1, 64, 2048);
1378 			break;
1379 		case 0x41: // 2nd-level cache: 128 KBytes, 4-way set associative, 32 byte line size
1380 			cache[L2] = CACHEINFO(2, 'U', 128, 1, 4, 1, 32, 1024);
1381 			break;
1382 		case 0x42: // 2nd-level cache: 256 KBytes, 4-way set associative, 32 byte line size
1383 			cache[L2] = CACHEINFO(2, 'U', 256, 1, 4, 1, 32, 2048);
1384 			break;
1385 		case 0x43: // 2nd-level cache: 512 KBytes, 4-way set associative, 32 byte line size
1386 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 4, 1, 32, 4096);
1387 			break;
1388 		case 0x44: // 2nd-level cache: 1 MByte, 4-way set associative, 32 byte line size
1389 			cache[L2] = CACHEINFO(2, 'U', 1024, 1, 4, 1, 32, 8192);
1390 			break;
1391 		case 0x45: // 2nd-level cache: 2 MByte, 4-way set associative, 32 byte line size
1392 			cache[L2] = CACHEINFO(2, 'U', 2048, 1, 4, 1, 32, 16384);
1393 			break;
1394 		case 0x46: // 3rd-level cache: 4 MByte, 4-way set associative, 64 byte line size
			cache[L3] = CACHEINFO(3, 'U', 4096, 1, 4, 1, 64, 16384);
1396 			break;
1397 		case 0x47: // 3rd-level cache: 8 MByte, 8-way set associative, 64 byte line size
1398 			cache[L3] = CACHEINFO(3, 'U', 8192, 1, 8, 1, 64, 16384);
1399 			break;
1400 		case 0x48: // 2nd-level cache: 3 MByte, 12-way set associative, 64 byte line size
1401 			cache[L2] = CACHEINFO(2, 'U', 3072, 1, 12, 1, 64, 4096);
1402 			break;
		// 3rd-level cache: 4 MByte, 16-way set associative, 64-byte line size (Intel Xeon processor MP, Family 0FH, Model 06H)
		// 2nd-level cache: 4 MByte, 16-way set associative, 64 byte line size
		case 0x49:
			if (cpu.family == 0xf && cpu.model == 6)
1407 				cache[L3] = CACHEINFO(3, 'U', 4096, 1, 16, 1, 64, 4096);
1408 			else
1409 				cache[L2] = CACHEINFO(2, 'U', 4096, 1, 16, 1, 64, 4096);
1410 			break;
1411 		case 0x4A: // 3rd-level cache: 6 MByte, 12-way set associative, 64 byte line size
			cache[L3] = CACHEINFO(3, 'U', 6144, 1, 12, 1, 64, 8192);
1413 			break;
1414 		case 0x4B: // 3rd-level cache: 8 MByte, 16-way set associative, 64 byte line size
1415 			cache[L3] = CACHEINFO(3, 'U', 8192, 1, 16, 1, 64, 8192);
1416 			break;
1417 		case 0x4C: // 3rd-level cache: 12 MByte, 12-way set associative, 64 byte line size
			cache[L3] = CACHEINFO(3, 'U', 12288, 1, 12, 1, 64, 16384);
1419 			break;
1420 		case 0x4D: // 3rd-level cache: 16 MByte, 16-way set associative, 64 byte line size
1421 			cache[L3] = CACHEINFO(3, 'U', 16384, 1, 16, 1, 64, 16384);
1422 			break;
1423 		case 0x4E: // 2nd-level cache: 6MByte, 24-way set associative, 64 byte line size
1424 			cache[L2] = CACHEINFO(2, 'U', 6144, 1, 24, 1, 64, 4096);
1425 			break;
1426 		case 0x60: // 1st-level data cache: 16 KByte, 8-way set associative, 64 byte line size
1427 			cache[L1D] = CACHEINFO(1, 'D', 16, 1, 8, 1, 64, 32);
1428 			break;
1429 		case 0x66: // 1st-level data cache: 8 KByte, 4-way set associative, 64 byte line size
1430 			cache[L1D] = CACHEINFO(1, 'D', 8, 1, 4, 1, 64, 32);
1431 			break;
1432 		case 0x67: // 1st-level data cache: 16 KByte, 4-way set associative, 64 byte line size
1433 			cache[L1D] = CACHEINFO(1, 'D', 16, 1, 4, 1, 64, 64);
1434 			break;
1435 		case 0x68: // 1st-level data cache: 32 KByte, 4-way set associative, 64 byte line size
1436 			cache[L1D] = CACHEINFO(1, 'D', 32, 1, 4, 1, 64, 128);
1437 			break;
1438 		case 0x77: // (sandpile) code L1 cache, 16 KB, 4 ways, 64 byte lines, sectored (IA-64)
1439 			cache[L1I] = CACHEINFO(1, 'I', 16, 1, 4, 1, 64, 64);
1440 			break;
1441 		case 0x78: // 2nd-level cache: 1 MByte, 4-way set associative, 64byte line size
1442 			cache[L2] = CACHEINFO(2, 'U', 1024, 1, 4, 1, 64, 4096);
1443 			break;
1444 		case 0x79: // 2nd-level cache: 128 KByte, 8-way set associative, 64 byte line size, 2 lines per sector
1445 			cache[L2] = CACHEINFO(2, 'U', 128, 1, 8, 2, 64, 128);
1446 			break;
1447 		case 0x7A: // 2nd-level cache: 256 KByte, 8-way set associative, 64 byte line size, 2 lines per sector
1448 			cache[L2] = CACHEINFO(2, 'U', 256, 1, 8, 2, 64, 256);
1449 			break;
1450 		case 0x7B: // 2nd-level cache: 512 KByte, 8-way set associative, 64 byte line size, 2 lines per sector
1451 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 8, 2, 64, 512);
1452 			break;
1453 		case 0x7C: // 2nd-level cache: 1 MByte, 8-way set associative, 64 byte line size, 2 lines per sector
1454 			cache[L2] = CACHEINFO(2, 'U', 1024, 1, 8, 2, 64, 1024);
1455 			break;
1456 		case 0x7D: // 2nd-level cache: 2 MByte, 8-way set associative, 64 byte line size
1457 			cache[L2] = CACHEINFO(2, 'U', 2048, 1, 8, 1, 64, 4096);
1458 			break;
1459 		case 0x7E: // (sandpile) code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
1460 			cache[L2] = CACHEINFO(2, 'U', 256, 1, 8, 1, 128, 256);
1461 			break;
1462 		case 0x7F: // 2nd-level cache: 512 KByte, 2-way set associative, 64-byte line size
1463 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 2, 1, 64, 4096);
1464 			break;
1465 		case 0x80: // 2nd-level cache: 512 KByte, 8-way set associative, 64-byte line size
1466 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 8, 1, 64, 1024);
1467 			break;
1468 		case 0x81: // (sandpile) code and data L2 cache, 128 KB, 8 ways, 32 byte lines
1469 			cache[L2] = CACHEINFO(2, 'U', 128, 1, 8, 1, 32, 512);
1470 			break;
1471 		case 0x82: // 2nd-level cache: 256 KByte, 8-way set associative, 32 byte line size
1472 			cache[L2] = CACHEINFO(2, 'U', 256, 1, 8, 1, 32, 1024);
1473 			break;
1474 		case 0x83: // 2nd-level cache: 512 KByte, 8-way set associative, 32 byte line size
1475 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 8, 1, 32, 2048);
1476 			break;
1477 		case 0x84: // 2nd-level cache: 1 MByte, 8-way set associative, 32 byte line size
1478 			cache[L2] = CACHEINFO(2, 'U', 1024, 1, 8, 1, 32, 4096);
1479 			break;
1480 		case 0x85: // 2nd-level cache: 2 MByte, 8-way set associative, 32 byte line size
1481 			cache[L2] = CACHEINFO(2, 'U', 2048, 1, 8, 1, 32, 8192);
1482 			break;
1483 		case 0x86: // 2nd-level cache: 512 KByte, 4-way set associative, 64 byte line size
1484 			cache[L2] = CACHEINFO(2, 'U', 512, 1, 4, 1, 64, 2048);
1485 			break;
1486 		case 0x87: // 2nd-level cache: 1 MByte, 8-way set associative, 64 byte line size
1487 			cache[L2] = CACHEINFO(2, 'U', 1024, 1, 8, 1, 64, 2048);
1488 			break;
1489 		case 0xD0: // 3rd-level cache: 512 KByte, 4-way set associative, 64 byte line size
1490 			cache[L3] = CACHEINFO(3, 'U', 512, 1, 4, 1, 64, 2048);
1491 			break;
1492 		case 0xD1: // 3rd-level cache: 1 MByte, 4-way set associative, 64 byte line size
1493 			cache[L3] = CACHEINFO(3, 'U', 1024, 1, 4, 1, 64, 4096);
1494 			break;
1495 		case 0xD2: // 3rd-level cache: 2 MByte, 4-way set associative, 64 byte line size
1496 			cache[L3] = CACHEINFO(3, 'U', 2048, 1, 4, 1, 64, 8192);
1497 			break;
1498 		case 0xD6: // 3rd-level cache: 1 MByte, 8-way set associative, 64 byte line size
1499 			cache[L3] = CACHEINFO(3, 'U', 1024, 1, 8, 1, 64, 2048);
1500 			break;
1501 		case 0xD7: // 3rd-level cache: 2 MByte, 8-way set associative, 64 byte line size
1502 			cache[L3] = CACHEINFO(3, 'U', 2048, 1, 8, 1, 64, 4096);
1503 			break;
1504 		case 0xD8: // 3rd-level cache: 4 MByte, 8-way set associative, 64 byte line size
1505 			cache[L3] = CACHEINFO(3, 'U', 4096, 1, 8, 1, 64, 8192);
1506 			break;
1507 		case 0xDC: // 3rd-level cache: 1.5 MByte, 12-way set associative, 64 byte line size
1508 			cache[L3] = CACHEINFO(3, 'U', 1536, 1, 12, 1, 64, 2048);
1509 			break;
1510 		case 0xDD: // 3rd-level cache: 3 MByte, 12-way set associative, 64 byte line size
1511 			cache[L3] = CACHEINFO(3, 'U', 3072, 1, 12, 1, 64, 4096);
1512 			break;
1513 		case 0xDE: // 3rd-level cache: 6 MByte, 12-way set associative, 64 byte line size
1514 			cache[L3] = CACHEINFO(3, 'U', 6144, 1, 12, 1, 64, 8192);
1515 			break;
1516 		case 0xE2: // 3rd-level cache: 2 MByte, 16-way set associative, 64 byte line size
1517 			cache[L3] = CACHEINFO(3, 'U', 2048, 1, 16, 1, 64, 2048);
1518 			break;
1519 		case 0xE3: // 3rd-level cache: 4 MByte, 16-way set associative, 64 byte line size
1520 			cache[L3] = CACHEINFO(3, 'U', 4096, 1, 16, 1, 64, 4096);
1521 			break;
1522 		case 0xE4: // 3rd-level cache: 8 MByte, 16-way set associative, 64 byte line size
1523 			cache[L3] = CACHEINFO(3, 'U', 8192, 1, 16, 1, 64, 8192);
1524 			break;
1525 		case 0xEA: // 3rd-level cache: 12MByte, 24-way set associative, 64 byte line size
1526 			cache[L3] = CACHEINFO(3, 'U', 12288, 1, 24, 1, 64, 8192);
1527 			break;
1528 		case 0xEB: // 3rd-level cache: 18MByte, 24-way set associative, 64 byte line size
1529 			cache[L3] = CACHEINFO(3, 'U', 18432, 1, 24, 1, 64, 12288);
1530 			break;
1531 		case 0xEC: // 3rd-level cache: 24MByte, 24-way set associative, 64 byte line size
1532 			cache[L3] = CACHEINFO(3, 'U', 24576, 1, 24, 1, 64, 16384);
1533 			break;
1534 		default: continue;
1535 		}
1536 		
1537 		++cacheLevels;
1538 	}
	with (cpu) { // Some CPUs report no L1I descriptor, so shift entries down to keep the array contiguous
1540 		if (cache[0].level == 0) {
1541 			for (size_t i; i < cacheLevels; ++i) {
1542 				cache[i] = cache[i+1];
1543 			}
1544 			cache[cacheLevels] = CACHEINFO.init;
1545 		}
1546 	}
1547 }
1548 
1549 version (TestCPUID02h) @system unittest {
1550 	import std.stdio : write, writeln, writef;
1551 	REGISTERS regs; // Celeron 0xf34
1552 	regs.eax = 0x605b5101;
1553 	regs.ebx = 0;
1554 	regs.ecx = 0;
1555 	regs.edx = 0x3c7040;
1556 	
1557 	CPUINFO cpu;
1558 	ddcpuid_leaf2(cpu, regs);
1559 	
1560 	writeln("TEST: CPUID.02h");
1561 	CACHEINFO *cache = void;
1562 	for (uint i; i < CACHE_MAX_LEVEL; ++i) {
1563 		cache = &cpu.cache[i];
1564 		writef("Level %u-%c   : %2ux %6u KiB, %u ways, %u parts, %u B, %u sets",
1565 			cache.level, cache.type, cache.sharedCores, cache.size,
1566 			cache.ways, cache.partitions, cache.lineSize, cache.sets
1567 		);
1568 		if (cache.features) {
1569 			write(',');
			if (cache.features & BIT!(0)) write(" si"); // Self Initializing
1571 			if (cache.features & BIT!(1)) write(" fa"); // Fully Associative
			if (cache.features & BIT!(2)) write(" nwbv"); // No Write-Back Invalidation
1573 			if (cache.features & BIT!(3)) write(" ci"); // Cache Inclusive
1574 			if (cache.features & BIT!(4)) write(" cci"); // Complex Cache Indexing
1575 		}
1576 		writeln;
1577 	}
1578 }
1579 
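// CPUID.05H (MONITOR/MWAIT): EAX[15:0] = smallest monitor-line size in bytes,
// EBX[15:0] = largest monitor-line size in bytes.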
1580 private
1581 void ddcpuid_leaf5(ref CPUINFO cpu, ref REGISTERS regs) {
1582 	cpu.mwaitMin = regs.ax;
1583 	cpu.mwaitMax = regs.bx;
1584 }
1585 
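// CPUID.06H (Thermal and power management): Turbo Boost and ARAT bits.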
1586 private
1587 void ddcpuid_leaf6(ref CPUINFO cpu, ref REGISTERS regs) {
1588 	switch (cpu.vendor.id) with (Vendor) {
1589 	case Intel:
1590 		cpu.turboboost	= bit(regs.eax, 1);
1591 		cpu.turboboost30	= bit(regs.eax, 14);
1592 		break;
1593 	default:
1594 	}
1595 	
1596 	cpu.arat = bit(regs.eax, 2);
1597 }
1598 
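// CPUID.07H, sub-leaf 0 (Structured extended feature flags).
// Vendor-specific bits are decoded first, then the bits common to all vendors.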
1599 private
1600 void ddcpuid_leaf7(ref CPUINFO cpu, ref REGISTERS regs) {
1601 	switch (cpu.vendor.id) with (Vendor) {
1602 	case Intel:
1603 		// EBX
1604 		cpu.sgx	= bit(regs.ebx, 2);
1605 		cpu.hle	= bit(regs.ebx, 4);
1606 		cpu.invpcid	= bit(regs.ebx, 10);
1607 		cpu.rtm	= bit(regs.ebx, 11);
1608 		cpu.avx512f	= bit(regs.ebx, 16);
1609 		cpu.smap	= bit(regs.ebx, 20);
1610 		cpu.avx512er	= bit(regs.ebx, 27);
1611 		cpu.avx512pf	= bit(regs.ebx, 26);
1612 		cpu.avx512cd	= bit(regs.ebx, 28);
1613 		cpu.avx512dq	= bit(regs.ebx, 17);
1614 		cpu.avx512bw	= bit(regs.ebx, 30);
1615 		cpu.avx512_ifma	= bit(regs.ebx, 21);
1616 		cpu.avx512_vbmi	= regs.ebx >= BIT!(31);
1617 		// ECX
1618 		cpu.avx512vl	= bit(regs.ecx, 1);
1619 		cpu.pku	= bit(regs.ecx, 3);
1620 		cpu.fsrepmov	= bit(regs.ecx, 4);
1621 		cpu.waitpkg	= bit(regs.ecx, 5);
1622 		cpu.avx512_vbmi2	= bit(regs.ecx, 6);
1623 		cpu.cetSs	= bit(regs.ecx, 7);
1624 		cpu.avx512_gfni	= bit(regs.ecx, 8);
1625 		cpu.avx512_vaes	= bit(regs.ecx, 9);
1626 		cpu.avx512_vnni	= bit(regs.ecx, 11);
1627 		cpu.avx512_bitalg	= bit(regs.ecx, 12);
1628 		cpu.avx512_vpopcntdq	= bit(regs.ecx, 14);
1629 		cpu._5pl	= bit(regs.ecx, 16);
1630 		cpu.cldemote	= bit(regs.ecx, 25);
1631 		cpu.movdiri	= bit(regs.ecx, 27);
1632 		cpu.movdir64b	= bit(regs.ecx, 28);
1633 		cpu.enqcmd	= bit(regs.ecx, 29);
1634 		// EDX
1635 		cpu.avx512_4vnniw	= bit(regs.edx, 2);
1636 		cpu.avx512_4fmaps	= bit(regs.edx, 3);
1637 		cpu.uintr	= bit(regs.edx, 5);
1638 		cpu.avx512_vp2intersect	= bit(regs.edx, 8);
1639 		cpu.md_clear	= bit(regs.edx, 10);
1640 		cpu.serialize	= bit(regs.edx, 14);
1641 		cpu.tsxldtrk	= bit(regs.edx, 16);
1642 		cpu.pconfig	= bit(regs.edx, 18);
1643 		cpu.cetIbt	= bit(regs.edx, 20);
1644 		cpu.amx_bf16	= bit(regs.edx, 22);
1645 		cpu.amx	= bit(regs.edx, 24);
1646 		cpu.amx_int8	= bit(regs.edx, 25);
1647 		cpu.ibrs = bit(regs.edx, 26);
1648 		cpu.stibp	= bit(regs.edx, 27);
1649 		cpu.l1dFlush	= bit(regs.edx, 28);
1650 		cpu.ia32_arch_capabilities	= bit(regs.edx, 29);
1651 		cpu.ssbd	= regs.edx >= BIT!(31);
1652 		break;
1653 	default:
1654 	}
1655 
1656 	// ebx
1657 	cpu.fsgsbase	= bit(regs.ebx, 0);
1658 	cpu.bmi1	= bit(regs.ebx, 3);
1659 	cpu.avx2	= bit(regs.ebx, 5);
1660 	cpu.smep	= bit(regs.ebx, 7);
1661 	cpu.bmi2	= bit(regs.ebx, 8);
1662 	cpu.rdseed	= bit(regs.ebx, 18);
1663 	cpu.adx	= bit(regs.ebx, 19);
1664 	cpu.clflushopt	= bit(regs.ebx, 23);
1665 	cpu.sha	= bit(regs.ebx, 29);
1666 	// ecx
1667 	cpu.rdpid	= bit(regs.ecx, 22);
1668 }
1669 
1670 private
1671 void ddcpuid_leaf7sub1(ref CPUINFO cpu, ref REGISTERS regs) {
1672 	switch (cpu.vendor.id) with (Vendor) {
1673 	case Intel:
1674 		// a
1675 		cpu.avx512_bf16	= bit(regs.eax, 5);
1676 		cpu.lam	= bit(regs.eax, 26);
1677 		break;
1678 	default:
1679 	}
1680 }
1681 
1682 private
1683 void ddcpuid_leafD(ref CPUINFO cpu, ref REGISTERS regs) {
1684 	switch (cpu.vendor.id) with (Vendor) {
1685 	case Intel:
1686 		cpu.amx_xtilecfg	= bit(regs.eax, 17);
1687 		cpu.amx_xtiledata	= bit(regs.eax, 18);
1688 		break;
1689 	default:
1690 	}
1691 }
1692 
1693 private
1694 void ddcpuid_leafDsub1(ref CPUINFO cpu, ref REGISTERS regs) {
1695 	switch (cpu.vendor.id) with (Vendor) {
1696 	case Intel:
1697 		cpu.amx_xfd	= bit(regs.eax, 18);
1698 		break;
1699 	default:
1700 	}
1701 }
1702 
1703 private
1704 void ddcpuid_leaf12(ref CPUINFO cpu, ref REGISTERS regs) {
1705 	switch (cpu.vendor.id) with (Vendor) {
1706 	case Intel:
1707 		cpu.sgx1 = bit(regs.al, 0);
1708 		cpu.sgx2 = bit(regs.al, 1);
1709 		cpu.sgxMaxSize   = regs.dl;
1710 		cpu.sgxMaxSize64 = regs.dh;
1711 		break;
1712 	default:
1713 	}
1714 }
1715 
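// CPUID.4000_0001H: KVM paravirtualization features.
// NOTE: The vendor switch is commented out because ddcpuid_cpuinfo only calls
//       this function after matching VirtVendor.KVM.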
1716 private
1717 void ddcpuid_leaf4000_0001(ref CPUINFO cpu, ref REGISTERS regs) {
1718 //	switch (cpu.virt.vendor.id) with (VirtVendor) {
1719 //	case KVM:
1720 		cpu.kvm.feature_clocksource	= bit(regs.eax, 0);
1721 		cpu.kvm.feature_nop_io_delay	= bit(regs.eax, 1);
1722 		cpu.kvm.feature_mmu_op	= bit(regs.eax, 2);
1723 		cpu.kvm.feature_clocksource2	= bit(regs.eax, 3);
1724 		cpu.kvm.feature_async_pf	= bit(regs.eax, 4);
1725 		cpu.kvm.feature_steal_time	= bit(regs.eax, 5);
1726 		cpu.kvm.feature_pv_eoi	= bit(regs.eax, 6);
1727 		cpu.kvm.feature_pv_unhault	= bit(regs.eax, 7);
1728 		cpu.kvm.feature_pv_tlb_flush	= bit(regs.eax, 9);
1729 		cpu.kvm.feature_async_pf_vmexit	= bit(regs.eax, 10);
1730 		cpu.kvm.feature_pv_send_ipi	= bit(regs.eax, 11);
1731 		cpu.kvm.feature_pv_poll_control	= bit(regs.eax, 12);
1732 		cpu.kvm.feature_pv_sched_yield	= bit(regs.eax, 13);
1733 		cpu.kvm.feature_clocsource_stable_bit	= bit(regs.eax, 24);
1734 		cpu.kvm.hint_realtime	= bit(regs.edx, 0);
1735 //		break;
1736 //	default:
1737 //	}
1738 }
1739 
1740 private
1741 void ddcpuid_leaf4000_0002(ref CPUINFO cpu, ref REGISTERS regs) {
1742 //	switch (cpu.virt.vendor.id) with (VirtVendor) {
1743 //	case HyperV:
1744 		cpu.hv.guest_minor	= cast(ubyte)(regs.eax >> 24);
1745 		cpu.hv.guest_service	= cast(ubyte)(regs.eax >> 16);
1746 		cpu.hv.guest_build	= regs.ax;
1747 		cpu.hv.guest_opensource	= regs.edx >= BIT!(31);
1748 		cpu.hv.guest_vendor_id	= (regs.edx >> 16) & 0xFFF;
1749 		cpu.hv.guest_os	= regs.dh;
1750 		cpu.hv.guest_major	= regs.dl;
1751 //		break;
1752 //	default:
1753 //	}
1754 }
1755 
1756 private
1757 void ddcpuid_leaf4000_0003(ref CPUINFO cpu, ref REGISTERS regs) {
1758 //	switch (cpu.virt.vendor.id) with (VirtVendor) {
1759 //	case HyperV:
1760 		cpu.hv.base_feat_vp_runtime_msr	= bit(regs.eax, 0);
1761 		cpu.hv.base_feat_part_time_ref_count_msr	= bit(regs.eax, 1);
1762 		cpu.hv.base_feat_basic_synic_msrs	= bit(regs.eax, 2);
1763 		cpu.hv.base_feat_stimer_msrs	= bit(regs.eax, 3);
1764 		cpu.hv.base_feat_apic_access_msrs	= bit(regs.eax, 4);
1765 		cpu.hv.base_feat_hypercall_msrs	= bit(regs.eax, 5);
1766 		cpu.hv.base_feat_vp_id_msr	= bit(regs.eax, 6);
1767 		cpu.hv.base_feat_virt_sys_reset_msr	= bit(regs.eax, 7);
1768 		cpu.hv.base_feat_stat_pages_msr	= bit(regs.eax, 8);
1769 		cpu.hv.base_feat_part_ref_tsc_msr	= bit(regs.eax, 9);
1770 		cpu.hv.base_feat_guest_idle_state_msr	= bit(regs.eax, 10);
1771 		cpu.hv.base_feat_timer_freq_msrs	= bit(regs.eax, 11);
1772 		cpu.hv.base_feat_debug_msrs	= bit(regs.eax, 12);
1773 		cpu.hv.part_flags_create_part	= bit(regs.ebx, 0);
1774 		cpu.hv.part_flags_access_part_id	= bit(regs.ebx, 1);
1775 		cpu.hv.part_flags_access_memory_pool	= bit(regs.ebx, 2);
1776 		cpu.hv.part_flags_adjust_msg_buffers	= bit(regs.ebx, 3);
1777 		cpu.hv.part_flags_post_msgs	= bit(regs.ebx, 4);
1778 		cpu.hv.part_flags_signal_events	= bit(regs.ebx, 5);
1779 		cpu.hv.part_flags_create_port	= bit(regs.ebx, 6);
1780 		cpu.hv.part_flags_connect_port	= bit(regs.ebx, 7);
1781 		cpu.hv.part_flags_access_stats	= bit(regs.ebx, 8);
1782 		cpu.hv.part_flags_debugging	= bit(regs.ebx, 11);
1783 		cpu.hv.part_flags_cpu_mgmt	= bit(regs.ebx, 12);
1784 		cpu.hv.part_flags_cpu_profiler	= bit(regs.ebx, 13);
1785 		cpu.hv.part_flags_expanded_stack_walk	= bit(regs.ebx, 14);
1786 		cpu.hv.part_flags_access_vsm	= bit(regs.ebx, 16);
1787 		cpu.hv.part_flags_access_vp_regs	= bit(regs.ebx, 17);
1788 		cpu.hv.part_flags_extended_hypercalls	= bit(regs.ebx, 20);
1789 		cpu.hv.part_flags_start_vp	= bit(regs.ebx, 21);
1790 		cpu.hv.pm_max_cpu_power_state_c0	= bit(regs.ecx, 0);
1791 		cpu.hv.pm_max_cpu_power_state_c1	= bit(regs.ecx, 1);
1792 		cpu.hv.pm_max_cpu_power_state_c2	= bit(regs.ecx, 2);
1793 		cpu.hv.pm_max_cpu_power_state_c3	= bit(regs.ecx, 3);
1794 		cpu.hv.pm_hpet_reqd_for_c3	= bit(regs.ecx, 4);
		cpu.hv.misc_feat_mwait	= bit(regs.edx, 0);
		cpu.hv.misc_feat_guest_debugging	= bit(regs.edx, 1);
		cpu.hv.misc_feat_perf_mon	= bit(regs.edx, 2);
		cpu.hv.misc_feat_pcpu_dyn_part_event	= bit(regs.edx, 3);
		cpu.hv.misc_feat_xmm_hypercall_input	= bit(regs.edx, 4);
		cpu.hv.misc_feat_guest_idle_state	= bit(regs.edx, 5);
		cpu.hv.misc_feat_hypervisor_sleep_state	= bit(regs.edx, 6);
		cpu.hv.misc_feat_query_numa_distance	= bit(regs.edx, 7);
		cpu.hv.misc_feat_timer_freq	= bit(regs.edx, 8);
		cpu.hv.misc_feat_inject_synmc_xcpt	= bit(regs.edx, 9);
		cpu.hv.misc_feat_guest_crash_msrs	= bit(regs.edx, 10);
		cpu.hv.misc_feat_debug_msrs	= bit(regs.edx, 11);
		cpu.hv.misc_feat_npiep1	= bit(regs.edx, 12);
		cpu.hv.misc_feat_disable_hypervisor	= bit(regs.edx, 13);
		cpu.hv.misc_feat_ext_gva_range_for_flush_va_list	= bit(regs.edx, 14);
		cpu.hv.misc_feat_hypercall_output_xmm	= bit(regs.edx, 15);
		cpu.hv.misc_feat_sint_polling_mode	= bit(regs.edx, 17);
		cpu.hv.misc_feat_hypercall_msr_lock	= bit(regs.edx, 18);
		cpu.hv.misc_feat_use_direct_synth_msrs	= bit(regs.edx, 19);
1814 //		break;
1815 //	default:
1816 //	}
1817 }
1818 
1819 private
1820 void ddcpuid_leaf4000_0004(ref CPUINFO cpu, ref REGISTERS regs) {
1821 //	switch (cpu.virt.vendor.id) with (VirtVendor) {
1822 //	case HyperV:
1823 		cpu.hv.hint_hypercall_for_process_switch	= bit(regs.eax, 0);
1824 		cpu.hv.hint_hypercall_for_tlb_flush	= bit(regs.eax, 1);
1825 		cpu.hv.hint_hypercall_for_tlb_shootdown	= bit(regs.eax, 2);
1826 		cpu.hv.hint_msr_for_apic_access	= bit(regs.eax, 3);
1827 		cpu.hv.hint_msr_for_sys_reset	= bit(regs.eax, 4);
1828 		cpu.hv.hint_relax_time_checks	= bit(regs.eax, 5);
1829 		cpu.hv.hint_dma_remapping	= bit(regs.eax, 6);
1830 		cpu.hv.hint_interrupt_remapping	= bit(regs.eax, 7);
1831 		cpu.hv.hint_x2apic_msrs	= bit(regs.eax, 8);
1832 		cpu.hv.hint_deprecate_auto_eoi	= bit(regs.eax, 9);
1833 		cpu.hv.hint_synth_cluster_ipi_hypercall	= bit(regs.eax, 10);
1834 		cpu.hv.hint_ex_proc_masks_interface	= bit(regs.eax, 11);
1835 		cpu.hv.hint_nested_hyperv	= bit(regs.eax, 12);
1836 		cpu.hv.hint_int_for_mbec_syscalls	= bit(regs.eax, 13);
1837 		cpu.hv.hint_nested_enlightened_vmcs_interface	= bit(regs.eax, 14);
1838 //		break;
1839 //	default:
1840 //	}
1841 }
1842 
1843 private
1844 void ddcpuid_leaf4000_0006(ref CPUINFO cpu, ref REGISTERS regs) {
1845 //	switch (cpu.virt.vendor.id) with (VirtVendor) {
1846 //	case HyperV:
1847 		cpu.hv.host_feat_avic	= bit(regs.eax, 0);
1848 		cpu.hv.host_feat_msr_bitmap	= bit(regs.eax, 1);
1849 		cpu.hv.host_feat_perf_counter	= bit(regs.eax, 2);
1850 		cpu.hv.host_feat_nested_paging	= bit(regs.eax, 3);
1851 		cpu.hv.host_feat_dma_remapping	= bit(regs.eax, 4);
1852 		cpu.hv.host_feat_interrupt_remapping	= bit(regs.eax, 5);
1853 		cpu.hv.host_feat_mem_patrol_scrubber	= bit(regs.eax, 6);
1854 		cpu.hv.host_feat_dma_prot_in_use	= bit(regs.eax, 7);
1855 		cpu.hv.host_feat_hpet_requested	= bit(regs.eax, 8);
1856 		cpu.hv.host_feat_stimer_volatile	= bit(regs.eax, 9);
1857 //		break;
1858 //	default:
1859 //	}
1860 }
1861 
1862 private
1863 void ddcpuid_leaf4000_0010(ref CPUINFO cpu, ref REGISTERS regs) {
1864 //	switch (cpu.virt.vendor.id) with (VirtVendor) {
1865 //	case VBoxMin: // VBox Minimal
1866 		cpu.vbox.tsc_freq_khz = regs.eax;
1867 		cpu.vbox.apic_freq_khz = regs.ebx;
1868 //		break;
1869 //	default:
1870 //	}
1871 }
1872 
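// CPUID.8000_0001H: extended processor signature and feature bits
// (SYSCALL, NX, 1GB pages, long mode, plus AMD-only bits such as SVM and SSE4a).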
1873 private
1874 void ddcpuid_leaf8000_0001(ref CPUINFO cpu, ref REGISTERS regs) {
1875 	switch (cpu.vendor.id) with (Vendor) {
1876 	case AMD:
1877 		// ecx
1878 		cpu.virtualization	= bit(regs.ecx, 2);
1879 		cpu.x2apic	= bit(regs.ecx, 3);
1880 		cpu.sse4a	= bit(regs.ecx, 6);
1881 		cpu.xop	= bit(regs.ecx, 11);
1882 		cpu.skinit	= bit(regs.ecx, 12);
1883 		cpu.fma4	= bit(regs.ecx, 16);
1884 		cpu.tbm	= bit(regs.ecx, 21);
1885 		// edx
1886 		cpu.mmxExtended	= bit(regs.edx, 22);
1887 		cpu._3DNowExtended	= bit(regs.edx, 30);
1888 		cpu._3DNow	= regs.edx >= BIT!(31);
1889 		break;
1890 	default:
1891 	}
1892 	
1893 	// ecx
1894 	cpu.lahf64	= bit(regs.ecx, 0);
1895 	cpu.lzcnt	= bit(regs.ecx, 5);
1896 	cpu.prefetchw	= bit(regs.ecx, 8);
1897 	cpu.monitorx	= bit(regs.ecx, 29);
1898 	// edx
1899 	cpu.syscall	= bit(regs.edx, 11);
1900 	cpu.nx	= bit(regs.edx, 20);
1901 	cpu.page1gb	= bit(regs.edx, 26);
1902 	cpu.rdtscp	= bit(regs.edx, 27);
1903 	cpu.x86_64	= bit(regs.edx, 29);
1904 }
1905 
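// CPUID.8000_0007H (Advanced power management): invariant TSC, plus
// AMD thermal monitoring and Core Performance Boost.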
1906 private
1907 void ddcpuid_leaf8000_0007(ref CPUINFO cpu, ref REGISTERS regs) {
1908 	switch (cpu.vendor.id) with (Vendor) {
1909 	case Intel:
1910 		cpu.rdseed	= bit(regs.ebx, 28);
1911 		break;
1912 	case AMD:
1913 		cpu.tm	= bit(regs.edx, 4);
1914 		cpu.turboboost	= bit(regs.edx, 9);
1915 		break;
1916 	default:
1917 	}
1918 	
1919 	cpu.rdtscInvariant	= bit(regs.edx, 8);
1920 }
1921 
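// CPUID.8000_0008H: physical/linear address widths (EAX), plus AMD speculation
// control bits (IBPB, IBRS, STIBP, SSBD) and Intel WBNOINVD in EBX.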
1922 private
1923 void ddcpuid_leaf8000_0008(ref CPUINFO cpu, ref REGISTERS regs) {
1924 	switch (cpu.vendor.id) with (Vendor) {
1925 	case Intel:
1926 		cpu.wbnoinvd	= bit(regs.ebx, 9);
1927 		break;
1928 	case AMD:
1929 		cpu.ibpb	= bit(regs.ebx, 12);
1930 		cpu.ibrs	= bit(regs.ebx, 14);
1931 		cpu.stibp	= bit(regs.ebx, 15);
1932 		cpu.ibrsAlwaysOn	= bit(regs.ebx, 16);
1933 		cpu.stibpAlwaysOn	= bit(regs.ebx, 17);
1934 		cpu.ibrsPreferred	= bit(regs.ebx, 18);
1935 		cpu.ssbd	= bit(regs.ebx, 24);
1936 		break;
1937 	default:
1938 	}
1939 	
1940 	cpu.physicalBits = regs.al;
1941 	cpu.linearBits = regs.ah;
1942 }
1943 
1944 private
1945 void ddcpuid_leaf8000_000A(ref CPUINFO cpu, ref REGISTERS regs) {
1946 	switch (cpu.vendor.id) {
1947 	case Vendor.AMD:
1948 		cpu.virtVersion	= regs.al; // EAX[7:0]
1949 		cpu.apicv	= bit(regs.edx, 13);
1950 		break;
1951 	default:
1952 	}
1953 }
1954 
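// Resolve core counts and cache topology: CPUID.1FH/0BH/04H/02H on Intel,
// CPUID.8000_001EH/8000_001DH/8000_0008H/8000_0005H/8000_0006H on AMD.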
1955 private
1956 void ddcpuid_topology(ref CPUINFO cpu) {
1957 	ushort sc = void;	/// raw cores shared across cache level
1958 	ushort crshrd = void;	/// actual count of shared cores
1959 	ubyte type = void;	/// cache type
1960 	ubyte mids = void;	/// maximum IDs to this cache
	int shared_ = 1;	/// Threads sharing a core (SMT siblings per compute unit)
1962 	REGISTERS regs = void;	/// registers
1963 	
1964 	cpu.cacheLevels = 0;
1965 	CACHEINFO *ca = cast(CACHEINFO*)cpu.cache;
1966 	
1967 	//TODO: Make 1FH/BH/4H/etc. functions.
1968 	switch (cpu.vendor.id) with (Vendor) {
1969 	case Intel:
1970 		if (cpu.maxLeaf >= 0x1f) goto L_CACHE_INTEL_1FH;
1971 		if (cpu.maxLeaf >= 0xb)  goto L_CACHE_INTEL_BH;
1972 		if (cpu.maxLeaf >= 4)    goto L_CACHE_INTEL_4H;
1973 		// Celeron 0xf34 has maxLeaf=03h and ext=8000_0008h
1974 		if (cpu.maxLeaf >= 2)    goto L_CACHE_INTEL_2H;
		if (cpu.maxLeafExtended >= 0x8000_0005) goto L_AMD_TOPOLOGY_EXT_5H; // Yes, really: fall back to the AMD-style legacy cache leaf
1976 		break;
1977 		
1978 L_CACHE_INTEL_1FH:
1979 		//TODO: Support levels 3,4,5 in CPUID.1FH
1980 		//      (Module, Tile, and Die)
1981 		ddcpuid_id(regs, 0x1f, 1); // Cores (logical)
1982 		cpu.logicalCores = regs.bx;
1983 		
1984 		ddcpuid_id(regs, 0x1f, 0); // SMT (architectural states per core)
1985 		cpu.physicalCores = cast(ushort)(cpu.logicalCores / regs.bx);
1986 		
1987 		goto L_CACHE_INTEL_4H;
1988 		
1989 L_CACHE_INTEL_BH:
1990 		ddcpuid_id(regs, 0xb, 1); // Cores (logical)
1991 		cpu.logicalCores = regs.bx;
1992 		
1993 		ddcpuid_id(regs, 0xb, 0); // SMT (architectural states per core)
1994 		cpu.physicalCores = cast(ushort)(cpu.logicalCores / regs.bx);
1995 		
1996 L_CACHE_INTEL_4H:
1997 		ddcpuid_id(regs, 4, cpu.cacheLevels);
1998 		
1999 		type = regs.eax & CACHE_MASK; // EAX[4:0]
2000 		if (type == 0 || cpu.cacheLevels >= CACHE_MAX_LEVEL) return;
2001 		
2002 		ca.type = CACHE_TYPE[type];
2003 		ca.level = regs.al >> 5;
2004 		ca.lineSize = (regs.bx & 0xfff) + 1; // bits 11-0
2005 		ca.partitions = ((regs.ebx >> 12) & 0x3ff) + 1; // bits 21-12
2006 		ca.ways = ((regs.ebx >> 22) + 1); // bits 31-22
2007 		ca.sets = regs.ecx + 1;
2008 		if (regs.eax & BIT!(8)) ca.features = 1;
2009 		if (regs.eax & BIT!(9)) ca.features |= BIT!(1);
2010 		if (regs.edx & BIT!(0)) ca.features |= BIT!(2);
2011 		if (regs.edx & BIT!(1)) ca.features |= BIT!(3);
2012 		if (regs.edx & BIT!(2)) ca.features |= BIT!(4);
2013 		ca.size = (ca.sets * ca.lineSize * ca.partitions * ca.ways) >> 10;
2014 		
2015 		mids = (regs.eax >> 26) + 1;	// EAX[31:26]
2016 		
2017 		if (cpu.logicalCores == 0) with (cpu) { // skip if already populated
2018 			logicalCores = mids;
2019 			physicalCores = cpu.htt ? mids >> 1 : mids;
2020 		}
2021 		
		crshrd = (((regs.eax >> 14) & 0xfff) + 1);	// EAX[25:14]
2023 		sc = cast(ushort)(cpu.logicalCores / crshrd); // cast for ldc 0.17.1
2024 		ca.sharedCores = sc ? sc : 1;
2025 		version (Trace) trace("intel.4h mids=%u shared=%u crshrd=%u sc=%u",
2026 			mids, ca.sharedCores, crshrd, sc);
2027 		
2028 		++cpu.cacheLevels; ++ca;
2029 		goto L_CACHE_INTEL_4H;
2030 
2031 L_CACHE_INTEL_2H:
2032 		ddcpuid_id(regs, 2);
2033 		ddcpuid_leaf2(cpu, regs);
2034 		break;
2035 	case AMD:
2036 		if (cpu.maxLeafExtended >= 0x8000_001E) goto L_AMD_TOPOLOGY_EXT_1EH;
2037 		if (cpu.maxLeafExtended >= 0x8000_0008) goto L_AMD_TOPOLOGY_EXT_8H;
2038 		if (cpu.maxLeafExtended >= 0x8000_0005) goto L_AMD_TOPOLOGY_EXT_5H;
2039 		
2040 		break;
2041 
2042 L_AMD_TOPOLOGY_EXT_1EH:
2043 		
2044 		ddcpuid_id(regs, 0x8000_0001);
2045 		
2046 		if ((regs.ecx & BIT!22) == 0) goto L_AMD_TOPOLOGY_EXT_8H;
2047 		
2048 		ddcpuid_id(regs, 0x8000_001E);
2049 		
2050 		shared_ = regs.bh + 1; // ThreadsPerComputeUnit
2051 		version (Trace) trace("amd.0x8000_001E shared=%u", shared_);
2052 		
2053 		goto L_AMD_TOPOLOGY_EXT_1DH;
2054 		
2055 L_AMD_TOPOLOGY_EXT_8H:
2056 		// See APM Volume 3 Appendix E.5
2057 		// For some reason, CPUID Fn8000_001E_EBX is not mentioned there
2058 		ddcpuid_id(regs, 0x8000_0008);
2059 		
2060 		type = regs.cx >> 12; // ApicIdSize
2061 		
2062 		if (type) { // Extended
2063 			cpu.physicalCores = regs.cl + 1;
2064 			cpu.logicalCores = cast(ushort)(1 << type);
2065 		} else { // Legacy
2066 			cpu.logicalCores = cpu.physicalCores = regs.cl + 1;
2067 		}
2068 		
2069 		//
2070 		// AMD newer cache method
2071 		//
2072 		
2073 L_AMD_TOPOLOGY_EXT_1DH: // Almost the same as Intel's
2074 		ddcpuid_id(regs, 0x8000_001d, cpu.cacheLevels);
2075 		
2076 		type = regs.eax & CACHE_MASK; // EAX[4:0]
2077 		if (type == 0 || cpu.cacheLevels >= CACHE_MAX_LEVEL) return;
2078 		
2079 		ca.type = CACHE_TYPE[type];
2080 		ca.level = (regs.eax >> 5) & 7;
2081 		ca.lineSize = (regs.ebx & 0xfff) + 1;
2082 		ca.partitions = ((regs.ebx >> 12) & 0x3ff) + 1;
2083 		ca.ways = (regs.ebx >> 22) + 1;
2084 		ca.sets = regs.ecx + 1;
2085 		if (regs.eax & BIT!(8)) ca.features = 1;
2086 		if (regs.eax & BIT!(9)) ca.features |= BIT!(1);
2087 		if (regs.edx & BIT!(0)) ca.features |= BIT!(2);
2088 		if (regs.edx & BIT!(1)) ca.features |= BIT!(3);
2089 		ca.size = (ca.sets * ca.lineSize * ca.partitions * ca.ways) >> 10;
2090 		
2091 		crshrd = (((regs.eax >> 14) & 0xfff) + 1); // bits 25-14
2092 		sc = cast(ushort)(cpu.apicMaxId / crshrd); // cast for ldc 0.17.1
2093 		ca.sharedCores = sc ? sc : 1;
2094 		
2095 		if (cpu.logicalCores == 0) with (cpu) { // skip if already populated
2096 			logicalCores = cpu.apicMaxId;
2097 			physicalCores = cpu.apicMaxId / shared_;
2098 		}
2099 		
		version (Trace) trace("amd.8000_001Dh shared=%u crshrd=%u sc=%u",
			ca.sharedCores, crshrd, sc);
2102 		
2103 		++cpu.cacheLevels; ++ca;
2104 		goto L_AMD_TOPOLOGY_EXT_1DH;
2105 		
2106 		//
2107 		// AMD legacy cache
2108 		//
2109 		
2110 L_AMD_TOPOLOGY_EXT_5H:
2111 		ddcpuid_id(regs, 0x8000_0005);
2112 		
2113 		cpu.cache[0].level = 1; // L1-D
2114 		cpu.cache[0].type = 'D'; // data
2115 		cpu.cache[0].size = regs.ecx >> 24;
2116 		cpu.cache[0].ways = cast(ubyte)(regs.ecx >> 16);
2117 		cpu.cache[0].lines = regs.ch;
2118 		cpu.cache[0].lineSize = regs.cl;
2119 		cpu.cache[0].sets = 1;
2120 		
2121 		cpu.cache[1].level = 1; // L1-I
2122 		cpu.cache[1].type = 'I'; // instructions
2123 		cpu.cache[1].size = regs.edx >> 24;
2124 		cpu.cache[1].ways = cast(ubyte)(regs.edx >> 16);
2125 		cpu.cache[1].lines = regs.dh;
2126 		cpu.cache[1].lineSize = regs.dl;
2127 		cpu.cache[1].sets = 1;
2128 		
2129 		cpu.cacheLevels = 2;
2130 		
2131 		if (cpu.maxLeafExtended < 0x8000_0006)
2132 			return; // No L2/L3
2133 		
2134 		// See Table E-4. L2/L3 Cache and TLB Associativity Field Encoding
2135 		static immutable ubyte[16] _amd_cache_ways = [
2136 			// 7h is reserved
2137 			// 9h mentions 8000_001D but that's already supported
2138 			0, 1, 2, 3, 4, 6, 8, 0, 16, 0, 32, 48, 64, 96, 128, 255
2139 		];
2140 		
2141 		ddcpuid_id(regs, 0x8000_0006);
2142 		
2143 		type = regs.cx >> 12; // amd_ways_l2
2144 		if (type == 0) break;
2145 		
2146 		cpu.cache[2].level = 2;  // L2
2147 		cpu.cache[2].type = 'U'; // unified
2148 		cpu.cache[2].size = regs.ecx >> 16;
2149 		cpu.cache[2].ways = _amd_cache_ways[type];
2150 		cpu.cache[2].lines = regs.ch & 0xf;
2151 		cpu.cache[2].lineSize = regs.cl;
2152 		cpu.cache[2].sets = 1;
2153 		cpu.cacheLevels = 3;
2154 		
2155 		type = regs.dx >> 12; // amd_ways_l3
2156 		if (type == 0) break;
2157 		
2158 		cpu.cache[3].level = 3;  // L3
2159 		cpu.cache[3].type = 'U'; // unified
2160 		cpu.cache[3].size = ((regs.edx >> 18) + 1) << 9;
2161 		cpu.cache[3].ways = _amd_cache_ways[type];
2162 		cpu.cache[3].lines = regs.dh & 0xf;
2163 		cpu.cache[3].lineSize = regs.dl & 0x7F;
2164 		cpu.cache[3].sets = 1;
2165 		cpu.cacheLevels = 4;
2166 		break;
2167 	default:
2168 	}
2169 	
	// Fallback: if topology detection yielded nothing, report at least one core.
	with (cpu) {
		if (logicalCores == 0)  logicalCores = 1;
		if (physicalCores == 0) physicalCores = 1;
	}
2171 }
2172 
2173 private struct LeafInfo {
2174 	uint leaf;
2175 	uint sub;
2176 	void function(ref CPUINFO, ref REGISTERS) func;
2177 }
2178 private struct LeafExtInfo {
2179 	uint leaf;
2180 	void function(ref CPUINFO, ref REGISTERS) func;
2181 }
2182 
2183 /// Fetch CPU information.
2184 /// Params: cpu = CPUINFO structure
2185 void ddcpuid_cpuinfo(ref CPUINFO cpu) {
2186 	static immutable LeafInfo[] regulars = [
2187 		{ 0x1,	0,	&ddcpuid_leaf1 },	// Sets brand index
2188 		{ 0x5,	0,	&ddcpuid_leaf5 },
2189 		{ 0x6,	0,	&ddcpuid_leaf6 },
2190 		{ 0x7,	0,	&ddcpuid_leaf7 },
2191 		{ 0x7,	1,	&ddcpuid_leaf7sub1 },
2192 		{ 0xd,	0,	&ddcpuid_leafD },
2193 		{ 0xd,	1,	&ddcpuid_leafDsub1 },
2194 		{ 0x12,	0,	&ddcpuid_leaf12 },
2195 	];
2196 	static immutable LeafExtInfo[] extended = [
2197 		{ 0x8000_0001,	&ddcpuid_leaf8000_0001 },
2198 		{ 0x8000_0007,	&ddcpuid_leaf8000_0007 },
2199 		{ 0x8000_0008,	&ddcpuid_leaf8000_0008 },
2200 		{ 0x8000_000a,	&ddcpuid_leaf8000_000A },
2201 	];
2202 	REGISTERS regs = void;	/// registers
2203 	
2204 	ddcpuid_vendor(cpu.vendor.string_);
2205 	cpu.vendor.id = ddcpuid_vendor_id(cpu.vendor);
2206 	
2207 	foreach (ref immutable(LeafInfo) l; regulars) {
2208 		if (l.leaf > cpu.maxLeaf) break;
2209 		
2210 		ddcpuid_id(regs, l.leaf, l.sub);
2211 		l.func(cpu, regs);
2212 	}
2213 	
2214 	// Paravirtualization leaves
2215 	if (cpu.maxLeafVirt >= 0x4000_0000) {
2216 		ddcpuid_virt_vendor(cpu.virtVendor.string_);
2217 		cpu.virtVendor.id = ddcpuid_virt_vendor_id(cpu.virtVendor);
2218 		
2219 		switch (cpu.virtVendor.id) with (VirtVendor) {
2220 		case KVM:
2221 			ddcpuid_id(regs, 0x4000_0001);
2222 			ddcpuid_leaf4000_0001(cpu, regs);
2223 			break;
2224 		case HyperV:
2225 			ddcpuid_id(regs, 0x4000_0002);
2226 			ddcpuid_leaf4000_0002(cpu, regs);
2227 			ddcpuid_id(regs, 0x4000_0003);
2228 			ddcpuid_leaf4000_0003(cpu, regs);
2229 			ddcpuid_id(regs, 0x4000_0004);
2230 			ddcpuid_leaf4000_0004(cpu, regs);
2231 			ddcpuid_id(regs, 0x4000_0006);
2232 			ddcpuid_leaf4000_0006(cpu, regs);
2233 			break;
2234 		case VBoxMin:
2235 			ddcpuid_id(regs, 0x4000_0010);
2236 			ddcpuid_leaf4000_0010(cpu, regs);
2237 			break;
2238 		default:
2239 		}
2240 	}
2241 	
2242 	// Extended leaves
2243 	if (cpu.maxLeafExtended >= 0x8000_0000) {
2244 		foreach (ref immutable(LeafExtInfo) l; extended) {
2245 			if (l.leaf > cpu.maxLeafExtended) break;
2246 			
2247 			ddcpuid_id(regs, l.leaf);
2248 			l.func(cpu, regs);
2249 		}
2250 	}
2251 	
2252 	ddcpuid_model_string(cpu); // Sets brand string
2253 	ddcpuid_topology(cpu);	 // Sets core/thread/cache topology
2254 }
2255 
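/// Get the highest x86-64 microarchitecture feature level (psABI baseline)
/// this CPU satisfies, or an i386/i486/i586/i686 class for older processors.
/// Params: cpu = CPUINFO structure, previously filled by ddcpuid_cpuinfo
/// Returns: Static baseline string, e.g. "x86-64-v2"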
2256 const(char) *ddcpuid_baseline(ref CPUINFO cpu) {
2257 	if (cpu.avx512f && cpu.avx512bw && cpu.avx512cd &&
2258 		cpu.avx512dq && cpu.avx512vl) {
2259 		return "x86-64-v4";
2260 	}
2261 	
2262 	if (cpu.avx2 && cpu.avx && cpu.bmi2 && cpu.bmi1 &&
2263 		cpu.f16c && cpu.fma && cpu.lzcnt &&
2264 		cpu.movbe && cpu.osxsave) {
2265 		return "x86-64-v3";
2266 	}
2267 	
2268 	if (cpu.sse42 && cpu.sse41 && cpu.ssse3 && cpu.sse3 &&
2269 		cpu.lahf64 && cpu.popcnt && cpu.cmpxchg16b) {
2270 		return "x86-64-v2";
2271 	}
2272 	
2273 	if (cpu.sse && cpu.sse2 && cpu.mmx && cpu.fxsr &&
2274 		cpu.cmpxchg8b && cpu.cmov && cpu.fpu && cpu.syscall) {
2275 		return "x86-64"; // baseline, v1
2276 	}
2277 	
	// NOTE: K7 is still family 6 and didn't have SSE2.
2279 	// NOTE: Whoever manages to run this on an i486 has my respect.
2280 	switch (cpu.family) {
2281 	case 3:  return "i386"; // 80386
2282 	case 4:  return "i486"; // 80486
2283 	case 5:  return "i586"; // Pentium / MMX
2284 	default: return "i686"; // Pentium Pro / II
2285 	}
2286 }