Mirror of https://github.com/anrieff/libcpuid, synced 2025-10-03 11:01:30 +00:00
Decode deterministic cache info for AMD CPUs too
Starting with Zen-based CPUs, cpu_id_t::l3_cache has been the size of the total L3 cache for the whole chip, while cpu_id_t::l1_cache and cpu_id_t::l2_cache are the sizes of each instance. This change provides the L3 cache size per instance (see the usage sketch below).
parent 2ec692b579
commit 0c9ef3249c
36 changed files with 220 additions and 296 deletions
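For context, a minimal consumer sketch (illustration only, not part of this diff), assuming the public libcpuid entry points cpuid_get_raw_data() and cpu_identify(): with this change, l3_cache reports the size of one L3 instance rather than the chip-wide total, in KB like the other cache fields.

#include <stdio.h>
#include <libcpuid.h>

int main(void)
{
	struct cpu_raw_data_t raw;
	struct cpu_id_t data;

	if (cpuid_get_raw_data(&raw) < 0 || cpu_identify(&raw, &data) < 0)
		return 1;
	/* Cache sizes are reported in KB; l3_cache is now per L3 instance, not chip-wide. */
	printf("L1D: %d KB, L2: %d KB, L3 (per instance): %d KB\n",
	       data.l1_data_cache, data.l2_cache, data.l3_cache);
	return 0;
}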
@@ -34,6 +34,7 @@
#endif
#include "libcpuid.h"
#include "libcpuid_util.h"
#include "libcpuid_internal.h"

int _current_verboselevel;

@@ -265,3 +266,112 @@ void clear_affinity_mask_bit(logical_cpu_t logical_cpu, cpu_affinity_mask_t *aff
{
	affinity_mask->__bits[logical_cpu / __MASK_NCPUBITS] &= ~(0x1 << (logical_cpu % __MASK_NCPUBITS));
}

/* https://github.com/torvalds/linux/blob/3e5c673f0d75bc22b3c26eade87e4db4f374cd34/include/linux/bitops.h#L210-L216 */
static int get_count_order(unsigned int x)
{
	int r = 32;

	if (x == 0)
		return -1;

	--x;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

void assign_cache_data(uint8_t on, cache_type_t cache, int size, int assoc, int linesize, struct cpu_id_t* data)
{
	if (!on) return;
	switch (cache) {
		case L1I:
			data->l1_instruction_cache = size;
			data->l1_instruction_assoc = assoc;
			data->l1_instruction_cacheline = linesize;
			break;
		case L1D:
			data->l1_data_cache = size;
			data->l1_data_assoc = assoc;
			data->l1_data_cacheline = linesize;
			break;
		case L2:
			data->l2_cache = size;
			data->l2_assoc = assoc;
			data->l2_cacheline = linesize;
			break;
		case L3:
			data->l3_cache = size;
			data->l3_assoc = assoc;
			data->l3_cacheline = linesize;
			break;
		case L4:
			data->l4_cache = size;
			data->l4_assoc = assoc;
			data->l4_cacheline = linesize;
			break;
		default:
			break;
	}
}

void decode_deterministic_cache_info_x86(uint32_t cache_regs[][NUM_REGS],
	uint8_t subleaf_count,
	struct cpu_id_t* data,
	struct internal_id_info_t* internal)
{
	uint8_t i;
	uint32_t cache_level, cache_type, ways, partitions, linesize, sets, size, num_sharing_cache, index_msb;
	cache_type_t type;

	for (i = 0; i < subleaf_count; i++) {
		cache_level = EXTRACTS_BITS(cache_regs[i][EAX], 7, 5);
		cache_type = EXTRACTS_BITS(cache_regs[i][EAX], 4, 0);
		if ((cache_level == 0) || (cache_type == 0))
			break;
		if (cache_level == 1 && cache_type == 1)
			type = L1D;
		else if (cache_level == 1 && cache_type == 2)
			type = L1I;
		else if (cache_level == 2 && cache_type == 3)
			type = L2;
		else if (cache_level == 3 && cache_type == 3)
			type = L3;
		else if (cache_level == 4 && cache_type == 3)
			type = L4;
		else {
			warnf("deterministic_cache: unknown level/typenumber combo (%d/%d), cannot\n", cache_level, cache_type);
			warnf("deterministic_cache: recognize cache type\n");
			continue;
		}
		num_sharing_cache = EXTRACTS_BITS(cache_regs[i][EAX], 25, 14) + 1;
		ways = EXTRACTS_BITS(cache_regs[i][EBX], 31, 22) + 1;
		partitions = EXTRACTS_BITS(cache_regs[i][EBX], 21, 12) + 1;
		linesize = EXTRACTS_BITS(cache_regs[i][EBX], 11, 0) + 1;
		sets = EXTRACTS_BITS(cache_regs[i][ECX], 31, 0) + 1;
		size = ways * partitions * linesize * sets / 1024;
		index_msb = get_count_order(num_sharing_cache);
		internal->cache_mask[i] = ~((1 << index_msb) - 1);
		assign_cache_data(1, type, size, ways, linesize, data);
	}
}
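For illustration only, a hypothetical caller sketch (not part of this diff): an AMD recognizer could collect the deterministic cache subleaves from CPUID leaf 0x8000001D (available when TOPOEXT is supported) and feed them to decode_deterministic_cache_info_x86(). MAX_CACHE_SUBLEAFS and the helper name are assumptions; cpu_exec_cpuid_ext() and the EAX/EBX/ECX/EDX register indices are the existing libcpuid facilities.

#define MAX_CACHE_SUBLEAFS 8 /* hypothetical upper bound on enumerated subleaves */

static void decode_cache_info_amd(struct cpu_id_t* data, struct internal_id_info_t* internal)
{
	uint8_t subleaf;
	uint32_t cache_regs[MAX_CACHE_SUBLEAFS][NUM_REGS];

	for (subleaf = 0; subleaf < MAX_CACHE_SUBLEAFS; subleaf++) {
		cache_regs[subleaf][EAX] = 0x8000001d; /* AMD cache properties leaf */
		cache_regs[subleaf][EBX] = 0;
		cache_regs[subleaf][ECX] = subleaf;    /* cache index (subleaf) */
		cache_regs[subleaf][EDX] = 0;
		cpu_exec_cpuid_ext(cache_regs[subleaf]);
		if (EXTRACTS_BITS(cache_regs[subleaf][EAX], 4, 0) == 0)
			break; /* cache type 0: no more caches to enumerate */
	}
	decode_deterministic_cache_info_x86(cache_regs, subleaf, data, internal);
}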