Loading...
1/*
2 * Copyright 2015-2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/pci.h>
24#include <linux/acpi.h>
25#include "kfd_crat.h"
26#include "kfd_priv.h"
27#include "kfd_topology.h"
28#include "kfd_iommu.h"
29#include "amdgpu_amdkfd.h"
30
31/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
32 * GPU processor ID are expressed with Bit[31]=1.
33 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
34 * used in the CRAT.
35 */
36static uint32_t gpu_processor_id_low = 0x80001000;
37
38/* Return the next available gpu_processor_id and increment it for next GPU
39 * @total_cu_count - Total CUs present in the GPU including ones
40 * masked off
41 */
42static inline unsigned int get_and_inc_gpu_processor_id(
43 unsigned int total_cu_count)
44{
45 int current_id = gpu_processor_id_low;
46
47 gpu_processor_id_low += total_cu_count;
48 return current_id;
49}
50
51/* Static table to describe GPU Cache information */
52struct kfd_gpu_cache_info {
53 uint32_t cache_size;
54 uint32_t cache_level;
55 uint32_t flags;
56 /* Indicates how many Compute Units share this cache
57 * Value = 1 indicates the cache is not shared
58 */
59 uint32_t num_cu_shared;
60};
61
62static struct kfd_gpu_cache_info kaveri_cache_info[] = {
63 {
64 /* TCP L1 Cache per CU */
65 .cache_size = 16,
66 .cache_level = 1,
67 .flags = (CRAT_CACHE_FLAGS_ENABLED |
68 CRAT_CACHE_FLAGS_DATA_CACHE |
69 CRAT_CACHE_FLAGS_SIMD_CACHE),
70 .num_cu_shared = 1,
71
72 },
73 {
74 /* Scalar L1 Instruction Cache (in SQC module) per bank */
75 .cache_size = 16,
76 .cache_level = 1,
77 .flags = (CRAT_CACHE_FLAGS_ENABLED |
78 CRAT_CACHE_FLAGS_INST_CACHE |
79 CRAT_CACHE_FLAGS_SIMD_CACHE),
80 .num_cu_shared = 2,
81 },
82 {
83 /* Scalar L1 Data Cache (in SQC module) per bank */
84 .cache_size = 8,
85 .cache_level = 1,
86 .flags = (CRAT_CACHE_FLAGS_ENABLED |
87 CRAT_CACHE_FLAGS_DATA_CACHE |
88 CRAT_CACHE_FLAGS_SIMD_CACHE),
89 .num_cu_shared = 2,
90 },
91
92 /* TODO: Add L2 Cache information */
93};
94
95
96static struct kfd_gpu_cache_info carrizo_cache_info[] = {
97 {
98 /* TCP L1 Cache per CU */
99 .cache_size = 16,
100 .cache_level = 1,
101 .flags = (CRAT_CACHE_FLAGS_ENABLED |
102 CRAT_CACHE_FLAGS_DATA_CACHE |
103 CRAT_CACHE_FLAGS_SIMD_CACHE),
104 .num_cu_shared = 1,
105 },
106 {
107 /* Scalar L1 Instruction Cache (in SQC module) per bank */
108 .cache_size = 8,
109 .cache_level = 1,
110 .flags = (CRAT_CACHE_FLAGS_ENABLED |
111 CRAT_CACHE_FLAGS_INST_CACHE |
112 CRAT_CACHE_FLAGS_SIMD_CACHE),
113 .num_cu_shared = 4,
114 },
115 {
116 /* Scalar L1 Data Cache (in SQC module) per bank. */
117 .cache_size = 4,
118 .cache_level = 1,
119 .flags = (CRAT_CACHE_FLAGS_ENABLED |
120 CRAT_CACHE_FLAGS_DATA_CACHE |
121 CRAT_CACHE_FLAGS_SIMD_CACHE),
122 .num_cu_shared = 4,
123 },
124
125 /* TODO: Add L2 Cache information */
126};
127
128/* NOTE: In future if more information is added to struct kfd_gpu_cache_info
129 * the following ASICs may need a separate table.
130 */
131#define hawaii_cache_info kaveri_cache_info
132#define tonga_cache_info carrizo_cache_info
133#define fiji_cache_info carrizo_cache_info
134#define polaris10_cache_info carrizo_cache_info
135#define polaris11_cache_info carrizo_cache_info
136#define polaris12_cache_info carrizo_cache_info
137#define vegam_cache_info carrizo_cache_info
138/* TODO - check & update Vega10 cache details */
139#define vega10_cache_info carrizo_cache_info
140#define raven_cache_info carrizo_cache_info
141#define renoir_cache_info carrizo_cache_info
142/* TODO - check & update Navi10 cache details */
143#define navi10_cache_info carrizo_cache_info
144
145static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
146 struct crat_subtype_computeunit *cu)
147{
148 dev->node_props.cpu_cores_count = cu->num_cpu_cores;
149 dev->node_props.cpu_core_id_base = cu->processor_id_low;
150 if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
151 dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
152
153 pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
154 cu->processor_id_low);
155}
156
157static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
158 struct crat_subtype_computeunit *cu)
159{
160 dev->node_props.simd_id_base = cu->processor_id_low;
161 dev->node_props.simd_count = cu->num_simd_cores;
162 dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
163 dev->node_props.max_waves_per_simd = cu->max_waves_simd;
164 dev->node_props.wave_front_size = cu->wave_front_size;
165 dev->node_props.array_count = cu->array_count;
166 dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
167 dev->node_props.simd_per_cu = cu->num_simd_per_cu;
168 dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
169 if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
170 dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
171 pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
172}
173
174/* kfd_parse_subtype_cu - parse compute unit subtypes and attach it to correct
175 * topology device present in the device_list
176 */
177static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
178 struct list_head *device_list)
179{
180 struct kfd_topology_device *dev;
181
182 pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
183 cu->proximity_domain, cu->hsa_capability);
184 list_for_each_entry(dev, device_list, list) {
185 if (cu->proximity_domain == dev->proximity_domain) {
186 if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
187 kfd_populated_cu_info_cpu(dev, cu);
188
189 if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
190 kfd_populated_cu_info_gpu(dev, cu);
191 break;
192 }
193 }
194
195 return 0;
196}
197
198static struct kfd_mem_properties *
199find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
200 struct kfd_topology_device *dev)
201{
202 struct kfd_mem_properties *props;
203
204 list_for_each_entry(props, &dev->mem_props, list) {
205 if (props->heap_type == heap_type
206 && props->flags == flags
207 && props->width == width)
208 return props;
209 }
210
211 return NULL;
212}
213/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
214 * topology device present in the device_list
215 */
216static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
217 struct list_head *device_list)
218{
219 struct kfd_mem_properties *props;
220 struct kfd_topology_device *dev;
221 uint32_t heap_type;
222 uint64_t size_in_bytes;
223 uint32_t flags = 0;
224 uint32_t width;
225
226 pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
227 mem->proximity_domain);
228 list_for_each_entry(dev, device_list, list) {
229 if (mem->proximity_domain == dev->proximity_domain) {
230 /* We're on GPU node */
231 if (dev->node_props.cpu_cores_count == 0) {
232 /* APU */
233 if (mem->visibility_type == 0)
234 heap_type =
235 HSA_MEM_HEAP_TYPE_FB_PRIVATE;
236 /* dGPU */
237 else
238 heap_type = mem->visibility_type;
239 } else
240 heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
241
242 if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
243 flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
244 if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
245 flags |= HSA_MEM_FLAGS_NON_VOLATILE;
246
247 size_in_bytes =
248 ((uint64_t)mem->length_high << 32) +
249 mem->length_low;
250 width = mem->width;
251
252 /* Multiple banks of the same type are aggregated into
253 * one. User mode doesn't care about multiple physical
254 * memory segments. It's managed as a single virtual
255 * heap for user mode.
256 */
257 props = find_subtype_mem(heap_type, flags, width, dev);
258 if (props) {
259 props->size_in_bytes += size_in_bytes;
260 break;
261 }
262
263 props = kfd_alloc_struct(props);
264 if (!props)
265 return -ENOMEM;
266
267 props->heap_type = heap_type;
268 props->flags = flags;
269 props->size_in_bytes = size_in_bytes;
270 props->width = width;
271
272 dev->node_props.mem_banks_count++;
273 list_add_tail(&props->list, &dev->mem_props);
274
275 break;
276 }
277 }
278
279 return 0;
280}
281
282/* kfd_parse_subtype_cache - parse cache subtypes and attach it to correct
283 * topology device present in the device_list
284 */
285static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
286 struct list_head *device_list)
287{
288 struct kfd_cache_properties *props;
289 struct kfd_topology_device *dev;
290 uint32_t id;
291 uint32_t total_num_of_cu;
292
293 id = cache->processor_id_low;
294
295 pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
296 list_for_each_entry(dev, device_list, list) {
297 total_num_of_cu = (dev->node_props.array_count *
298 dev->node_props.cu_per_simd_array);
299
300 /* Cache infomration in CRAT doesn't have proximity_domain
301 * information as it is associated with a CPU core or GPU
302 * Compute Unit. So map the cache using CPU core Id or SIMD
303 * (GPU) ID.
304 * TODO: This works because currently we can safely assume that
305 * Compute Units are parsed before caches are parsed. In
306 * future, remove this dependency
307 */
308 if ((id >= dev->node_props.cpu_core_id_base &&
309 id <= dev->node_props.cpu_core_id_base +
310 dev->node_props.cpu_cores_count) ||
311 (id >= dev->node_props.simd_id_base &&
312 id < dev->node_props.simd_id_base +
313 total_num_of_cu)) {
314 props = kfd_alloc_struct(props);
315 if (!props)
316 return -ENOMEM;
317
318 props->processor_id_low = id;
319 props->cache_level = cache->cache_level;
320 props->cache_size = cache->cache_size;
321 props->cacheline_size = cache->cache_line_size;
322 props->cachelines_per_tag = cache->lines_per_tag;
323 props->cache_assoc = cache->associativity;
324 props->cache_latency = cache->cache_latency;
325 memcpy(props->sibling_map, cache->sibling_map,
326 sizeof(props->sibling_map));
327
328 if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
329 props->cache_type |= HSA_CACHE_TYPE_DATA;
330 if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
331 props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
332 if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
333 props->cache_type |= HSA_CACHE_TYPE_CPU;
334 if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
335 props->cache_type |= HSA_CACHE_TYPE_HSACU;
336
337 dev->cache_count++;
338 dev->node_props.caches_count++;
339 list_add_tail(&props->list, &dev->cache_props);
340
341 break;
342 }
343 }
344
345 return 0;
346}
347
348/* kfd_parse_subtype_iolink - parse iolink subtypes and attach it to correct
349 * topology device present in the device_list
350 */
351static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
352 struct list_head *device_list)
353{
354 struct kfd_iolink_properties *props = NULL, *props2;
355 struct kfd_topology_device *dev, *to_dev;
356 uint32_t id_from;
357 uint32_t id_to;
358
359 id_from = iolink->proximity_domain_from;
360 id_to = iolink->proximity_domain_to;
361
362 pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
363 id_from, id_to);
364 list_for_each_entry(dev, device_list, list) {
365 if (id_from == dev->proximity_domain) {
366 props = kfd_alloc_struct(props);
367 if (!props)
368 return -ENOMEM;
369
370 props->node_from = id_from;
371 props->node_to = id_to;
372 props->ver_maj = iolink->version_major;
373 props->ver_min = iolink->version_minor;
374 props->iolink_type = iolink->io_interface_type;
375
376 if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
377 props->weight = 20;
378 else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
379 props->weight = 15 * iolink->num_hops_xgmi;
380 else
381 props->weight = node_distance(id_from, id_to);
382
383 props->min_latency = iolink->minimum_latency;
384 props->max_latency = iolink->maximum_latency;
385 props->min_bandwidth = iolink->minimum_bandwidth_mbs;
386 props->max_bandwidth = iolink->maximum_bandwidth_mbs;
387 props->rec_transfer_size =
388 iolink->recommended_transfer_size;
389
390 dev->io_link_count++;
391 dev->node_props.io_links_count++;
392 list_add_tail(&props->list, &dev->io_link_props);
393 break;
394 }
395 }
396
397 /* CPU topology is created before GPUs are detected, so CPU->GPU
398 * links are not built at that time. If a PCIe type is discovered, it
399 * means a GPU is detected and we are adding GPU->CPU to the topology.
400 * At this time, also add the corresponded CPU->GPU link if GPU
401 * is large bar.
402 * For xGMI, we only added the link with one direction in the crat
403 * table, add corresponded reversed direction link now.
404 */
405 if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
406 to_dev = kfd_topology_device_by_proximity_domain(id_to);
407 if (!to_dev)
408 return -ENODEV;
409 /* same everything but the other direction */
410 props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
411 props2->node_from = id_to;
412 props2->node_to = id_from;
413 props2->kobj = NULL;
414 to_dev->io_link_count++;
415 to_dev->node_props.io_links_count++;
416 list_add_tail(&props2->list, &to_dev->io_link_props);
417 }
418
419 return 0;
420}
421
422/* kfd_parse_subtype - parse subtypes and attach it to correct topology device
423 * present in the device_list
424 * @sub_type_hdr - subtype section of crat_image
425 * @device_list - list of topology devices present in this crat_image
426 */
427static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
428 struct list_head *device_list)
429{
430 struct crat_subtype_computeunit *cu;
431 struct crat_subtype_memory *mem;
432 struct crat_subtype_cache *cache;
433 struct crat_subtype_iolink *iolink;
434 int ret = 0;
435
436 switch (sub_type_hdr->type) {
437 case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
438 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
439 ret = kfd_parse_subtype_cu(cu, device_list);
440 break;
441 case CRAT_SUBTYPE_MEMORY_AFFINITY:
442 mem = (struct crat_subtype_memory *)sub_type_hdr;
443 ret = kfd_parse_subtype_mem(mem, device_list);
444 break;
445 case CRAT_SUBTYPE_CACHE_AFFINITY:
446 cache = (struct crat_subtype_cache *)sub_type_hdr;
447 ret = kfd_parse_subtype_cache(cache, device_list);
448 break;
449 case CRAT_SUBTYPE_TLB_AFFINITY:
450 /*
451 * For now, nothing to do here
452 */
453 pr_debug("Found TLB entry in CRAT table (not processing)\n");
454 break;
455 case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
456 /*
457 * For now, nothing to do here
458 */
459 pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
460 break;
461 case CRAT_SUBTYPE_IOLINK_AFFINITY:
462 iolink = (struct crat_subtype_iolink *)sub_type_hdr;
463 ret = kfd_parse_subtype_iolink(iolink, device_list);
464 break;
465 default:
466 pr_warn("Unknown subtype %d in CRAT\n",
467 sub_type_hdr->type);
468 }
469
470 return ret;
471}
472
473/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
474 * create a kfd_topology_device and add in to device_list. Also parse
475 * CRAT subtypes and attach it to appropriate kfd_topology_device
476 * @crat_image - input image containing CRAT
477 * @device_list - [OUT] list of kfd_topology_device generated after
478 * parsing crat_image
479 * @proximity_domain - Proximity domain of the first device in the table
480 *
481 * Return - 0 if successful else -ve value
482 */
483int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
484 uint32_t proximity_domain)
485{
486 struct kfd_topology_device *top_dev = NULL;
487 struct crat_subtype_generic *sub_type_hdr;
488 uint16_t node_id;
489 int ret = 0;
490 struct crat_header *crat_table = (struct crat_header *)crat_image;
491 uint16_t num_nodes;
492 uint32_t image_len;
493
494 if (!crat_image)
495 return -EINVAL;
496
497 if (!list_empty(device_list)) {
498 pr_warn("Error device list should be empty\n");
499 return -EINVAL;
500 }
501
502 num_nodes = crat_table->num_domains;
503 image_len = crat_table->length;
504
505 pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
506
507 for (node_id = 0; node_id < num_nodes; node_id++) {
508 top_dev = kfd_create_topology_device(device_list);
509 if (!top_dev)
510 break;
511 top_dev->proximity_domain = proximity_domain++;
512 }
513
514 if (!top_dev) {
515 ret = -ENOMEM;
516 goto err;
517 }
518
519 memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
520 memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
521 CRAT_OEMTABLEID_LENGTH);
522 top_dev->oem_revision = crat_table->oem_revision;
523
524 sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
525 while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
526 ((char *)crat_image) + image_len) {
527 if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
528 ret = kfd_parse_subtype(sub_type_hdr, device_list);
529 if (ret)
530 break;
531 }
532
533 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
534 sub_type_hdr->length);
535 }
536
537err:
538 if (ret)
539 kfd_release_topology_device_list(device_list);
540
541 return ret;
542}
543
544/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
545static int fill_in_pcache(struct crat_subtype_cache *pcache,
546 struct kfd_gpu_cache_info *pcache_info,
547 struct kfd_cu_info *cu_info,
548 int mem_available,
549 int cu_bitmask,
550 int cache_type, unsigned int cu_processor_id,
551 int cu_block)
552{
553 unsigned int cu_sibling_map_mask;
554 int first_active_cu;
555
556 /* First check if enough memory is available */
557 if (sizeof(struct crat_subtype_cache) > mem_available)
558 return -ENOMEM;
559
560 cu_sibling_map_mask = cu_bitmask;
561 cu_sibling_map_mask >>= cu_block;
562 cu_sibling_map_mask &=
563 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
564 first_active_cu = ffs(cu_sibling_map_mask);
565
566 /* CU could be inactive. In case of shared cache find the first active
567 * CU. and incase of non-shared cache check if the CU is inactive. If
568 * inactive active skip it
569 */
570 if (first_active_cu) {
571 memset(pcache, 0, sizeof(struct crat_subtype_cache));
572 pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
573 pcache->length = sizeof(struct crat_subtype_cache);
574 pcache->flags = pcache_info[cache_type].flags;
575 pcache->processor_id_low = cu_processor_id
576 + (first_active_cu - 1);
577 pcache->cache_level = pcache_info[cache_type].cache_level;
578 pcache->cache_size = pcache_info[cache_type].cache_size;
579
580 /* Sibling map is w.r.t processor_id_low, so shift out
581 * inactive CU
582 */
583 cu_sibling_map_mask =
584 cu_sibling_map_mask >> (first_active_cu - 1);
585
586 pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
587 pcache->sibling_map[1] =
588 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
589 pcache->sibling_map[2] =
590 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
591 pcache->sibling_map[3] =
592 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
593 return 0;
594 }
595 return 1;
596}
597
598/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
599 * tables
600 *
601 * @kdev - [IN] GPU device
602 * @gpu_processor_id - [IN] GPU processor ID to which these caches
603 * associate
604 * @available_size - [IN] Amount of memory available in pcache
605 * @cu_info - [IN] Compute Unit info obtained from KGD
606 * @pcache - [OUT] memory into which cache data is to be filled in.
607 * @size_filled - [OUT] amount of data used up in pcache.
608 * @num_of_entries - [OUT] number of caches added
609 */
610static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
611 int gpu_processor_id,
612 int available_size,
613 struct kfd_cu_info *cu_info,
614 struct crat_subtype_cache *pcache,
615 int *size_filled,
616 int *num_of_entries)
617{
618 struct kfd_gpu_cache_info *pcache_info;
619 int num_of_cache_types = 0;
620 int i, j, k;
621 int ct = 0;
622 int mem_available = available_size;
623 unsigned int cu_processor_id;
624 int ret;
625
626 switch (kdev->device_info->asic_family) {
627 case CHIP_KAVERI:
628 pcache_info = kaveri_cache_info;
629 num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
630 break;
631 case CHIP_HAWAII:
632 pcache_info = hawaii_cache_info;
633 num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
634 break;
635 case CHIP_CARRIZO:
636 pcache_info = carrizo_cache_info;
637 num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
638 break;
639 case CHIP_TONGA:
640 pcache_info = tonga_cache_info;
641 num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
642 break;
643 case CHIP_FIJI:
644 pcache_info = fiji_cache_info;
645 num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
646 break;
647 case CHIP_POLARIS10:
648 pcache_info = polaris10_cache_info;
649 num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
650 break;
651 case CHIP_POLARIS11:
652 pcache_info = polaris11_cache_info;
653 num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
654 break;
655 case CHIP_POLARIS12:
656 pcache_info = polaris12_cache_info;
657 num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
658 break;
659 case CHIP_VEGAM:
660 pcache_info = vegam_cache_info;
661 num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
662 break;
663 case CHIP_VEGA10:
664 case CHIP_VEGA12:
665 case CHIP_VEGA20:
666 case CHIP_ARCTURUS:
667 pcache_info = vega10_cache_info;
668 num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
669 break;
670 case CHIP_RAVEN:
671 pcache_info = raven_cache_info;
672 num_of_cache_types = ARRAY_SIZE(raven_cache_info);
673 break;
674 case CHIP_RENOIR:
675 pcache_info = renoir_cache_info;
676 num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
677 break;
678 case CHIP_NAVI10:
679 case CHIP_NAVI12:
680 case CHIP_NAVI14:
681 case CHIP_SIENNA_CICHLID:
682 case CHIP_NAVY_FLOUNDER:
683 pcache_info = navi10_cache_info;
684 num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
685 break;
686 default:
687 return -EINVAL;
688 }
689
690 *size_filled = 0;
691 *num_of_entries = 0;
692
693 /* For each type of cache listed in the kfd_gpu_cache_info table,
694 * go through all available Compute Units.
695 * The [i,j,k] loop will
696 * if kfd_gpu_cache_info.num_cu_shared = 1
697 * will parse through all available CU
698 * If (kfd_gpu_cache_info.num_cu_shared != 1)
699 * then it will consider only one CU from
700 * the shared unit
701 */
702
703 for (ct = 0; ct < num_of_cache_types; ct++) {
704 cu_processor_id = gpu_processor_id;
705 for (i = 0; i < cu_info->num_shader_engines; i++) {
706 for (j = 0; j < cu_info->num_shader_arrays_per_engine;
707 j++) {
708 for (k = 0; k < cu_info->num_cu_per_sh;
709 k += pcache_info[ct].num_cu_shared) {
710
711 ret = fill_in_pcache(pcache,
712 pcache_info,
713 cu_info,
714 mem_available,
715 cu_info->cu_bitmap[i % 4][j + i / 4],
716 ct,
717 cu_processor_id,
718 k);
719
720 if (ret < 0)
721 break;
722
723 if (!ret) {
724 pcache++;
725 (*num_of_entries)++;
726 mem_available -=
727 sizeof(*pcache);
728 (*size_filled) +=
729 sizeof(*pcache);
730 }
731
732 /* Move to next CU block */
733 cu_processor_id +=
734 pcache_info[ct].num_cu_shared;
735 }
736 }
737 }
738 }
739
740 pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
741
742 return 0;
743}
744
745/*
746 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
747 * copies CRAT from ACPI (if available).
748 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
749 *
750 * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
751 * crat_image will be NULL
752 * @size: [OUT] size of crat_image
753 *
754 * Return 0 if successful else return error code
755 */
756int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
757{
758 struct acpi_table_header *crat_table;
759 acpi_status status;
760 void *pcrat_image;
761
762 if (!crat_image)
763 return -EINVAL;
764
765 *crat_image = NULL;
766
767 /* Fetch the CRAT table from ACPI */
768 status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
769 if (status == AE_NOT_FOUND) {
770 pr_warn("CRAT table not found\n");
771 return -ENODATA;
772 } else if (ACPI_FAILURE(status)) {
773 const char *err = acpi_format_exception(status);
774
775 pr_err("CRAT table error: %s\n", err);
776 return -EINVAL;
777 }
778
779 if (ignore_crat) {
780 pr_info("CRAT table disabled by module option\n");
781 return -ENODATA;
782 }
783
784 pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
785 if (!pcrat_image)
786 return -ENOMEM;
787
788 *crat_image = pcrat_image;
789 *size = crat_table->length;
790
791 return 0;
792}
793
794/* Memory required to create Virtual CRAT.
795 * Since there is no easy way to predict the amount of memory required, the
796 * following amount are allocated for CPU and GPU Virtual CRAT. This is
797 * expected to cover all known conditions. But to be safe additional check
798 * is put in the code to ensure we don't overwrite.
799 */
800#define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE)
801#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
802
803/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
804 *
805 * @numa_node_id: CPU NUMA node id
806 * @avail_size: Available size in the memory
807 * @sub_type_hdr: Memory into which compute info will be filled in
808 *
809 * Return 0 if successful else return -ve value
810 */
811static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
812 int proximity_domain,
813 struct crat_subtype_computeunit *sub_type_hdr)
814{
815 const struct cpumask *cpumask;
816
817 *avail_size -= sizeof(struct crat_subtype_computeunit);
818 if (*avail_size < 0)
819 return -ENOMEM;
820
821 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
822
823 /* Fill in subtype header data */
824 sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
825 sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
826 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
827
828 cpumask = cpumask_of_node(numa_node_id);
829
830 /* Fill in CU data */
831 sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
832 sub_type_hdr->proximity_domain = proximity_domain;
833 sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
834 if (sub_type_hdr->processor_id_low == -1)
835 return -EINVAL;
836
837 sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
838
839 return 0;
840}
841
842/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
843 *
844 * @numa_node_id: CPU NUMA node id
845 * @avail_size: Available size in the memory
846 * @sub_type_hdr: Memory into which compute info will be filled in
847 *
848 * Return 0 if successful else return -ve value
849 */
850static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
851 int proximity_domain,
852 struct crat_subtype_memory *sub_type_hdr)
853{
854 uint64_t mem_in_bytes = 0;
855 pg_data_t *pgdat;
856 int zone_type;
857
858 *avail_size -= sizeof(struct crat_subtype_memory);
859 if (*avail_size < 0)
860 return -ENOMEM;
861
862 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
863
864 /* Fill in subtype header data */
865 sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
866 sub_type_hdr->length = sizeof(struct crat_subtype_memory);
867 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
868
869 /* Fill in Memory Subunit data */
870
871 /* Unlike si_meminfo, si_meminfo_node is not exported. So
872 * the following lines are duplicated from si_meminfo_node
873 * function
874 */
875 pgdat = NODE_DATA(numa_node_id);
876 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
877 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
878 mem_in_bytes <<= PAGE_SHIFT;
879
880 sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
881 sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
882 sub_type_hdr->proximity_domain = proximity_domain;
883
884 return 0;
885}
886
887#ifdef CONFIG_X86_64
888static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
889 uint32_t *num_entries,
890 struct crat_subtype_iolink *sub_type_hdr)
891{
892 int nid;
893 struct cpuinfo_x86 *c = &cpu_data(0);
894 uint8_t link_type;
895
896 if (c->x86_vendor == X86_VENDOR_AMD)
897 link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
898 else
899 link_type = CRAT_IOLINK_TYPE_QPI_1_1;
900
901 *num_entries = 0;
902
903 /* Create IO links from this node to other CPU nodes */
904 for_each_online_node(nid) {
905 if (nid == numa_node_id) /* node itself */
906 continue;
907
908 *avail_size -= sizeof(struct crat_subtype_iolink);
909 if (*avail_size < 0)
910 return -ENOMEM;
911
912 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
913
914 /* Fill in subtype header data */
915 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
916 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
917 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
918
919 /* Fill in IO link data */
920 sub_type_hdr->proximity_domain_from = numa_node_id;
921 sub_type_hdr->proximity_domain_to = nid;
922 sub_type_hdr->io_interface_type = link_type;
923
924 (*num_entries)++;
925 sub_type_hdr++;
926 }
927
928 return 0;
929}
930#endif
931
932/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
933 *
934 * @pcrat_image: Fill in VCRAT for CPU
935 * @size: [IN] allocated size of crat_image.
936 * [OUT] actual size of data filled in crat_image
937 */
938static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
939{
940 struct crat_header *crat_table = (struct crat_header *)pcrat_image;
941 struct acpi_table_header *acpi_table;
942 acpi_status status;
943 struct crat_subtype_generic *sub_type_hdr;
944 int avail_size = *size;
945 int numa_node_id;
946#ifdef CONFIG_X86_64
947 uint32_t entries = 0;
948#endif
949 int ret = 0;
950
951 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
952 return -EINVAL;
953
954 /* Fill in CRAT Header.
955 * Modify length and total_entries as subunits are added.
956 */
957 avail_size -= sizeof(struct crat_header);
958 if (avail_size < 0)
959 return -ENOMEM;
960
961 memset(crat_table, 0, sizeof(struct crat_header));
962 memcpy(&crat_table->signature, CRAT_SIGNATURE,
963 sizeof(crat_table->signature));
964 crat_table->length = sizeof(struct crat_header);
965
966 status = acpi_get_table("DSDT", 0, &acpi_table);
967 if (status != AE_OK)
968 pr_warn("DSDT table not found for OEM information\n");
969 else {
970 crat_table->oem_revision = acpi_table->revision;
971 memcpy(crat_table->oem_id, acpi_table->oem_id,
972 CRAT_OEMID_LENGTH);
973 memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
974 CRAT_OEMTABLEID_LENGTH);
975 }
976 crat_table->total_entries = 0;
977 crat_table->num_domains = 0;
978
979 sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
980
981 for_each_online_node(numa_node_id) {
982 if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
983 continue;
984
985 /* Fill in Subtype: Compute Unit */
986 ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
987 crat_table->num_domains,
988 (struct crat_subtype_computeunit *)sub_type_hdr);
989 if (ret < 0)
990 return ret;
991 crat_table->length += sub_type_hdr->length;
992 crat_table->total_entries++;
993
994 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
995 sub_type_hdr->length);
996
997 /* Fill in Subtype: Memory */
998 ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
999 crat_table->num_domains,
1000 (struct crat_subtype_memory *)sub_type_hdr);
1001 if (ret < 0)
1002 return ret;
1003 crat_table->length += sub_type_hdr->length;
1004 crat_table->total_entries++;
1005
1006 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1007 sub_type_hdr->length);
1008
1009 /* Fill in Subtype: IO Link */
1010#ifdef CONFIG_X86_64
1011 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
1012 &entries,
1013 (struct crat_subtype_iolink *)sub_type_hdr);
1014 if (ret < 0)
1015 return ret;
1016 crat_table->length += (sub_type_hdr->length * entries);
1017 crat_table->total_entries += entries;
1018
1019 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1020 sub_type_hdr->length * entries);
1021#else
1022 pr_info("IO link not available for non x86 platforms\n");
1023#endif
1024
1025 crat_table->num_domains++;
1026 }
1027
1028 /* TODO: Add cache Subtype for CPU.
1029 * Currently, CPU cache information is available in function
1030 * detect_cache_attributes(cpu) defined in the file
1031 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
1032 * exported and to get the same information the code needs to be
1033 * duplicated.
1034 */
1035
1036 *size = crat_table->length;
1037 pr_info("Virtual CRAT table created for CPU\n");
1038
1039 return 0;
1040}
1041
1042static int kfd_fill_gpu_memory_affinity(int *avail_size,
1043 struct kfd_dev *kdev, uint8_t type, uint64_t size,
1044 struct crat_subtype_memory *sub_type_hdr,
1045 uint32_t proximity_domain,
1046 const struct kfd_local_mem_info *local_mem_info)
1047{
1048 *avail_size -= sizeof(struct crat_subtype_memory);
1049 if (*avail_size < 0)
1050 return -ENOMEM;
1051
1052 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1053 sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1054 sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1055 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1056
1057 sub_type_hdr->proximity_domain = proximity_domain;
1058
1059 pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
1060 type, size);
1061
1062 sub_type_hdr->length_low = lower_32_bits(size);
1063 sub_type_hdr->length_high = upper_32_bits(size);
1064
1065 sub_type_hdr->width = local_mem_info->vram_width;
1066 sub_type_hdr->visibility_type = type;
1067
1068 return 0;
1069}
1070
1071/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
1072 * to its NUMA node
1073 * @avail_size: Available size in the memory
1074 * @kdev - [IN] GPU device
1075 * @sub_type_hdr: Memory into which io link info will be filled in
1076 * @proximity_domain - proximity domain of the GPU node
1077 *
1078 * Return 0 if successful else return -ve value
1079 */
1080static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1081 struct kfd_dev *kdev,
1082 struct crat_subtype_iolink *sub_type_hdr,
1083 uint32_t proximity_domain)
1084{
1085 *avail_size -= sizeof(struct crat_subtype_iolink);
1086 if (*avail_size < 0)
1087 return -ENOMEM;
1088
1089 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1090
1091 /* Fill in subtype header data */
1092 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1093 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1094 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1095 if (kfd_dev_is_large_bar(kdev))
1096 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1097
1098 /* Fill in IOLINK subtype.
1099 * TODO: Fill-in other fields of iolink subtype
1100 */
1101 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
1102 sub_type_hdr->proximity_domain_from = proximity_domain;
1103#ifdef CONFIG_NUMA
1104 if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
1105 sub_type_hdr->proximity_domain_to = 0;
1106 else
1107 sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
1108#else
1109 sub_type_hdr->proximity_domain_to = 0;
1110#endif
1111 return 0;
1112}
1113
1114static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
1115 struct kfd_dev *kdev,
1116 struct kfd_dev *peer_kdev,
1117 struct crat_subtype_iolink *sub_type_hdr,
1118 uint32_t proximity_domain_from,
1119 uint32_t proximity_domain_to)
1120{
1121 *avail_size -= sizeof(struct crat_subtype_iolink);
1122 if (*avail_size < 0)
1123 return -ENOMEM;
1124
1125 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1126
1127 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1128 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1129 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
1130 CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1131
1132 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1133 sub_type_hdr->proximity_domain_from = proximity_domain_from;
1134 sub_type_hdr->proximity_domain_to = proximity_domain_to;
1135 sub_type_hdr->num_hops_xgmi =
1136 amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
1137 return 0;
1138}
1139
1140/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for CPU
1141 *
1142 * @pcrat_image: Fill in VCRAT for GPU
1143 * @size: [IN] allocated size of crat_image.
1144 * [OUT] actual size of data filled in crat_image
1145 */
1146static int kfd_create_vcrat_image_gpu(void *pcrat_image,
1147 size_t *size, struct kfd_dev *kdev,
1148 uint32_t proximity_domain)
1149{
1150 struct crat_header *crat_table = (struct crat_header *)pcrat_image;
1151 struct crat_subtype_generic *sub_type_hdr;
1152 struct kfd_local_mem_info local_mem_info;
1153 struct kfd_topology_device *peer_dev;
1154 struct crat_subtype_computeunit *cu;
1155 struct kfd_cu_info cu_info;
1156 int avail_size = *size;
1157 uint32_t total_num_of_cu;
1158 int num_of_cache_entries = 0;
1159 int cache_mem_filled = 0;
1160 uint32_t nid = 0;
1161 int ret = 0;
1162
1163 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
1164 return -EINVAL;
1165
1166 /* Fill the CRAT Header.
1167 * Modify length and total_entries as subunits are added.
1168 */
1169 avail_size -= sizeof(struct crat_header);
1170 if (avail_size < 0)
1171 return -ENOMEM;
1172
1173 memset(crat_table, 0, sizeof(struct crat_header));
1174
1175 memcpy(&crat_table->signature, CRAT_SIGNATURE,
1176 sizeof(crat_table->signature));
1177 /* Change length as we add more subtypes*/
1178 crat_table->length = sizeof(struct crat_header);
1179 crat_table->num_domains = 1;
1180 crat_table->total_entries = 0;
1181
1182 /* Fill in Subtype: Compute Unit
1183 * First fill in the sub type header and then sub type data
1184 */
1185 avail_size -= sizeof(struct crat_subtype_computeunit);
1186 if (avail_size < 0)
1187 return -ENOMEM;
1188
1189 sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
1190 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1191
1192 sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1193 sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1194 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1195
1196 /* Fill CU subtype data */
1197 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1198 cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
1199 cu->proximity_domain = proximity_domain;
1200
1201 amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
1202 cu->num_simd_per_cu = cu_info.simd_per_cu;
1203 cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
1204 cu->max_waves_simd = cu_info.max_waves_per_simd;
1205
1206 cu->wave_front_size = cu_info.wave_front_size;
1207 cu->array_count = cu_info.num_shader_arrays_per_engine *
1208 cu_info.num_shader_engines;
1209 total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
1210 cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
1211 cu->num_cu_per_array = cu_info.num_cu_per_sh;
1212 cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
1213 cu->num_banks = cu_info.num_shader_engines;
1214 cu->lds_size_in_kb = cu_info.lds_size;
1215
1216 cu->hsa_capability = 0;
1217
1218 /* Check if this node supports IOMMU. During parsing this flag will
1219 * translate to HSA_CAP_ATS_PRESENT
1220 */
1221 if (!kfd_iommu_check_device(kdev))
1222 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
1223
1224 crat_table->length += sub_type_hdr->length;
1225 crat_table->total_entries++;
1226
1227 /* Fill in Subtype: Memory. Only on systems with large BAR (no
1228 * private FB), report memory as public. On other systems
1229 * report the total FB size (public+private) as a single
1230 * private heap.
1231 */
1232 amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
1233 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1234 sub_type_hdr->length);
1235
1236 if (debug_largebar)
1237 local_mem_info.local_mem_size_private = 0;
1238
1239 if (local_mem_info.local_mem_size_private == 0)
1240 ret = kfd_fill_gpu_memory_affinity(&avail_size,
1241 kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
1242 local_mem_info.local_mem_size_public,
1243 (struct crat_subtype_memory *)sub_type_hdr,
1244 proximity_domain,
1245 &local_mem_info);
1246 else
1247 ret = kfd_fill_gpu_memory_affinity(&avail_size,
1248 kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
1249 local_mem_info.local_mem_size_public +
1250 local_mem_info.local_mem_size_private,
1251 (struct crat_subtype_memory *)sub_type_hdr,
1252 proximity_domain,
1253 &local_mem_info);
1254 if (ret < 0)
1255 return ret;
1256
1257 crat_table->length += sizeof(struct crat_subtype_memory);
1258 crat_table->total_entries++;
1259
1260 /* TODO: Fill in cache information. This information is NOT readily
1261 * available in KGD
1262 */
1263 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1264 sub_type_hdr->length);
1265 ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
1266 avail_size,
1267 &cu_info,
1268 (struct crat_subtype_cache *)sub_type_hdr,
1269 &cache_mem_filled,
1270 &num_of_cache_entries);
1271
1272 if (ret < 0)
1273 return ret;
1274
1275 crat_table->length += cache_mem_filled;
1276 crat_table->total_entries += num_of_cache_entries;
1277 avail_size -= cache_mem_filled;
1278
1279 /* Fill in Subtype: IO_LINKS
1280 * Only direct links are added here which is Link from GPU to
1281 * to its NUMA node. Indirect links are added by userspace.
1282 */
1283 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1284 cache_mem_filled);
1285 ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
1286 (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
1287
1288 if (ret < 0)
1289 return ret;
1290
1291 crat_table->length += sub_type_hdr->length;
1292 crat_table->total_entries++;
1293
1294
1295 /* Fill in Subtype: IO_LINKS
1296 * Direct links from GPU to other GPUs through xGMI.
1297 * We will loop GPUs that already be processed (with lower value
1298 * of proximity_domain), add the link for the GPUs with same
1299 * hive id (from this GPU to other GPU) . The reversed iolink
1300 * (from other GPU to this GPU) will be added
1301 * in kfd_parse_subtype_iolink.
1302 */
1303 if (kdev->hive_id) {
1304 for (nid = 0; nid < proximity_domain; ++nid) {
1305 peer_dev = kfd_topology_device_by_proximity_domain(nid);
1306 if (!peer_dev->gpu)
1307 continue;
1308 if (peer_dev->gpu->hive_id != kdev->hive_id)
1309 continue;
1310 sub_type_hdr = (typeof(sub_type_hdr))(
1311 (char *)sub_type_hdr +
1312 sizeof(struct crat_subtype_iolink));
1313 ret = kfd_fill_gpu_xgmi_link_to_gpu(
1314 &avail_size, kdev, peer_dev->gpu,
1315 (struct crat_subtype_iolink *)sub_type_hdr,
1316 proximity_domain, nid);
1317 if (ret < 0)
1318 return ret;
1319 crat_table->length += sub_type_hdr->length;
1320 crat_table->total_entries++;
1321 }
1322 }
1323 *size = crat_table->length;
1324 pr_info("Virtual CRAT table created for GPU\n");
1325
1326 return ret;
1327}
1328
1329/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
1330 * creates a Virtual CRAT (VCRAT) image
1331 *
1332 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1333 *
1334 * @crat_image: VCRAT image created because ACPI does not have a
1335 * CRAT for this device
1336 * @size: [OUT] size of virtual crat_image
1337 * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
1338 * COMPUTE_UNIT_GPU - Create VCRAT for GPU
1339 * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
1340 * -- this option is not currently implemented.
1341 * The assumption is that all AMD APUs will have CRAT
1342 * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
1343 *
1344 * Return 0 if successful else return -ve value
1345 */
1346int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
1347 int flags, struct kfd_dev *kdev,
1348 uint32_t proximity_domain)
1349{
1350 void *pcrat_image = NULL;
1351 int ret = 0;
1352
1353 if (!crat_image)
1354 return -EINVAL;
1355
1356 *crat_image = NULL;
1357
1358 /* Allocate one VCRAT_SIZE_FOR_CPU for CPU virtual CRAT image and
1359 * VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image. This should cover
1360 * all the current conditions. A check is put not to overwrite beyond
1361 * allocated size
1362 */
1363 switch (flags) {
1364 case COMPUTE_UNIT_CPU:
1365 pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
1366 if (!pcrat_image)
1367 return -ENOMEM;
1368 *size = VCRAT_SIZE_FOR_CPU;
1369 ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
1370 break;
1371 case COMPUTE_UNIT_GPU:
1372 if (!kdev)
1373 return -EINVAL;
1374 pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
1375 if (!pcrat_image)
1376 return -ENOMEM;
1377 *size = VCRAT_SIZE_FOR_GPU;
1378 ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
1379 proximity_domain);
1380 break;
1381 case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
1382 /* TODO: */
1383 ret = -EINVAL;
1384 pr_err("VCRAT not implemented for APU\n");
1385 break;
1386 default:
1387 ret = -EINVAL;
1388 }
1389
1390 if (!ret)
1391 *crat_image = pcrat_image;
1392 else
1393 kfree(pcrat_image);
1394
1395 return ret;
1396}
1397
1398
1399/* kfd_destroy_crat_image
1400 *
1401 * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
1402 *
1403 */
1404void kfd_destroy_crat_image(void *crat_image)
1405{
1406 kfree(crat_image);
1407}
1/*
2 * Copyright 2015-2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/pci.h>
24#include <linux/acpi.h>
25#include "kfd_crat.h"
26#include "kfd_priv.h"
27#include "kfd_topology.h"
28#include "kfd_iommu.h"
29#include "amdgpu.h"
30#include "amdgpu_amdkfd.h"
31
32/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
33 * GPU processor ID are expressed with Bit[31]=1.
34 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
35 * used in the CRAT.
36 */
37static uint32_t gpu_processor_id_low = 0x80001000;
38
39/* Return the next available gpu_processor_id and increment it for next GPU
40 * @total_cu_count - Total CUs present in the GPU including ones
41 * masked off
42 */
43static inline unsigned int get_and_inc_gpu_processor_id(
44 unsigned int total_cu_count)
45{
46 int current_id = gpu_processor_id_low;
47
48 gpu_processor_id_low += total_cu_count;
49 return current_id;
50}
51
52/* Static table to describe GPU Cache information */
53struct kfd_gpu_cache_info {
54 uint32_t cache_size;
55 uint32_t cache_level;
56 uint32_t flags;
57 /* Indicates how many Compute Units share this cache
58 * within a SA. Value = 1 indicates the cache is not shared
59 */
60 uint32_t num_cu_shared;
61};
62
63static struct kfd_gpu_cache_info kaveri_cache_info[] = {
64 {
65 /* TCP L1 Cache per CU */
66 .cache_size = 16,
67 .cache_level = 1,
68 .flags = (CRAT_CACHE_FLAGS_ENABLED |
69 CRAT_CACHE_FLAGS_DATA_CACHE |
70 CRAT_CACHE_FLAGS_SIMD_CACHE),
71 .num_cu_shared = 1,
72 },
73 {
74 /* Scalar L1 Instruction Cache (in SQC module) per bank */
75 .cache_size = 16,
76 .cache_level = 1,
77 .flags = (CRAT_CACHE_FLAGS_ENABLED |
78 CRAT_CACHE_FLAGS_INST_CACHE |
79 CRAT_CACHE_FLAGS_SIMD_CACHE),
80 .num_cu_shared = 2,
81 },
82 {
83 /* Scalar L1 Data Cache (in SQC module) per bank */
84 .cache_size = 8,
85 .cache_level = 1,
86 .flags = (CRAT_CACHE_FLAGS_ENABLED |
87 CRAT_CACHE_FLAGS_DATA_CACHE |
88 CRAT_CACHE_FLAGS_SIMD_CACHE),
89 .num_cu_shared = 2,
90 },
91
92 /* TODO: Add L2 Cache information */
93};
94
95
96static struct kfd_gpu_cache_info carrizo_cache_info[] = {
97 {
98 /* TCP L1 Cache per CU */
99 .cache_size = 16,
100 .cache_level = 1,
101 .flags = (CRAT_CACHE_FLAGS_ENABLED |
102 CRAT_CACHE_FLAGS_DATA_CACHE |
103 CRAT_CACHE_FLAGS_SIMD_CACHE),
104 .num_cu_shared = 1,
105 },
106 {
107 /* Scalar L1 Instruction Cache (in SQC module) per bank */
108 .cache_size = 8,
109 .cache_level = 1,
110 .flags = (CRAT_CACHE_FLAGS_ENABLED |
111 CRAT_CACHE_FLAGS_INST_CACHE |
112 CRAT_CACHE_FLAGS_SIMD_CACHE),
113 .num_cu_shared = 4,
114 },
115 {
116 /* Scalar L1 Data Cache (in SQC module) per bank. */
117 .cache_size = 4,
118 .cache_level = 1,
119 .flags = (CRAT_CACHE_FLAGS_ENABLED |
120 CRAT_CACHE_FLAGS_DATA_CACHE |
121 CRAT_CACHE_FLAGS_SIMD_CACHE),
122 .num_cu_shared = 4,
123 },
124
125 /* TODO: Add L2 Cache information */
126};
127
128#define hawaii_cache_info kaveri_cache_info
129#define tonga_cache_info carrizo_cache_info
130#define fiji_cache_info carrizo_cache_info
131#define polaris10_cache_info carrizo_cache_info
132#define polaris11_cache_info carrizo_cache_info
133#define polaris12_cache_info carrizo_cache_info
134#define vegam_cache_info carrizo_cache_info
135
136/* NOTE: L1 cache information has been updated and L2/L3
137 * cache information has been added for Vega10 and
138 * newer ASICs. The unit for cache_size is KiB.
139 * In future, check & update cache details
140 * for every new ASIC is required.
141 */
142
143static struct kfd_gpu_cache_info vega10_cache_info[] = {
144 {
145 /* TCP L1 Cache per CU */
146 .cache_size = 16,
147 .cache_level = 1,
148 .flags = (CRAT_CACHE_FLAGS_ENABLED |
149 CRAT_CACHE_FLAGS_DATA_CACHE |
150 CRAT_CACHE_FLAGS_SIMD_CACHE),
151 .num_cu_shared = 1,
152 },
153 {
154 /* Scalar L1 Instruction Cache per SQC */
155 .cache_size = 32,
156 .cache_level = 1,
157 .flags = (CRAT_CACHE_FLAGS_ENABLED |
158 CRAT_CACHE_FLAGS_INST_CACHE |
159 CRAT_CACHE_FLAGS_SIMD_CACHE),
160 .num_cu_shared = 3,
161 },
162 {
163 /* Scalar L1 Data Cache per SQC */
164 .cache_size = 16,
165 .cache_level = 1,
166 .flags = (CRAT_CACHE_FLAGS_ENABLED |
167 CRAT_CACHE_FLAGS_DATA_CACHE |
168 CRAT_CACHE_FLAGS_SIMD_CACHE),
169 .num_cu_shared = 3,
170 },
171 {
172 /* L2 Data Cache per GPU (Total Tex Cache) */
173 .cache_size = 4096,
174 .cache_level = 2,
175 .flags = (CRAT_CACHE_FLAGS_ENABLED |
176 CRAT_CACHE_FLAGS_DATA_CACHE |
177 CRAT_CACHE_FLAGS_SIMD_CACHE),
178 .num_cu_shared = 16,
179 },
180};
181
182static struct kfd_gpu_cache_info raven_cache_info[] = {
183 {
184 /* TCP L1 Cache per CU */
185 .cache_size = 16,
186 .cache_level = 1,
187 .flags = (CRAT_CACHE_FLAGS_ENABLED |
188 CRAT_CACHE_FLAGS_DATA_CACHE |
189 CRAT_CACHE_FLAGS_SIMD_CACHE),
190 .num_cu_shared = 1,
191 },
192 {
193 /* Scalar L1 Instruction Cache per SQC */
194 .cache_size = 32,
195 .cache_level = 1,
196 .flags = (CRAT_CACHE_FLAGS_ENABLED |
197 CRAT_CACHE_FLAGS_INST_CACHE |
198 CRAT_CACHE_FLAGS_SIMD_CACHE),
199 .num_cu_shared = 3,
200 },
201 {
202 /* Scalar L1 Data Cache per SQC */
203 .cache_size = 16,
204 .cache_level = 1,
205 .flags = (CRAT_CACHE_FLAGS_ENABLED |
206 CRAT_CACHE_FLAGS_DATA_CACHE |
207 CRAT_CACHE_FLAGS_SIMD_CACHE),
208 .num_cu_shared = 3,
209 },
210 {
211 /* L2 Data Cache per GPU (Total Tex Cache) */
212 .cache_size = 1024,
213 .cache_level = 2,
214 .flags = (CRAT_CACHE_FLAGS_ENABLED |
215 CRAT_CACHE_FLAGS_DATA_CACHE |
216 CRAT_CACHE_FLAGS_SIMD_CACHE),
217 .num_cu_shared = 11,
218 },
219};
220
221static struct kfd_gpu_cache_info renoir_cache_info[] = {
222 {
223 /* TCP L1 Cache per CU */
224 .cache_size = 16,
225 .cache_level = 1,
226 .flags = (CRAT_CACHE_FLAGS_ENABLED |
227 CRAT_CACHE_FLAGS_DATA_CACHE |
228 CRAT_CACHE_FLAGS_SIMD_CACHE),
229 .num_cu_shared = 1,
230 },
231 {
232 /* Scalar L1 Instruction Cache per SQC */
233 .cache_size = 32,
234 .cache_level = 1,
235 .flags = (CRAT_CACHE_FLAGS_ENABLED |
236 CRAT_CACHE_FLAGS_INST_CACHE |
237 CRAT_CACHE_FLAGS_SIMD_CACHE),
238 .num_cu_shared = 3,
239 },
240 {
241 /* Scalar L1 Data Cache per SQC */
242 .cache_size = 16,
243 .cache_level = 1,
244 .flags = (CRAT_CACHE_FLAGS_ENABLED |
245 CRAT_CACHE_FLAGS_DATA_CACHE |
246 CRAT_CACHE_FLAGS_SIMD_CACHE),
247 .num_cu_shared = 3,
248 },
249 {
250 /* L2 Data Cache per GPU (Total Tex Cache) */
251 .cache_size = 1024,
252 .cache_level = 2,
253 .flags = (CRAT_CACHE_FLAGS_ENABLED |
254 CRAT_CACHE_FLAGS_DATA_CACHE |
255 CRAT_CACHE_FLAGS_SIMD_CACHE),
256 .num_cu_shared = 8,
257 },
258};
259
260static struct kfd_gpu_cache_info vega12_cache_info[] = {
261 {
262 /* TCP L1 Cache per CU */
263 .cache_size = 16,
264 .cache_level = 1,
265 .flags = (CRAT_CACHE_FLAGS_ENABLED |
266 CRAT_CACHE_FLAGS_DATA_CACHE |
267 CRAT_CACHE_FLAGS_SIMD_CACHE),
268 .num_cu_shared = 1,
269 },
270 {
271 /* Scalar L1 Instruction Cache per SQC */
272 .cache_size = 32,
273 .cache_level = 1,
274 .flags = (CRAT_CACHE_FLAGS_ENABLED |
275 CRAT_CACHE_FLAGS_INST_CACHE |
276 CRAT_CACHE_FLAGS_SIMD_CACHE),
277 .num_cu_shared = 3,
278 },
279 {
280 /* Scalar L1 Data Cache per SQC */
281 .cache_size = 16,
282 .cache_level = 1,
283 .flags = (CRAT_CACHE_FLAGS_ENABLED |
284 CRAT_CACHE_FLAGS_DATA_CACHE |
285 CRAT_CACHE_FLAGS_SIMD_CACHE),
286 .num_cu_shared = 3,
287 },
288 {
289 /* L2 Data Cache per GPU (Total Tex Cache) */
290 .cache_size = 2048,
291 .cache_level = 2,
292 .flags = (CRAT_CACHE_FLAGS_ENABLED |
293 CRAT_CACHE_FLAGS_DATA_CACHE |
294 CRAT_CACHE_FLAGS_SIMD_CACHE),
295 .num_cu_shared = 5,
296 },
297};
298
299static struct kfd_gpu_cache_info vega20_cache_info[] = {
300 {
301 /* TCP L1 Cache per CU */
302 .cache_size = 16,
303 .cache_level = 1,
304 .flags = (CRAT_CACHE_FLAGS_ENABLED |
305 CRAT_CACHE_FLAGS_DATA_CACHE |
306 CRAT_CACHE_FLAGS_SIMD_CACHE),
307 .num_cu_shared = 1,
308 },
309 {
310 /* Scalar L1 Instruction Cache per SQC */
311 .cache_size = 32,
312 .cache_level = 1,
313 .flags = (CRAT_CACHE_FLAGS_ENABLED |
314 CRAT_CACHE_FLAGS_INST_CACHE |
315 CRAT_CACHE_FLAGS_SIMD_CACHE),
316 .num_cu_shared = 3,
317 },
318 {
319 /* Scalar L1 Data Cache per SQC */
320 .cache_size = 16,
321 .cache_level = 1,
322 .flags = (CRAT_CACHE_FLAGS_ENABLED |
323 CRAT_CACHE_FLAGS_DATA_CACHE |
324 CRAT_CACHE_FLAGS_SIMD_CACHE),
325 .num_cu_shared = 3,
326 },
327 {
328 /* L2 Data Cache per GPU (Total Tex Cache) */
329 .cache_size = 8192,
330 .cache_level = 2,
331 .flags = (CRAT_CACHE_FLAGS_ENABLED |
332 CRAT_CACHE_FLAGS_DATA_CACHE |
333 CRAT_CACHE_FLAGS_SIMD_CACHE),
334 .num_cu_shared = 16,
335 },
336};
337
338static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
339 {
340 /* TCP L1 Cache per CU */
341 .cache_size = 16,
342 .cache_level = 1,
343 .flags = (CRAT_CACHE_FLAGS_ENABLED |
344 CRAT_CACHE_FLAGS_DATA_CACHE |
345 CRAT_CACHE_FLAGS_SIMD_CACHE),
346 .num_cu_shared = 1,
347 },
348 {
349 /* Scalar L1 Instruction Cache per SQC */
350 .cache_size = 32,
351 .cache_level = 1,
352 .flags = (CRAT_CACHE_FLAGS_ENABLED |
353 CRAT_CACHE_FLAGS_INST_CACHE |
354 CRAT_CACHE_FLAGS_SIMD_CACHE),
355 .num_cu_shared = 2,
356 },
357 {
358 /* Scalar L1 Data Cache per SQC */
359 .cache_size = 16,
360 .cache_level = 1,
361 .flags = (CRAT_CACHE_FLAGS_ENABLED |
362 CRAT_CACHE_FLAGS_DATA_CACHE |
363 CRAT_CACHE_FLAGS_SIMD_CACHE),
364 .num_cu_shared = 2,
365 },
366 {
367 /* L2 Data Cache per GPU (Total Tex Cache) */
368 .cache_size = 8192,
369 .cache_level = 2,
370 .flags = (CRAT_CACHE_FLAGS_ENABLED |
371 CRAT_CACHE_FLAGS_DATA_CACHE |
372 CRAT_CACHE_FLAGS_SIMD_CACHE),
373 .num_cu_shared = 14,
374 },
375};
376
377static struct kfd_gpu_cache_info navi10_cache_info[] = {
378 {
379 /* TCP L1 Cache per CU */
380 .cache_size = 16,
381 .cache_level = 1,
382 .flags = (CRAT_CACHE_FLAGS_ENABLED |
383 CRAT_CACHE_FLAGS_DATA_CACHE |
384 CRAT_CACHE_FLAGS_SIMD_CACHE),
385 .num_cu_shared = 1,
386 },
387 {
388 /* Scalar L1 Instruction Cache per SQC */
389 .cache_size = 32,
390 .cache_level = 1,
391 .flags = (CRAT_CACHE_FLAGS_ENABLED |
392 CRAT_CACHE_FLAGS_INST_CACHE |
393 CRAT_CACHE_FLAGS_SIMD_CACHE),
394 .num_cu_shared = 2,
395 },
396 {
397 /* Scalar L1 Data Cache per SQC */
398 .cache_size = 16,
399 .cache_level = 1,
400 .flags = (CRAT_CACHE_FLAGS_ENABLED |
401 CRAT_CACHE_FLAGS_DATA_CACHE |
402 CRAT_CACHE_FLAGS_SIMD_CACHE),
403 .num_cu_shared = 2,
404 },
405 {
406 /* GL1 Data Cache per SA */
407 .cache_size = 128,
408 .cache_level = 1,
409 .flags = (CRAT_CACHE_FLAGS_ENABLED |
410 CRAT_CACHE_FLAGS_DATA_CACHE |
411 CRAT_CACHE_FLAGS_SIMD_CACHE),
412 .num_cu_shared = 10,
413 },
414 {
415 /* L2 Data Cache per GPU (Total Tex Cache) */
416 .cache_size = 4096,
417 .cache_level = 2,
418 .flags = (CRAT_CACHE_FLAGS_ENABLED |
419 CRAT_CACHE_FLAGS_DATA_CACHE |
420 CRAT_CACHE_FLAGS_SIMD_CACHE),
421 .num_cu_shared = 10,
422 },
423};
424
425static struct kfd_gpu_cache_info vangogh_cache_info[] = {
426 {
427 /* TCP L1 Cache per CU */
428 .cache_size = 16,
429 .cache_level = 1,
430 .flags = (CRAT_CACHE_FLAGS_ENABLED |
431 CRAT_CACHE_FLAGS_DATA_CACHE |
432 CRAT_CACHE_FLAGS_SIMD_CACHE),
433 .num_cu_shared = 1,
434 },
435 {
436 /* Scalar L1 Instruction Cache per SQC */
437 .cache_size = 32,
438 .cache_level = 1,
439 .flags = (CRAT_CACHE_FLAGS_ENABLED |
440 CRAT_CACHE_FLAGS_INST_CACHE |
441 CRAT_CACHE_FLAGS_SIMD_CACHE),
442 .num_cu_shared = 2,
443 },
444 {
445 /* Scalar L1 Data Cache per SQC */
446 .cache_size = 16,
447 .cache_level = 1,
448 .flags = (CRAT_CACHE_FLAGS_ENABLED |
449 CRAT_CACHE_FLAGS_DATA_CACHE |
450 CRAT_CACHE_FLAGS_SIMD_CACHE),
451 .num_cu_shared = 2,
452 },
453 {
454 /* GL1 Data Cache per SA */
455 .cache_size = 128,
456 .cache_level = 1,
457 .flags = (CRAT_CACHE_FLAGS_ENABLED |
458 CRAT_CACHE_FLAGS_DATA_CACHE |
459 CRAT_CACHE_FLAGS_SIMD_CACHE),
460 .num_cu_shared = 8,
461 },
462 {
463 /* L2 Data Cache per GPU (Total Tex Cache) */
464 .cache_size = 1024,
465 .cache_level = 2,
466 .flags = (CRAT_CACHE_FLAGS_ENABLED |
467 CRAT_CACHE_FLAGS_DATA_CACHE |
468 CRAT_CACHE_FLAGS_SIMD_CACHE),
469 .num_cu_shared = 8,
470 },
471};
472
473static struct kfd_gpu_cache_info navi14_cache_info[] = {
474 {
475 /* TCP L1 Cache per CU */
476 .cache_size = 16,
477 .cache_level = 1,
478 .flags = (CRAT_CACHE_FLAGS_ENABLED |
479 CRAT_CACHE_FLAGS_DATA_CACHE |
480 CRAT_CACHE_FLAGS_SIMD_CACHE),
481 .num_cu_shared = 1,
482 },
483 {
484 /* Scalar L1 Instruction Cache per SQC */
485 .cache_size = 32,
486 .cache_level = 1,
487 .flags = (CRAT_CACHE_FLAGS_ENABLED |
488 CRAT_CACHE_FLAGS_INST_CACHE |
489 CRAT_CACHE_FLAGS_SIMD_CACHE),
490 .num_cu_shared = 2,
491 },
492 {
493 /* Scalar L1 Data Cache per SQC */
494 .cache_size = 16,
495 .cache_level = 1,
496 .flags = (CRAT_CACHE_FLAGS_ENABLED |
497 CRAT_CACHE_FLAGS_DATA_CACHE |
498 CRAT_CACHE_FLAGS_SIMD_CACHE),
499 .num_cu_shared = 2,
500 },
501 {
502 /* GL1 Data Cache per SA */
503 .cache_size = 128,
504 .cache_level = 1,
505 .flags = (CRAT_CACHE_FLAGS_ENABLED |
506 CRAT_CACHE_FLAGS_DATA_CACHE |
507 CRAT_CACHE_FLAGS_SIMD_CACHE),
508 .num_cu_shared = 12,
509 },
510 {
511 /* L2 Data Cache per GPU (Total Tex Cache) */
512 .cache_size = 2048,
513 .cache_level = 2,
514 .flags = (CRAT_CACHE_FLAGS_ENABLED |
515 CRAT_CACHE_FLAGS_DATA_CACHE |
516 CRAT_CACHE_FLAGS_SIMD_CACHE),
517 .num_cu_shared = 12,
518 },
519};
520
521static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
522 {
523 /* TCP L1 Cache per CU */
524 .cache_size = 16,
525 .cache_level = 1,
526 .flags = (CRAT_CACHE_FLAGS_ENABLED |
527 CRAT_CACHE_FLAGS_DATA_CACHE |
528 CRAT_CACHE_FLAGS_SIMD_CACHE),
529 .num_cu_shared = 1,
530 },
531 {
532 /* Scalar L1 Instruction Cache per SQC */
533 .cache_size = 32,
534 .cache_level = 1,
535 .flags = (CRAT_CACHE_FLAGS_ENABLED |
536 CRAT_CACHE_FLAGS_INST_CACHE |
537 CRAT_CACHE_FLAGS_SIMD_CACHE),
538 .num_cu_shared = 2,
539 },
540 {
541 /* Scalar L1 Data Cache per SQC */
542 .cache_size = 16,
543 .cache_level = 1,
544 .flags = (CRAT_CACHE_FLAGS_ENABLED |
545 CRAT_CACHE_FLAGS_DATA_CACHE |
546 CRAT_CACHE_FLAGS_SIMD_CACHE),
547 .num_cu_shared = 2,
548 },
549 {
550 /* GL1 Data Cache per SA */
551 .cache_size = 128,
552 .cache_level = 1,
553 .flags = (CRAT_CACHE_FLAGS_ENABLED |
554 CRAT_CACHE_FLAGS_DATA_CACHE |
555 CRAT_CACHE_FLAGS_SIMD_CACHE),
556 .num_cu_shared = 10,
557 },
558 {
559 /* L2 Data Cache per GPU (Total Tex Cache) */
560 .cache_size = 4096,
561 .cache_level = 2,
562 .flags = (CRAT_CACHE_FLAGS_ENABLED |
563 CRAT_CACHE_FLAGS_DATA_CACHE |
564 CRAT_CACHE_FLAGS_SIMD_CACHE),
565 .num_cu_shared = 10,
566 },
567 {
568 /* L3 Data Cache per GPU */
569 .cache_size = 128*1024,
570 .cache_level = 3,
571 .flags = (CRAT_CACHE_FLAGS_ENABLED |
572 CRAT_CACHE_FLAGS_DATA_CACHE |
573 CRAT_CACHE_FLAGS_SIMD_CACHE),
574 .num_cu_shared = 10,
575 },
576};
577
578static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
579 {
580 /* TCP L1 Cache per CU */
581 .cache_size = 16,
582 .cache_level = 1,
583 .flags = (CRAT_CACHE_FLAGS_ENABLED |
584 CRAT_CACHE_FLAGS_DATA_CACHE |
585 CRAT_CACHE_FLAGS_SIMD_CACHE),
586 .num_cu_shared = 1,
587 },
588 {
589 /* Scalar L1 Instruction Cache per SQC */
590 .cache_size = 32,
591 .cache_level = 1,
592 .flags = (CRAT_CACHE_FLAGS_ENABLED |
593 CRAT_CACHE_FLAGS_INST_CACHE |
594 CRAT_CACHE_FLAGS_SIMD_CACHE),
595 .num_cu_shared = 2,
596 },
597 {
598 /* Scalar L1 Data Cache per SQC */
599 .cache_size = 16,
600 .cache_level = 1,
601 .flags = (CRAT_CACHE_FLAGS_ENABLED |
602 CRAT_CACHE_FLAGS_DATA_CACHE |
603 CRAT_CACHE_FLAGS_SIMD_CACHE),
604 .num_cu_shared = 2,
605 },
606 {
607 /* GL1 Data Cache per SA */
608 .cache_size = 128,
609 .cache_level = 1,
610 .flags = (CRAT_CACHE_FLAGS_ENABLED |
611 CRAT_CACHE_FLAGS_DATA_CACHE |
612 CRAT_CACHE_FLAGS_SIMD_CACHE),
613 .num_cu_shared = 10,
614 },
615 {
616 /* L2 Data Cache per GPU (Total Tex Cache) */
617 .cache_size = 3072,
618 .cache_level = 2,
619 .flags = (CRAT_CACHE_FLAGS_ENABLED |
620 CRAT_CACHE_FLAGS_DATA_CACHE |
621 CRAT_CACHE_FLAGS_SIMD_CACHE),
622 .num_cu_shared = 10,
623 },
624 {
625 /* L3 Data Cache per GPU */
626 .cache_size = 96*1024,
627 .cache_level = 3,
628 .flags = (CRAT_CACHE_FLAGS_ENABLED |
629 CRAT_CACHE_FLAGS_DATA_CACHE |
630 CRAT_CACHE_FLAGS_SIMD_CACHE),
631 .num_cu_shared = 10,
632 },
633};
634
635static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
636 {
637 /* TCP L1 Cache per CU */
638 .cache_size = 16,
639 .cache_level = 1,
640 .flags = (CRAT_CACHE_FLAGS_ENABLED |
641 CRAT_CACHE_FLAGS_DATA_CACHE |
642 CRAT_CACHE_FLAGS_SIMD_CACHE),
643 .num_cu_shared = 1,
644 },
645 {
646 /* Scalar L1 Instruction Cache per SQC */
647 .cache_size = 32,
648 .cache_level = 1,
649 .flags = (CRAT_CACHE_FLAGS_ENABLED |
650 CRAT_CACHE_FLAGS_INST_CACHE |
651 CRAT_CACHE_FLAGS_SIMD_CACHE),
652 .num_cu_shared = 2,
653 },
654 {
655 /* Scalar L1 Data Cache per SQC */
656 .cache_size = 16,
657 .cache_level = 1,
658 .flags = (CRAT_CACHE_FLAGS_ENABLED |
659 CRAT_CACHE_FLAGS_DATA_CACHE |
660 CRAT_CACHE_FLAGS_SIMD_CACHE),
661 .num_cu_shared = 2,
662 },
663 {
664 /* GL1 Data Cache per SA */
665 .cache_size = 128,
666 .cache_level = 1,
667 .flags = (CRAT_CACHE_FLAGS_ENABLED |
668 CRAT_CACHE_FLAGS_DATA_CACHE |
669 CRAT_CACHE_FLAGS_SIMD_CACHE),
670 .num_cu_shared = 8,
671 },
672 {
673 /* L2 Data Cache per GPU (Total Tex Cache) */
674 .cache_size = 2048,
675 .cache_level = 2,
676 .flags = (CRAT_CACHE_FLAGS_ENABLED |
677 CRAT_CACHE_FLAGS_DATA_CACHE |
678 CRAT_CACHE_FLAGS_SIMD_CACHE),
679 .num_cu_shared = 8,
680 },
681 {
682 /* L3 Data Cache per GPU */
683 .cache_size = 32*1024,
684 .cache_level = 3,
685 .flags = (CRAT_CACHE_FLAGS_ENABLED |
686 CRAT_CACHE_FLAGS_DATA_CACHE |
687 CRAT_CACHE_FLAGS_SIMD_CACHE),
688 .num_cu_shared = 8,
689 },
690};
691
692static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
693 {
694 /* TCP L1 Cache per CU */
695 .cache_size = 16,
696 .cache_level = 1,
697 .flags = (CRAT_CACHE_FLAGS_ENABLED |
698 CRAT_CACHE_FLAGS_DATA_CACHE |
699 CRAT_CACHE_FLAGS_SIMD_CACHE),
700 .num_cu_shared = 1,
701 },
702 {
703 /* Scalar L1 Instruction Cache per SQC */
704 .cache_size = 32,
705 .cache_level = 1,
706 .flags = (CRAT_CACHE_FLAGS_ENABLED |
707 CRAT_CACHE_FLAGS_INST_CACHE |
708 CRAT_CACHE_FLAGS_SIMD_CACHE),
709 .num_cu_shared = 2,
710 },
711 {
712 /* Scalar L1 Data Cache per SQC */
713 .cache_size = 16,
714 .cache_level = 1,
715 .flags = (CRAT_CACHE_FLAGS_ENABLED |
716 CRAT_CACHE_FLAGS_DATA_CACHE |
717 CRAT_CACHE_FLAGS_SIMD_CACHE),
718 .num_cu_shared = 2,
719 },
720 {
721 /* GL1 Data Cache per SA */
722 .cache_size = 128,
723 .cache_level = 1,
724 .flags = (CRAT_CACHE_FLAGS_ENABLED |
725 CRAT_CACHE_FLAGS_DATA_CACHE |
726 CRAT_CACHE_FLAGS_SIMD_CACHE),
727 .num_cu_shared = 8,
728 },
729 {
730 /* L2 Data Cache per GPU (Total Tex Cache) */
731 .cache_size = 1024,
732 .cache_level = 2,
733 .flags = (CRAT_CACHE_FLAGS_ENABLED |
734 CRAT_CACHE_FLAGS_DATA_CACHE |
735 CRAT_CACHE_FLAGS_SIMD_CACHE),
736 .num_cu_shared = 8,
737 },
738 {
739 /* L3 Data Cache per GPU */
740 .cache_size = 16*1024,
741 .cache_level = 3,
742 .flags = (CRAT_CACHE_FLAGS_ENABLED |
743 CRAT_CACHE_FLAGS_DATA_CACHE |
744 CRAT_CACHE_FLAGS_SIMD_CACHE),
745 .num_cu_shared = 8,
746 },
747};
748
749static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
750 {
751 /* TCP L1 Cache per CU */
752 .cache_size = 16,
753 .cache_level = 1,
754 .flags = (CRAT_CACHE_FLAGS_ENABLED |
755 CRAT_CACHE_FLAGS_DATA_CACHE |
756 CRAT_CACHE_FLAGS_SIMD_CACHE),
757 .num_cu_shared = 1,
758 },
759 {
760 /* Scalar L1 Instruction Cache per SQC */
761 .cache_size = 32,
762 .cache_level = 1,
763 .flags = (CRAT_CACHE_FLAGS_ENABLED |
764 CRAT_CACHE_FLAGS_INST_CACHE |
765 CRAT_CACHE_FLAGS_SIMD_CACHE),
766 .num_cu_shared = 2,
767 },
768 {
769 /* Scalar L1 Data Cache per SQC */
770 .cache_size = 16,
771 .cache_level = 1,
772 .flags = (CRAT_CACHE_FLAGS_ENABLED |
773 CRAT_CACHE_FLAGS_DATA_CACHE |
774 CRAT_CACHE_FLAGS_SIMD_CACHE),
775 .num_cu_shared = 2,
776 },
777 {
778 /* GL1 Data Cache per SA */
779 .cache_size = 128,
780 .cache_level = 1,
781 .flags = (CRAT_CACHE_FLAGS_ENABLED |
782 CRAT_CACHE_FLAGS_DATA_CACHE |
783 CRAT_CACHE_FLAGS_SIMD_CACHE),
784 .num_cu_shared = 6,
785 },
786 {
787 /* L2 Data Cache per GPU (Total Tex Cache) */
788 .cache_size = 2048,
789 .cache_level = 2,
790 .flags = (CRAT_CACHE_FLAGS_ENABLED |
791 CRAT_CACHE_FLAGS_DATA_CACHE |
792 CRAT_CACHE_FLAGS_SIMD_CACHE),
793 .num_cu_shared = 6,
794 },
795};
796
797static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
798 struct crat_subtype_computeunit *cu)
799{
800 dev->node_props.cpu_cores_count = cu->num_cpu_cores;
801 dev->node_props.cpu_core_id_base = cu->processor_id_low;
802 if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
803 dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
804
805 pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
806 cu->processor_id_low);
807}
808
809static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
810 struct crat_subtype_computeunit *cu)
811{
812 dev->node_props.simd_id_base = cu->processor_id_low;
813 dev->node_props.simd_count = cu->num_simd_cores;
814 dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
815 dev->node_props.max_waves_per_simd = cu->max_waves_simd;
816 dev->node_props.wave_front_size = cu->wave_front_size;
817 dev->node_props.array_count = cu->array_count;
818 dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
819 dev->node_props.simd_per_cu = cu->num_simd_per_cu;
820 dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
821 if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
822 dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
823 pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
824}
825
826/* kfd_parse_subtype_cu - parse compute unit subtypes and attach it to correct
827 * topology device present in the device_list
828 */
829static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
830 struct list_head *device_list)
831{
832 struct kfd_topology_device *dev;
833
834 pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
835 cu->proximity_domain, cu->hsa_capability);
836 list_for_each_entry(dev, device_list, list) {
837 if (cu->proximity_domain == dev->proximity_domain) {
838 if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
839 kfd_populated_cu_info_cpu(dev, cu);
840
841 if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
842 kfd_populated_cu_info_gpu(dev, cu);
843 break;
844 }
845 }
846
847 return 0;
848}
849
850static struct kfd_mem_properties *
851find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
852 struct kfd_topology_device *dev)
853{
854 struct kfd_mem_properties *props;
855
856 list_for_each_entry(props, &dev->mem_props, list) {
857 if (props->heap_type == heap_type
858 && props->flags == flags
859 && props->width == width)
860 return props;
861 }
862
863 return NULL;
864}
865/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
866 * topology device present in the device_list
867 */
868static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
869 struct list_head *device_list)
870{
871 struct kfd_mem_properties *props;
872 struct kfd_topology_device *dev;
873 uint32_t heap_type;
874 uint64_t size_in_bytes;
875 uint32_t flags = 0;
876 uint32_t width;
877
878 pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
879 mem->proximity_domain);
880 list_for_each_entry(dev, device_list, list) {
881 if (mem->proximity_domain == dev->proximity_domain) {
882 /* We're on GPU node */
883 if (dev->node_props.cpu_cores_count == 0) {
884 /* APU */
885 if (mem->visibility_type == 0)
886 heap_type =
887 HSA_MEM_HEAP_TYPE_FB_PRIVATE;
888 /* dGPU */
889 else
890 heap_type = mem->visibility_type;
891 } else
892 heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
893
894 if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
895 flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
896 if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
897 flags |= HSA_MEM_FLAGS_NON_VOLATILE;
898
899 size_in_bytes =
900 ((uint64_t)mem->length_high << 32) +
901 mem->length_low;
902 width = mem->width;
903
904 /* Multiple banks of the same type are aggregated into
905 * one. User mode doesn't care about multiple physical
906 * memory segments. It's managed as a single virtual
907 * heap for user mode.
908 */
909 props = find_subtype_mem(heap_type, flags, width, dev);
910 if (props) {
911 props->size_in_bytes += size_in_bytes;
912 break;
913 }
914
915 props = kfd_alloc_struct(props);
916 if (!props)
917 return -ENOMEM;
918
919 props->heap_type = heap_type;
920 props->flags = flags;
921 props->size_in_bytes = size_in_bytes;
922 props->width = width;
923
924 dev->node_props.mem_banks_count++;
925 list_add_tail(&props->list, &dev->mem_props);
926
927 break;
928 }
929 }
930
931 return 0;
932}
933
934/* kfd_parse_subtype_cache - parse cache subtypes and attach it to correct
935 * topology device present in the device_list
936 */
937static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
938 struct list_head *device_list)
939{
940 struct kfd_cache_properties *props;
941 struct kfd_topology_device *dev;
942 uint32_t id;
943 uint32_t total_num_of_cu;
944
945 id = cache->processor_id_low;
946
947 pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
948 list_for_each_entry(dev, device_list, list) {
949 total_num_of_cu = (dev->node_props.array_count *
950 dev->node_props.cu_per_simd_array);
951
952 /* Cache infomration in CRAT doesn't have proximity_domain
953 * information as it is associated with a CPU core or GPU
954 * Compute Unit. So map the cache using CPU core Id or SIMD
955 * (GPU) ID.
956 * TODO: This works because currently we can safely assume that
957 * Compute Units are parsed before caches are parsed. In
958 * future, remove this dependency
959 */
960 if ((id >= dev->node_props.cpu_core_id_base &&
961 id <= dev->node_props.cpu_core_id_base +
962 dev->node_props.cpu_cores_count) ||
963 (id >= dev->node_props.simd_id_base &&
964 id < dev->node_props.simd_id_base +
965 total_num_of_cu)) {
966 props = kfd_alloc_struct(props);
967 if (!props)
968 return -ENOMEM;
969
970 props->processor_id_low = id;
971 props->cache_level = cache->cache_level;
972 props->cache_size = cache->cache_size;
973 props->cacheline_size = cache->cache_line_size;
974 props->cachelines_per_tag = cache->lines_per_tag;
975 props->cache_assoc = cache->associativity;
976 props->cache_latency = cache->cache_latency;
977 memcpy(props->sibling_map, cache->sibling_map,
978 sizeof(props->sibling_map));
979
980 if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
981 props->cache_type |= HSA_CACHE_TYPE_DATA;
982 if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
983 props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
984 if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
985 props->cache_type |= HSA_CACHE_TYPE_CPU;
986 if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
987 props->cache_type |= HSA_CACHE_TYPE_HSACU;
988
989 dev->cache_count++;
990 dev->node_props.caches_count++;
991 list_add_tail(&props->list, &dev->cache_props);
992
993 break;
994 }
995 }
996
997 return 0;
998}
999
1000/* kfd_parse_subtype_iolink - parse iolink subtypes and attach it to correct
1001 * topology device present in the device_list
1002 */
1003static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
1004 struct list_head *device_list)
1005{
1006 struct kfd_iolink_properties *props = NULL, *props2;
1007 struct kfd_topology_device *dev, *to_dev;
1008 uint32_t id_from;
1009 uint32_t id_to;
1010
1011 id_from = iolink->proximity_domain_from;
1012 id_to = iolink->proximity_domain_to;
1013
1014 pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
1015 id_from, id_to);
1016 list_for_each_entry(dev, device_list, list) {
1017 if (id_from == dev->proximity_domain) {
1018 props = kfd_alloc_struct(props);
1019 if (!props)
1020 return -ENOMEM;
1021
1022 props->node_from = id_from;
1023 props->node_to = id_to;
1024 props->ver_maj = iolink->version_major;
1025 props->ver_min = iolink->version_minor;
1026 props->iolink_type = iolink->io_interface_type;
1027
1028 if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
1029 props->weight = 20;
1030 else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
1031 props->weight = 15 * iolink->num_hops_xgmi;
1032 else
1033 props->weight = node_distance(id_from, id_to);
1034
1035 props->min_latency = iolink->minimum_latency;
1036 props->max_latency = iolink->maximum_latency;
1037 props->min_bandwidth = iolink->minimum_bandwidth_mbs;
1038 props->max_bandwidth = iolink->maximum_bandwidth_mbs;
1039 props->rec_transfer_size =
1040 iolink->recommended_transfer_size;
1041
1042 dev->io_link_count++;
1043 dev->node_props.io_links_count++;
1044 list_add_tail(&props->list, &dev->io_link_props);
1045 break;
1046 }
1047 }
1048
1049 /* CPU topology is created before GPUs are detected, so CPU->GPU
1050 * links are not built at that time. If a PCIe type is discovered, it
1051 * means a GPU is detected and we are adding GPU->CPU to the topology.
1052 * At this time, also add the corresponded CPU->GPU link if GPU
1053 * is large bar.
1054 * For xGMI, we only added the link with one direction in the crat
1055 * table, add corresponded reversed direction link now.
1056 */
1057 if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
1058 to_dev = kfd_topology_device_by_proximity_domain(id_to);
1059 if (!to_dev)
1060 return -ENODEV;
1061 /* same everything but the other direction */
1062 props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
1063 props2->node_from = id_to;
1064 props2->node_to = id_from;
1065 props2->kobj = NULL;
1066 to_dev->io_link_count++;
1067 to_dev->node_props.io_links_count++;
1068 list_add_tail(&props2->list, &to_dev->io_link_props);
1069 }
1070
1071 return 0;
1072}
1073
1074/* kfd_parse_subtype - parse subtypes and attach it to correct topology device
1075 * present in the device_list
1076 * @sub_type_hdr - subtype section of crat_image
1077 * @device_list - list of topology devices present in this crat_image
1078 */
1079static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
1080 struct list_head *device_list)
1081{
1082 struct crat_subtype_computeunit *cu;
1083 struct crat_subtype_memory *mem;
1084 struct crat_subtype_cache *cache;
1085 struct crat_subtype_iolink *iolink;
1086 int ret = 0;
1087
1088 switch (sub_type_hdr->type) {
1089 case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
1090 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1091 ret = kfd_parse_subtype_cu(cu, device_list);
1092 break;
1093 case CRAT_SUBTYPE_MEMORY_AFFINITY:
1094 mem = (struct crat_subtype_memory *)sub_type_hdr;
1095 ret = kfd_parse_subtype_mem(mem, device_list);
1096 break;
1097 case CRAT_SUBTYPE_CACHE_AFFINITY:
1098 cache = (struct crat_subtype_cache *)sub_type_hdr;
1099 ret = kfd_parse_subtype_cache(cache, device_list);
1100 break;
1101 case CRAT_SUBTYPE_TLB_AFFINITY:
1102 /*
1103 * For now, nothing to do here
1104 */
1105 pr_debug("Found TLB entry in CRAT table (not processing)\n");
1106 break;
1107 case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
1108 /*
1109 * For now, nothing to do here
1110 */
1111 pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
1112 break;
1113 case CRAT_SUBTYPE_IOLINK_AFFINITY:
1114 iolink = (struct crat_subtype_iolink *)sub_type_hdr;
1115 ret = kfd_parse_subtype_iolink(iolink, device_list);
1116 break;
1117 default:
1118 pr_warn("Unknown subtype %d in CRAT\n",
1119 sub_type_hdr->type);
1120 }
1121
1122 return ret;
1123}
1124
1125/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
1126 * create a kfd_topology_device and add in to device_list. Also parse
1127 * CRAT subtypes and attach it to appropriate kfd_topology_device
1128 * @crat_image - input image containing CRAT
1129 * @device_list - [OUT] list of kfd_topology_device generated after
1130 * parsing crat_image
1131 * @proximity_domain - Proximity domain of the first device in the table
1132 *
1133 * Return - 0 if successful else -ve value
1134 */
1135int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
1136 uint32_t proximity_domain)
1137{
1138 struct kfd_topology_device *top_dev = NULL;
1139 struct crat_subtype_generic *sub_type_hdr;
1140 uint16_t node_id;
1141 int ret = 0;
1142 struct crat_header *crat_table = (struct crat_header *)crat_image;
1143 uint16_t num_nodes;
1144 uint32_t image_len;
1145
1146 if (!crat_image)
1147 return -EINVAL;
1148
1149 if (!list_empty(device_list)) {
1150 pr_warn("Error device list should be empty\n");
1151 return -EINVAL;
1152 }
1153
1154 num_nodes = crat_table->num_domains;
1155 image_len = crat_table->length;
1156
1157 pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
1158
1159 for (node_id = 0; node_id < num_nodes; node_id++) {
1160 top_dev = kfd_create_topology_device(device_list);
1161 if (!top_dev)
1162 break;
1163 top_dev->proximity_domain = proximity_domain++;
1164 }
1165
1166 if (!top_dev) {
1167 ret = -ENOMEM;
1168 goto err;
1169 }
1170
1171 memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
1172 memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
1173 CRAT_OEMTABLEID_LENGTH);
1174 top_dev->oem_revision = crat_table->oem_revision;
1175
1176 sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1177 while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
1178 ((char *)crat_image) + image_len) {
1179 if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
1180 ret = kfd_parse_subtype(sub_type_hdr, device_list);
1181 if (ret)
1182 break;
1183 }
1184
1185 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1186 sub_type_hdr->length);
1187 }
1188
1189err:
1190 if (ret)
1191 kfd_release_topology_device_list(device_list);
1192
1193 return ret;
1194}
1195
1196/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1197static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
1198 struct kfd_gpu_cache_info *pcache_info,
1199 struct kfd_cu_info *cu_info,
1200 int mem_available,
1201 int cu_bitmask,
1202 int cache_type, unsigned int cu_processor_id,
1203 int cu_block)
1204{
1205 unsigned int cu_sibling_map_mask;
1206 int first_active_cu;
1207
1208 /* First check if enough memory is available */
1209 if (sizeof(struct crat_subtype_cache) > mem_available)
1210 return -ENOMEM;
1211
1212 cu_sibling_map_mask = cu_bitmask;
1213 cu_sibling_map_mask >>= cu_block;
1214 cu_sibling_map_mask &=
1215 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1216 first_active_cu = ffs(cu_sibling_map_mask);
1217
1218 /* CU could be inactive. In case of shared cache find the first active
1219 * CU. and incase of non-shared cache check if the CU is inactive. If
1220 * inactive active skip it
1221 */
1222 if (first_active_cu) {
1223 memset(pcache, 0, sizeof(struct crat_subtype_cache));
1224 pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1225 pcache->length = sizeof(struct crat_subtype_cache);
1226 pcache->flags = pcache_info[cache_type].flags;
1227 pcache->processor_id_low = cu_processor_id
1228 + (first_active_cu - 1);
1229 pcache->cache_level = pcache_info[cache_type].cache_level;
1230 pcache->cache_size = pcache_info[cache_type].cache_size;
1231
1232 /* Sibling map is w.r.t processor_id_low, so shift out
1233 * inactive CU
1234 */
1235 cu_sibling_map_mask =
1236 cu_sibling_map_mask >> (first_active_cu - 1);
1237
1238 pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1239 pcache->sibling_map[1] =
1240 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1241 pcache->sibling_map[2] =
1242 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1243 pcache->sibling_map[3] =
1244 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1245 return 0;
1246 }
1247 return 1;
1248}
1249
1250/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1251static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
1252 struct kfd_gpu_cache_info *pcache_info,
1253 struct kfd_cu_info *cu_info,
1254 int mem_available,
1255 int cache_type, unsigned int cu_processor_id)
1256{
1257 unsigned int cu_sibling_map_mask;
1258 int first_active_cu;
1259 int i, j, k;
1260
1261 /* First check if enough memory is available */
1262 if (sizeof(struct crat_subtype_cache) > mem_available)
1263 return -ENOMEM;
1264
1265 cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
1266 cu_sibling_map_mask &=
1267 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1268 first_active_cu = ffs(cu_sibling_map_mask);
1269
1270 /* CU could be inactive. In case of shared cache find the first active
1271 * CU. and incase of non-shared cache check if the CU is inactive. If
1272 * inactive active skip it
1273 */
1274 if (first_active_cu) {
1275 memset(pcache, 0, sizeof(struct crat_subtype_cache));
1276 pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1277 pcache->length = sizeof(struct crat_subtype_cache);
1278 pcache->flags = pcache_info[cache_type].flags;
1279 pcache->processor_id_low = cu_processor_id
1280 + (first_active_cu - 1);
1281 pcache->cache_level = pcache_info[cache_type].cache_level;
1282 pcache->cache_size = pcache_info[cache_type].cache_size;
1283
1284 /* Sibling map is w.r.t processor_id_low, so shift out
1285 * inactive CU
1286 */
1287 cu_sibling_map_mask =
1288 cu_sibling_map_mask >> (first_active_cu - 1);
1289 k = 0;
1290 for (i = 0; i < cu_info->num_shader_engines; i++) {
1291 for (j = 0; j < cu_info->num_shader_arrays_per_engine;
1292 j++) {
1293 pcache->sibling_map[k] =
1294 (uint8_t)(cu_sibling_map_mask & 0xFF);
1295 pcache->sibling_map[k+1] =
1296 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1297 pcache->sibling_map[k+2] =
1298 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1299 pcache->sibling_map[k+3] =
1300 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1301 k += 4;
1302 cu_sibling_map_mask =
1303 cu_info->cu_bitmap[i % 4][j + i / 4];
1304 cu_sibling_map_mask &= (
1305 (1 << pcache_info[cache_type].num_cu_shared)
1306 - 1);
1307 }
1308 }
1309 return 0;
1310 }
1311 return 1;
1312}
1313
1314/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
1315 * tables
1316 *
1317 * @kdev - [IN] GPU device
1318 * @gpu_processor_id - [IN] GPU processor ID to which these caches
1319 * associate
1320 * @available_size - [IN] Amount of memory available in pcache
1321 * @cu_info - [IN] Compute Unit info obtained from KGD
1322 * @pcache - [OUT] memory into which cache data is to be filled in.
1323 * @size_filled - [OUT] amount of data used up in pcache.
1324 * @num_of_entries - [OUT] number of caches added
1325 */
1326static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
1327 int gpu_processor_id,
1328 int available_size,
1329 struct kfd_cu_info *cu_info,
1330 struct crat_subtype_cache *pcache,
1331 int *size_filled,
1332 int *num_of_entries)
1333{
1334 struct kfd_gpu_cache_info *pcache_info;
1335 int num_of_cache_types = 0;
1336 int i, j, k;
1337 int ct = 0;
1338 int mem_available = available_size;
1339 unsigned int cu_processor_id;
1340 int ret;
1341 unsigned int num_cu_shared;
1342
1343 switch (kdev->device_info->asic_family) {
1344 case CHIP_KAVERI:
1345 pcache_info = kaveri_cache_info;
1346 num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
1347 break;
1348 case CHIP_HAWAII:
1349 pcache_info = hawaii_cache_info;
1350 num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
1351 break;
1352 case CHIP_CARRIZO:
1353 pcache_info = carrizo_cache_info;
1354 num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
1355 break;
1356 case CHIP_TONGA:
1357 pcache_info = tonga_cache_info;
1358 num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
1359 break;
1360 case CHIP_FIJI:
1361 pcache_info = fiji_cache_info;
1362 num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
1363 break;
1364 case CHIP_POLARIS10:
1365 pcache_info = polaris10_cache_info;
1366 num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
1367 break;
1368 case CHIP_POLARIS11:
1369 pcache_info = polaris11_cache_info;
1370 num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
1371 break;
1372 case CHIP_POLARIS12:
1373 pcache_info = polaris12_cache_info;
1374 num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
1375 break;
1376 case CHIP_VEGAM:
1377 pcache_info = vegam_cache_info;
1378 num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
1379 break;
1380 case CHIP_VEGA10:
1381 pcache_info = vega10_cache_info;
1382 num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
1383 break;
1384 case CHIP_VEGA12:
1385 pcache_info = vega12_cache_info;
1386 num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
1387 break;
1388 case CHIP_VEGA20:
1389 case CHIP_ARCTURUS:
1390 pcache_info = vega20_cache_info;
1391 num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
1392 break;
1393 case CHIP_ALDEBARAN:
1394 pcache_info = aldebaran_cache_info;
1395 num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
1396 break;
1397 case CHIP_RAVEN:
1398 pcache_info = raven_cache_info;
1399 num_of_cache_types = ARRAY_SIZE(raven_cache_info);
1400 break;
1401 case CHIP_RENOIR:
1402 pcache_info = renoir_cache_info;
1403 num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
1404 break;
1405 case CHIP_NAVI10:
1406 case CHIP_NAVI12:
1407 pcache_info = navi10_cache_info;
1408 num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
1409 break;
1410 case CHIP_NAVI14:
1411 pcache_info = navi14_cache_info;
1412 num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
1413 break;
1414 case CHIP_SIENNA_CICHLID:
1415 pcache_info = sienna_cichlid_cache_info;
1416 num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
1417 break;
1418 case CHIP_NAVY_FLOUNDER:
1419 pcache_info = navy_flounder_cache_info;
1420 num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
1421 break;
1422 case CHIP_DIMGREY_CAVEFISH:
1423 pcache_info = dimgrey_cavefish_cache_info;
1424 num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
1425 break;
1426 case CHIP_VANGOGH:
1427 pcache_info = vangogh_cache_info;
1428 num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
1429 break;
1430 case CHIP_BEIGE_GOBY:
1431 pcache_info = beige_goby_cache_info;
1432 num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
1433 break;
1434 case CHIP_YELLOW_CARP:
1435 pcache_info = yellow_carp_cache_info;
1436 num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
1437 break;
1438 default:
1439 return -EINVAL;
1440 }
1441
1442 *size_filled = 0;
1443 *num_of_entries = 0;
1444
1445 /* For each type of cache listed in the kfd_gpu_cache_info table,
1446 * go through all available Compute Units.
1447 * The [i,j,k] loop will
1448 * if kfd_gpu_cache_info.num_cu_shared = 1
1449 * will parse through all available CU
1450 * If (kfd_gpu_cache_info.num_cu_shared != 1)
1451 * then it will consider only one CU from
1452 * the shared unit
1453 */
1454
1455 for (ct = 0; ct < num_of_cache_types; ct++) {
1456 cu_processor_id = gpu_processor_id;
1457 if (pcache_info[ct].cache_level == 1) {
1458 for (i = 0; i < cu_info->num_shader_engines; i++) {
1459 for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1460 for (k = 0; k < cu_info->num_cu_per_sh;
1461 k += pcache_info[ct].num_cu_shared) {
1462 ret = fill_in_l1_pcache(pcache,
1463 pcache_info,
1464 cu_info,
1465 mem_available,
1466 cu_info->cu_bitmap[i % 4][j + i / 4],
1467 ct,
1468 cu_processor_id,
1469 k);
1470
1471 if (ret < 0)
1472 break;
1473
1474 if (!ret) {
1475 pcache++;
1476 (*num_of_entries)++;
1477 mem_available -= sizeof(*pcache);
1478 (*size_filled) += sizeof(*pcache);
1479 }
1480
1481 /* Move to next CU block */
1482 num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1483 cu_info->num_cu_per_sh) ?
1484 pcache_info[ct].num_cu_shared :
1485 (cu_info->num_cu_per_sh - k);
1486 cu_processor_id += num_cu_shared;
1487 }
1488 }
1489 }
1490 } else {
1491 ret = fill_in_l2_l3_pcache(pcache,
1492 pcache_info,
1493 cu_info,
1494 mem_available,
1495 ct,
1496 cu_processor_id);
1497
1498 if (ret < 0)
1499 break;
1500
1501 if (!ret) {
1502 pcache++;
1503 (*num_of_entries)++;
1504 mem_available -= sizeof(*pcache);
1505 (*size_filled) += sizeof(*pcache);
1506 }
1507 }
1508 }
1509
1510 pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
1511
1512 return 0;
1513}
1514
1515static bool kfd_ignore_crat(void)
1516{
1517 bool ret;
1518
1519 if (ignore_crat)
1520 return true;
1521
1522#ifndef KFD_SUPPORT_IOMMU_V2
1523 ret = true;
1524#else
1525 ret = false;
1526#endif
1527
1528 return ret;
1529}
1530
1531/*
1532 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
1533 * copies CRAT from ACPI (if available).
1534 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1535 *
1536 * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
1537 * crat_image will be NULL
1538 * @size: [OUT] size of crat_image
1539 *
1540 * Return 0 if successful else return error code
1541 */
1542int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
1543{
1544 struct acpi_table_header *crat_table;
1545 acpi_status status;
1546 void *pcrat_image;
1547 int rc = 0;
1548
1549 if (!crat_image)
1550 return -EINVAL;
1551
1552 *crat_image = NULL;
1553
1554 if (kfd_ignore_crat()) {
1555 pr_info("CRAT table disabled by module option\n");
1556 return -ENODATA;
1557 }
1558
1559 /* Fetch the CRAT table from ACPI */
1560 status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
1561 if (status == AE_NOT_FOUND) {
1562 pr_warn("CRAT table not found\n");
1563 return -ENODATA;
1564 } else if (ACPI_FAILURE(status)) {
1565 const char *err = acpi_format_exception(status);
1566
1567 pr_err("CRAT table error: %s\n", err);
1568 return -EINVAL;
1569 }
1570
1571 pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
1572 if (!pcrat_image) {
1573 rc = -ENOMEM;
1574 goto out;
1575 }
1576
1577 memcpy(pcrat_image, crat_table, crat_table->length);
1578 *crat_image = pcrat_image;
1579 *size = crat_table->length;
1580out:
1581 acpi_put_table(crat_table);
1582 return rc;
1583}
1584
1585/* Memory required to create Virtual CRAT.
1586 * Since there is no easy way to predict the amount of memory required, the
1587 * following amount is allocated for GPU Virtual CRAT. This is
1588 * expected to cover all known conditions. But to be safe additional check
1589 * is put in the code to ensure we don't overwrite.
1590 */
1591#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
1592
1593/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
1594 *
1595 * @numa_node_id: CPU NUMA node id
1596 * @avail_size: Available size in the memory
1597 * @sub_type_hdr: Memory into which compute info will be filled in
1598 *
1599 * Return 0 if successful else return -ve value
1600 */
1601static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
1602 int proximity_domain,
1603 struct crat_subtype_computeunit *sub_type_hdr)
1604{
1605 const struct cpumask *cpumask;
1606
1607 *avail_size -= sizeof(struct crat_subtype_computeunit);
1608 if (*avail_size < 0)
1609 return -ENOMEM;
1610
1611 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1612
1613 /* Fill in subtype header data */
1614 sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1615 sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1616 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1617
1618 cpumask = cpumask_of_node(numa_node_id);
1619
1620 /* Fill in CU data */
1621 sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
1622 sub_type_hdr->proximity_domain = proximity_domain;
1623 sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
1624 if (sub_type_hdr->processor_id_low == -1)
1625 return -EINVAL;
1626
1627 sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
1628
1629 return 0;
1630}
1631
1632/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
1633 *
1634 * @numa_node_id: CPU NUMA node id
1635 * @avail_size: Available size in the memory
1636 * @sub_type_hdr: Memory into which compute info will be filled in
1637 *
1638 * Return 0 if successful else return -ve value
1639 */
1640static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
1641 int proximity_domain,
1642 struct crat_subtype_memory *sub_type_hdr)
1643{
1644 uint64_t mem_in_bytes = 0;
1645 pg_data_t *pgdat;
1646 int zone_type;
1647
1648 *avail_size -= sizeof(struct crat_subtype_memory);
1649 if (*avail_size < 0)
1650 return -ENOMEM;
1651
1652 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1653
1654 /* Fill in subtype header data */
1655 sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1656 sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1657 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1658
1659 /* Fill in Memory Subunit data */
1660
1661 /* Unlike si_meminfo, si_meminfo_node is not exported. So
1662 * the following lines are duplicated from si_meminfo_node
1663 * function
1664 */
1665 pgdat = NODE_DATA(numa_node_id);
1666 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
1667 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
1668 mem_in_bytes <<= PAGE_SHIFT;
1669
1670 sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
1671 sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
1672 sub_type_hdr->proximity_domain = proximity_domain;
1673
1674 return 0;
1675}
1676
1677#ifdef CONFIG_X86_64
1678static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
1679 uint32_t *num_entries,
1680 struct crat_subtype_iolink *sub_type_hdr)
1681{
1682 int nid;
1683 struct cpuinfo_x86 *c = &cpu_data(0);
1684 uint8_t link_type;
1685
1686 if (c->x86_vendor == X86_VENDOR_AMD)
1687 link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
1688 else
1689 link_type = CRAT_IOLINK_TYPE_QPI_1_1;
1690
1691 *num_entries = 0;
1692
1693 /* Create IO links from this node to other CPU nodes */
1694 for_each_online_node(nid) {
1695 if (nid == numa_node_id) /* node itself */
1696 continue;
1697
1698 *avail_size -= sizeof(struct crat_subtype_iolink);
1699 if (*avail_size < 0)
1700 return -ENOMEM;
1701
1702 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1703
1704 /* Fill in subtype header data */
1705 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1706 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1707 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1708
1709 /* Fill in IO link data */
1710 sub_type_hdr->proximity_domain_from = numa_node_id;
1711 sub_type_hdr->proximity_domain_to = nid;
1712 sub_type_hdr->io_interface_type = link_type;
1713
1714 (*num_entries)++;
1715 sub_type_hdr++;
1716 }
1717
1718 return 0;
1719}
1720#endif
1721
1722/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
1723 *
1724 * @pcrat_image: Fill in VCRAT for CPU
1725 * @size: [IN] allocated size of crat_image.
1726 * [OUT] actual size of data filled in crat_image
1727 */
1728static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
1729{
1730 struct crat_header *crat_table = (struct crat_header *)pcrat_image;
1731 struct acpi_table_header *acpi_table;
1732 acpi_status status;
1733 struct crat_subtype_generic *sub_type_hdr;
1734 int avail_size = *size;
1735 int numa_node_id;
1736#ifdef CONFIG_X86_64
1737 uint32_t entries = 0;
1738#endif
1739 int ret = 0;
1740
1741 if (!pcrat_image)
1742 return -EINVAL;
1743
1744 /* Fill in CRAT Header.
1745 * Modify length and total_entries as subunits are added.
1746 */
1747 avail_size -= sizeof(struct crat_header);
1748 if (avail_size < 0)
1749 return -ENOMEM;
1750
1751 memset(crat_table, 0, sizeof(struct crat_header));
1752 memcpy(&crat_table->signature, CRAT_SIGNATURE,
1753 sizeof(crat_table->signature));
1754 crat_table->length = sizeof(struct crat_header);
1755
1756 status = acpi_get_table("DSDT", 0, &acpi_table);
1757 if (status != AE_OK)
1758 pr_warn("DSDT table not found for OEM information\n");
1759 else {
1760 crat_table->oem_revision = acpi_table->revision;
1761 memcpy(crat_table->oem_id, acpi_table->oem_id,
1762 CRAT_OEMID_LENGTH);
1763 memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
1764 CRAT_OEMTABLEID_LENGTH);
1765 acpi_put_table(acpi_table);
1766 }
1767 crat_table->total_entries = 0;
1768 crat_table->num_domains = 0;
1769
1770 sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1771
1772 for_each_online_node(numa_node_id) {
1773 if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
1774 continue;
1775
1776 /* Fill in Subtype: Compute Unit */
1777 ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
1778 crat_table->num_domains,
1779 (struct crat_subtype_computeunit *)sub_type_hdr);
1780 if (ret < 0)
1781 return ret;
1782 crat_table->length += sub_type_hdr->length;
1783 crat_table->total_entries++;
1784
1785 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1786 sub_type_hdr->length);
1787
1788 /* Fill in Subtype: Memory */
1789 ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
1790 crat_table->num_domains,
1791 (struct crat_subtype_memory *)sub_type_hdr);
1792 if (ret < 0)
1793 return ret;
1794 crat_table->length += sub_type_hdr->length;
1795 crat_table->total_entries++;
1796
1797 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1798 sub_type_hdr->length);
1799
1800 /* Fill in Subtype: IO Link */
1801#ifdef CONFIG_X86_64
1802 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
1803 &entries,
1804 (struct crat_subtype_iolink *)sub_type_hdr);
1805 if (ret < 0)
1806 return ret;
1807
1808 if (entries) {
1809 crat_table->length += (sub_type_hdr->length * entries);
1810 crat_table->total_entries += entries;
1811
1812 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1813 sub_type_hdr->length * entries);
1814 }
1815#else
1816 pr_info("IO link not available for non x86 platforms\n");
1817#endif
1818
1819 crat_table->num_domains++;
1820 }
1821
1822 /* TODO: Add cache Subtype for CPU.
1823 * Currently, CPU cache information is available in function
1824 * detect_cache_attributes(cpu) defined in the file
1825 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
1826 * exported and to get the same information the code needs to be
1827 * duplicated.
1828 */
1829
1830 *size = crat_table->length;
1831 pr_info("Virtual CRAT table created for CPU\n");
1832
1833 return 0;
1834}
1835
1836static int kfd_fill_gpu_memory_affinity(int *avail_size,
1837 struct kfd_dev *kdev, uint8_t type, uint64_t size,
1838 struct crat_subtype_memory *sub_type_hdr,
1839 uint32_t proximity_domain,
1840 const struct kfd_local_mem_info *local_mem_info)
1841{
1842 *avail_size -= sizeof(struct crat_subtype_memory);
1843 if (*avail_size < 0)
1844 return -ENOMEM;
1845
1846 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1847 sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1848 sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1849 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1850
1851 sub_type_hdr->proximity_domain = proximity_domain;
1852
1853 pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
1854 type, size);
1855
1856 sub_type_hdr->length_low = lower_32_bits(size);
1857 sub_type_hdr->length_high = upper_32_bits(size);
1858
1859 sub_type_hdr->width = local_mem_info->vram_width;
1860 sub_type_hdr->visibility_type = type;
1861
1862 return 0;
1863}
1864
1865#ifdef CONFIG_ACPI_NUMA
1866static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
1867{
1868 struct acpi_table_header *table_header = NULL;
1869 struct acpi_subtable_header *sub_header = NULL;
1870 unsigned long table_end, subtable_len;
1871 u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
1872 pci_dev_id(kdev->pdev);
1873 u32 bdf;
1874 acpi_status status;
1875 struct acpi_srat_cpu_affinity *cpu;
1876 struct acpi_srat_generic_affinity *gpu;
1877 int pxm = 0, max_pxm = 0;
1878 int numa_node = NUMA_NO_NODE;
1879 bool found = false;
1880
1881 /* Fetch the SRAT table from ACPI */
1882 status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
1883 if (status == AE_NOT_FOUND) {
1884 pr_warn("SRAT table not found\n");
1885 return;
1886 } else if (ACPI_FAILURE(status)) {
1887 const char *err = acpi_format_exception(status);
1888 pr_err("SRAT table error: %s\n", err);
1889 return;
1890 }
1891
1892 table_end = (unsigned long)table_header + table_header->length;
1893
1894 /* Parse all entries looking for a match. */
1895 sub_header = (struct acpi_subtable_header *)
1896 ((unsigned long)table_header +
1897 sizeof(struct acpi_table_srat));
1898 subtable_len = sub_header->length;
1899
1900 while (((unsigned long)sub_header) + subtable_len < table_end) {
1901 /*
1902 * If length is 0, break from this loop to avoid
1903 * infinite loop.
1904 */
1905 if (subtable_len == 0) {
1906 pr_err("SRAT invalid zero length\n");
1907 break;
1908 }
1909
1910 switch (sub_header->type) {
1911 case ACPI_SRAT_TYPE_CPU_AFFINITY:
1912 cpu = (struct acpi_srat_cpu_affinity *)sub_header;
1913 pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
1914 cpu->proximity_domain_lo;
1915 if (pxm > max_pxm)
1916 max_pxm = pxm;
1917 break;
1918 case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
1919 gpu = (struct acpi_srat_generic_affinity *)sub_header;
1920 bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
1921 *((u16 *)(&gpu->device_handle[2]));
1922 if (bdf == pci_id) {
1923 found = true;
1924 numa_node = pxm_to_node(gpu->proximity_domain);
1925 }
1926 break;
1927 default:
1928 break;
1929 }
1930
1931 if (found)
1932 break;
1933
1934 sub_header = (struct acpi_subtable_header *)
1935 ((unsigned long)sub_header + subtable_len);
1936 subtable_len = sub_header->length;
1937 }
1938
1939 acpi_put_table(table_header);
1940
1941 /* Workaround bad cpu-gpu binding case */
1942 if (found && (numa_node < 0 ||
1943 numa_node > pxm_to_node(max_pxm)))
1944 numa_node = 0;
1945
1946 if (numa_node != NUMA_NO_NODE)
1947 set_dev_node(&kdev->pdev->dev, numa_node);
1948}
1949#endif
1950
1951/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
1952 * to its NUMA node
1953 * @avail_size: Available size in the memory
1954 * @kdev - [IN] GPU device
1955 * @sub_type_hdr: Memory into which io link info will be filled in
1956 * @proximity_domain - proximity domain of the GPU node
1957 *
1958 * Return 0 if successful else return -ve value
1959 */
1960static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1961 struct kfd_dev *kdev,
1962 struct crat_subtype_iolink *sub_type_hdr,
1963 uint32_t proximity_domain)
1964{
1965 struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;
1966
1967 *avail_size -= sizeof(struct crat_subtype_iolink);
1968 if (*avail_size < 0)
1969 return -ENOMEM;
1970
1971 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1972
1973 /* Fill in subtype header data */
1974 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1975 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1976 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1977 if (kfd_dev_is_large_bar(kdev))
1978 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1979
1980 /* Fill in IOLINK subtype.
1981 * TODO: Fill-in other fields of iolink subtype
1982 */
1983 if (adev->gmc.xgmi.connected_to_cpu) {
1984 /*
1985 * with host gpu xgmi link, host can access gpu memory whether
1986 * or not pcie bar type is large, so always create bidirectional
1987 * io link.
1988 */
1989 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1990 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1991 sub_type_hdr->num_hops_xgmi = 1;
1992 } else {
1993 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
1994 }
1995
1996 sub_type_hdr->proximity_domain_from = proximity_domain;
1997
1998#ifdef CONFIG_ACPI_NUMA
1999 if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2000 kfd_find_numa_node_in_srat(kdev);
2001#endif
2002#ifdef CONFIG_NUMA
2003 if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2004 sub_type_hdr->proximity_domain_to = 0;
2005 else
2006 sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
2007#else
2008 sub_type_hdr->proximity_domain_to = 0;
2009#endif
2010 return 0;
2011}
2012
2013static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
2014 struct kfd_dev *kdev,
2015 struct kfd_dev *peer_kdev,
2016 struct crat_subtype_iolink *sub_type_hdr,
2017 uint32_t proximity_domain_from,
2018 uint32_t proximity_domain_to)
2019{
2020 *avail_size -= sizeof(struct crat_subtype_iolink);
2021 if (*avail_size < 0)
2022 return -ENOMEM;
2023
2024 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
2025
2026 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
2027 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
2028 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
2029 CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
2030
2031 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
2032 sub_type_hdr->proximity_domain_from = proximity_domain_from;
2033 sub_type_hdr->proximity_domain_to = proximity_domain_to;
2034 sub_type_hdr->num_hops_xgmi =
2035 amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
2036 return 0;
2037}
2038
2039/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for CPU
2040 *
2041 * @pcrat_image: Fill in VCRAT for GPU
2042 * @size: [IN] allocated size of crat_image.
2043 * [OUT] actual size of data filled in crat_image
2044 */
2045static int kfd_create_vcrat_image_gpu(void *pcrat_image,
2046 size_t *size, struct kfd_dev *kdev,
2047 uint32_t proximity_domain)
2048{
2049 struct crat_header *crat_table = (struct crat_header *)pcrat_image;
2050 struct crat_subtype_generic *sub_type_hdr;
2051 struct kfd_local_mem_info local_mem_info;
2052 struct kfd_topology_device *peer_dev;
2053 struct crat_subtype_computeunit *cu;
2054 struct kfd_cu_info cu_info;
2055 int avail_size = *size;
2056 uint32_t total_num_of_cu;
2057 int num_of_cache_entries = 0;
2058 int cache_mem_filled = 0;
2059 uint32_t nid = 0;
2060 int ret = 0;
2061
2062 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
2063 return -EINVAL;
2064
2065 /* Fill the CRAT Header.
2066 * Modify length and total_entries as subunits are added.
2067 */
2068 avail_size -= sizeof(struct crat_header);
2069 if (avail_size < 0)
2070 return -ENOMEM;
2071
2072 memset(crat_table, 0, sizeof(struct crat_header));
2073
2074 memcpy(&crat_table->signature, CRAT_SIGNATURE,
2075 sizeof(crat_table->signature));
2076 /* Change length as we add more subtypes*/
2077 crat_table->length = sizeof(struct crat_header);
2078 crat_table->num_domains = 1;
2079 crat_table->total_entries = 0;
2080
2081 /* Fill in Subtype: Compute Unit
2082 * First fill in the sub type header and then sub type data
2083 */
2084 avail_size -= sizeof(struct crat_subtype_computeunit);
2085 if (avail_size < 0)
2086 return -ENOMEM;
2087
2088 sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
2089 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
2090
2091 sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
2092 sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
2093 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
2094
2095 /* Fill CU subtype data */
2096 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
2097 cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
2098 cu->proximity_domain = proximity_domain;
2099
2100 amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
2101 cu->num_simd_per_cu = cu_info.simd_per_cu;
2102 cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
2103 cu->max_waves_simd = cu_info.max_waves_per_simd;
2104
2105 cu->wave_front_size = cu_info.wave_front_size;
2106 cu->array_count = cu_info.num_shader_arrays_per_engine *
2107 cu_info.num_shader_engines;
2108 total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
2109 cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
2110 cu->num_cu_per_array = cu_info.num_cu_per_sh;
2111 cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
2112 cu->num_banks = cu_info.num_shader_engines;
2113 cu->lds_size_in_kb = cu_info.lds_size;
2114
2115 cu->hsa_capability = 0;
2116
2117 /* Check if this node supports IOMMU. During parsing this flag will
2118 * translate to HSA_CAP_ATS_PRESENT
2119 */
2120 if (!kfd_iommu_check_device(kdev))
2121 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
2122
2123 crat_table->length += sub_type_hdr->length;
2124 crat_table->total_entries++;
2125
2126 /* Fill in Subtype: Memory. Only on systems with large BAR (no
2127 * private FB), report memory as public. On other systems
2128 * report the total FB size (public+private) as a single
2129 * private heap.
2130 */
2131 amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
2132 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2133 sub_type_hdr->length);
2134
2135 if (debug_largebar)
2136 local_mem_info.local_mem_size_private = 0;
2137
2138 if (local_mem_info.local_mem_size_private == 0)
2139 ret = kfd_fill_gpu_memory_affinity(&avail_size,
2140 kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
2141 local_mem_info.local_mem_size_public,
2142 (struct crat_subtype_memory *)sub_type_hdr,
2143 proximity_domain,
2144 &local_mem_info);
2145 else
2146 ret = kfd_fill_gpu_memory_affinity(&avail_size,
2147 kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
2148 local_mem_info.local_mem_size_public +
2149 local_mem_info.local_mem_size_private,
2150 (struct crat_subtype_memory *)sub_type_hdr,
2151 proximity_domain,
2152 &local_mem_info);
2153 if (ret < 0)
2154 return ret;
2155
2156 crat_table->length += sizeof(struct crat_subtype_memory);
2157 crat_table->total_entries++;
2158
2159 /* TODO: Fill in cache information. This information is NOT readily
2160 * available in KGD
2161 */
2162 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2163 sub_type_hdr->length);
2164 ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
2165 avail_size,
2166 &cu_info,
2167 (struct crat_subtype_cache *)sub_type_hdr,
2168 &cache_mem_filled,
2169 &num_of_cache_entries);
2170
2171 if (ret < 0)
2172 return ret;
2173
2174 crat_table->length += cache_mem_filled;
2175 crat_table->total_entries += num_of_cache_entries;
2176 avail_size -= cache_mem_filled;
2177
2178 /* Fill in Subtype: IO_LINKS
2179 * Only direct links are added here which is Link from GPU to
2180 * to its NUMA node. Indirect links are added by userspace.
2181 */
2182 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2183 cache_mem_filled);
2184 ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
2185 (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
2186
2187 if (ret < 0)
2188 return ret;
2189
2190 crat_table->length += sub_type_hdr->length;
2191 crat_table->total_entries++;
2192
2193
2194 /* Fill in Subtype: IO_LINKS
2195 * Direct links from GPU to other GPUs through xGMI.
2196 * We will loop GPUs that already be processed (with lower value
2197 * of proximity_domain), add the link for the GPUs with same
2198 * hive id (from this GPU to other GPU) . The reversed iolink
2199 * (from other GPU to this GPU) will be added
2200 * in kfd_parse_subtype_iolink.
2201 */
2202 if (kdev->hive_id) {
2203 for (nid = 0; nid < proximity_domain; ++nid) {
2204 peer_dev = kfd_topology_device_by_proximity_domain(nid);
2205 if (!peer_dev->gpu)
2206 continue;
2207 if (peer_dev->gpu->hive_id != kdev->hive_id)
2208 continue;
2209 sub_type_hdr = (typeof(sub_type_hdr))(
2210 (char *)sub_type_hdr +
2211 sizeof(struct crat_subtype_iolink));
2212 ret = kfd_fill_gpu_xgmi_link_to_gpu(
2213 &avail_size, kdev, peer_dev->gpu,
2214 (struct crat_subtype_iolink *)sub_type_hdr,
2215 proximity_domain, nid);
2216 if (ret < 0)
2217 return ret;
2218 crat_table->length += sub_type_hdr->length;
2219 crat_table->total_entries++;
2220 }
2221 }
2222 *size = crat_table->length;
2223 pr_info("Virtual CRAT table created for GPU\n");
2224
2225 return ret;
2226}
2227
2228/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
2229 * creates a Virtual CRAT (VCRAT) image
2230 *
2231 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
2232 *
2233 * @crat_image: VCRAT image created because ACPI does not have a
2234 * CRAT for this device
2235 * @size: [OUT] size of virtual crat_image
2236 * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
2237 * COMPUTE_UNIT_GPU - Create VCRAT for GPU
2238 * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
2239 * -- this option is not currently implemented.
2240 * The assumption is that all AMD APUs will have CRAT
2241 * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
2242 *
2243 * Return 0 if successful else return -ve value
2244 */
2245int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
2246 int flags, struct kfd_dev *kdev,
2247 uint32_t proximity_domain)
2248{
2249 void *pcrat_image = NULL;
2250 int ret = 0, num_nodes;
2251 size_t dyn_size;
2252
2253 if (!crat_image)
2254 return -EINVAL;
2255
2256 *crat_image = NULL;
2257
2258 /* Allocate the CPU Virtual CRAT size based on the number of online
2259 * nodes. Allocate VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image.
2260 * This should cover all the current conditions. A check is put not
2261 * to overwrite beyond allocated size for GPUs
2262 */
2263 switch (flags) {
2264 case COMPUTE_UNIT_CPU:
2265 num_nodes = num_online_nodes();
2266 dyn_size = sizeof(struct crat_header) +
2267 num_nodes * (sizeof(struct crat_subtype_computeunit) +
2268 sizeof(struct crat_subtype_memory) +
2269 (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
2270 pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
2271 if (!pcrat_image)
2272 return -ENOMEM;
2273 *size = dyn_size;
2274 pr_debug("CRAT size is %ld", dyn_size);
2275 ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
2276 break;
2277 case COMPUTE_UNIT_GPU:
2278 if (!kdev)
2279 return -EINVAL;
2280 pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
2281 if (!pcrat_image)
2282 return -ENOMEM;
2283 *size = VCRAT_SIZE_FOR_GPU;
2284 ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
2285 proximity_domain);
2286 break;
2287 case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
2288 /* TODO: */
2289 ret = -EINVAL;
2290 pr_err("VCRAT not implemented for APU\n");
2291 break;
2292 default:
2293 ret = -EINVAL;
2294 }
2295
2296 if (!ret)
2297 *crat_image = pcrat_image;
2298 else
2299 kvfree(pcrat_image);
2300
2301 return ret;
2302}
2303
2304
2305/* kfd_destroy_crat_image
2306 *
2307 * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
2308 *
2309 */
2310void kfd_destroy_crat_image(void *crat_image)
2311{
2312 kvfree(crat_image);
2313}