/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_vi(struct kernel_queue *kq);
static void submit_packet_vi(struct kernel_queue *kq);

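/* Wire up the VI-specific kernel queue operations. */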
void kernel_queue_init_vi(struct kernel_queue_ops *ops)
{
	ops->initialize = initialize_vi;
	ops->uninitialize = uninitialize_vi;
	ops->submit_packet = submit_packet_vi;
}

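/*
 * Allocate and zero one page of GTT memory for the end-of-pipe (EOP)
 * buffer. The type and queue_size arguments are unused on VI.
 */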
static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size)
{
	int retval;

	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
	if (retval != 0)
		return false;

	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;

	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);

	return true;
}

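/* Free the EOP buffer allocated by initialize_vi(). */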
static void uninitialize_vi(struct kernel_queue *kq)
{
	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
}

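/*
 * Publish the pending write pointer and ring the doorbell so the
 * hardware starts fetching the newly written packets.
 */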
static void submit_packet_vi(struct kernel_queue *kq)
{
	*kq->wptr_kernel = kq->pending_wptr;
	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
				kq->pending_wptr);
}

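/*
 * Build a PM4 type-3 packet header. The count field encodes the
 * number of dwords following the header, minus one; since packet_size
 * includes the header dword, that is packet_size / 4 - 2.
 */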
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = packet_size / 4 - 2;
	header.type = PM4_TYPE_3;

	return header.u32All;
}

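/*
 * Build a MAP_PROCESS packet that introduces a process to the HW
 * scheduler: its PASID, page table base, shared-memory apertures and
 * GDS/GWS/OAC allocations.
 */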
static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;

	packet = (struct pm4_mes_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

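/*
 * Build a RUN_LIST packet that points the HW scheduler at the runlist
 * indirect buffer (IB) and tells it how many processes to schedule
 * concurrently.
 */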
static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;
	int concurrent_proc_cnt = 0;
	struct kfd_dev *kfd = pm->dqm->dev;

	if (WARN_ON(!ib))
		return -EFAULT;

	/* Determine the number of processes to map together to HW:
	 * it cannot exceed the number of VMIDs available to the
	 * scheduler, and is the smaller of the number of processes in
	 * the runlist and the kfd module parameter hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has already been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

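/*
 * Build a SET_RESOURCES packet describing the VMIDs, queues, GWS
 * entries and GDS heap the HW scheduler may hand out to processes.
 */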
int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
				struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;

	packet = (struct pm4_mes_set_resources *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	return 0;
}

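/*
 * Build a MAP_QUEUES packet for a single compute, DIQ or SDMA queue,
 * selecting the engine and queue type from the queue properties.
 */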
static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

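/*
 * Build an UNMAP_QUEUES packet that preempts or resets the queues
 * selected by the filter: one doorbell, one PASID, all queues, or all
 * non-static queues.
 */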
static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d", type);
		return -EINVAL;
	}

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}

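/*
 * Build a QUERY_STATUS packet that has the HW scheduler write
 * fence_value to fence_address once preceding work has completed,
 * letting the driver poll the fence to detect completion.
 */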
static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}

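/*
 * Build a MEC RELEASE_MEM packet: flush caches at end of pipe, write
 * a 32-bit value (here 0) to gpu_addr and raise an interrupt once the
 * write is confirmed.
 */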
static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
{
	struct pm4_mec_release_mem *packet;

	packet = (struct pm4_mec_release_mem *)buffer;
	memset(buffer, 0, sizeof(*packet));

	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
						 sizeof(*packet));

	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
	packet->bitfields2.tcl1_action_ena = 1;
	packet->bitfields2.tc_action_ena = 1;
	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
	packet->bitfields2.atc = 0;

	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
	packet->bitfields3.int_sel =
		int_sel___release_mem__send_interrupt_after_write_confirm;

	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
	packet->address_hi = upper_32_bits(gpu_addr);

	packet->data_lo = 0;

	return 0;
}

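/* Packet manager function table and packet sizes for VI hardware. */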
const struct packet_manager_funcs kfd_vi_pm_funcs = {
	.map_process		= pm_map_process_vi,
	.runlist		= pm_runlist_vi,
	.set_resources		= pm_set_resources_vi,
	.map_queues		= pm_map_queues_vi,
	.unmap_queues		= pm_unmap_queues_vi,
	.query_status		= pm_query_status_vi,
	.release_mem		= pm_release_mem_vi,
	.map_process_size	= sizeof(struct pm4_mes_map_process),
	.runlist_size		= sizeof(struct pm4_mes_runlist),
	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
	.query_status_size	= sizeof(struct pm4_mes_query_status),
	.release_mem_size	= sizeof(struct pm4_mec_release_mem)
};