// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

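/*
 * kref release callback for a submitqueue: drop the reference taken on the
 * owning file private at create time, then free the queue itself.
 */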
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

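/*
 * Look up a submitqueue by id under the read lock.  On success an extra
 * reference is taken; the caller must release it with msm_submitqueue_put().
 */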
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

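/*
 * Tear down all submitqueues for a file when it is closed: unlink each
 * queue and drop the reference held by the list.
 */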
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

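/*
 * Allocate a new submitqueue for the given file.  The requested priority is
 * validated against the number of rings exposed by the GPU, and the queue is
 * added to the per-file list with a unique, monotonically increasing id.
 */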
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;

	if (!ctx)
		return -ENODEV;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (priv->gpu) {
		if (prio >= priv->gpu->nr_rings) {
			kfree(queue);
			return -EINVAL;
		}

		queue->prio = prio;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

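/*
 * Create the default submitqueue (id 0) when a file is opened.  Priority 2
 * is used by default, clamped to the valid ring range for the current GPU.
 */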
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio;

	if (!ctx)
		return 0;

	/*
	 * Select priority 2 as the "default priority" unless nr_rings is less
	 * than 2, in which case pick the lowest priority
	 */
	default_prio = priv->gpu ?
		clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;

	INIT_LIST_HEAD(&ctx->submitqueues);

	rwlock_init(&ctx->queuelock);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

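/*
 * MSM_SUBMITQUEUE_PARAM_FAULTS: copy the queue's fault counter to userspace.
 * A zero args->len is treated as a size query and just reports the expected
 * data size; otherwise at most sizeof(queue->faults) bytes are copied.
 */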
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

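/*
 * Back end for the SUBMITQUEUE_QUERY ioctl: look up the queue by id and
 * dispatch on the requested parameter.  Unknown parameters return -EINVAL.
 */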
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

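/*
 * Back end for the SUBMITQUEUE_CLOSE ioctl: unlink the queue with the given
 * id (the default queue, id 0, cannot be removed) and drop the list's
 * reference to it.
 */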
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}