tools/perf/util/thread_map.c (v4.6)
 
#include <dirent.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "util.h"
#include "debug.h"
#include "event.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

static void thread_map__reset(struct thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
}

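/*
 * thread_map__realloc() both creates (via the thread_map__alloc() macro
 * below) and grows a map; slots added by a grow are zeroed so that their
 * pid/comm fields start out in a known state.
 */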
static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)

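/* Build a map holding every thread of @pid, as listed in /proc/<pid>/task. */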
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		atomic_set(&threads->refcnt, 1);
	}

	for (i=0; i<items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}

struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}

	return threads;
}

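/*
 * Walk all of /proc, keep only processes owned by @uid, and pull in every
 * task of each matching process.  The map starts at 32 slots and doubles
 * whenever the next batch of tasks would not fit.
 */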
struct thread_map *thread_map__new_by_uid(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[256];
	struct dirent *dirent, **namelist = NULL;
	struct thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	atomic_set(&threads->refcnt, 1);

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		bool grow = false;
		struct stat st;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);

		if (stat(path, &st) != 0)
			continue;

		if (st.st_uid != uid)
			continue;

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

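/* Precedence when several selectors are given: pid first, then uid, then tid. */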
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}

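/*
 * Parse a comma-separated pid list (e.g. "1234,5678") and expand each pid
 * into all of its tasks via /proc/<pid>/task.
 */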
static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}

struct thread_map *thread_map__new_dummy(void)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}
	return threads;
}

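/*
 * Like thread_map__new_by_pid_str(), but the listed tids are used as-is
 * rather than being expanded through /proc; a NULL string yields the
 * single-entry dummy map that perf-stat relies on.
 */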
static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	strlist__delete(slist);
	goto out;
}

struct thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid_str(tid);
}

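/*
 * Reference counting: constructors return a map with refcnt == 1,
 * thread_map__get() takes an extra reference and thread_map__put()
 * drops one, freeing the map (and its comm strings) on the last put.
 */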
static void thread_map__delete(struct thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(atomic_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}

struct thread_map *thread_map__get(struct thread_map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}

void thread_map__put(struct thread_map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}

size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}

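/*
 * Comm names are read from /proc/<pid>/comm.  Failure to read one is not
 * fatal: a warning is printed and the entry keeps a NULL comm.
 */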
static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		rtrim(*comm);
	}

	free(path);
	return err;
}

static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

void thread_map__read_comms(struct thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}

static void thread_map__copy_event(struct thread_map *threads,
				   struct thread_map_event *event)
{
	unsigned i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	atomic_set(&threads->refcnt, 1);
}

struct thread_map *thread_map__new_event(struct thread_map_event *event)
{
	struct thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}
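
These helpers are internal to the perf tools rather than a standalone library, so the following is only a minimal sketch of how the v4.6 API fits together, assuming it is compiled inside the perf source tree where "thread_map.h" and the util headers it pulls in are available.

#include <stdio.h>
#include <sys/types.h>
#include "thread_map.h"	/* perf-internal header, assumed available in-tree */

/* Hypothetical helper: print every thread owned by the given user. */
static void print_threads_of_uid(uid_t uid)
{
	/* NULL pid/tid strings make thread_map__new_str() take the uid path. */
	struct thread_map *threads = thread_map__new_str(NULL, NULL, uid);

	if (threads == NULL)
		return;

	thread_map__read_comms(threads);	/* best effort, failures only warn */
	thread_map__fprintf(threads, stdout);	/* "N threads: tid, tid, ..." */
	thread_map__put(threads);		/* drop the creation reference */
}
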
tools/perf/util/thread_map.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "string2.h"
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "debug.h"
#include "event.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)

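/*
 * In v5.4 the core thread map type lives in libperf: this file now operates
 * on struct perf_thread_map through the perf_thread_map__*() helpers (note
 * that thread_map__reset()/realloc() are gone from here), and the reference
 * count is a refcount_t set with refcount_set().
 */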
struct perf_thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct perf_thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		refcount_set(&threads->refcnt, 1);
	}

	for (i=0; i<items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}

struct perf_thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct perf_thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		perf_thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		refcount_set(&threads->refcnt, 1);
	}

	return threads;
}

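/*
 * Scan every process in /proc.  When @uid is UINT_MAX the ownership check
 * is skipped entirely, which turns the scan into "all threads on the
 * system"; the two public wrappers below select between the two behaviours.
 */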
static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[NAME_MAX + 1 + 6];
	struct dirent *dirent, **namelist = NULL;
	struct perf_thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	refcount_set(&threads->refcnt, 1);

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		bool grow = false;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);

		if (uid != UINT_MAX) {
			struct stat st;

			if (stat(path, &st) != 0 || st.st_uid != uid)
				continue;
		}

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct perf_thread_map *tmp;

			tmp = perf_thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			perf_thread_map__set_pid(threads, threads->nr + i,
						    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

struct perf_thread_map *thread_map__new_all_cpus(void)
{
	return __thread_map__new_all_cpus(UINT_MAX);
}

struct perf_thread_map *thread_map__new_by_uid(uid_t uid)
{
	return __thread_map__new_all_cpus(uid);
}

struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}

static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct perf_thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = perf_thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			perf_thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}

struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct perf_thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return perf_thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each_entry(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = perf_thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		perf_thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	if (threads)
		refcount_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	strlist__delete(slist);
	goto out;
}

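/*
 * Compared with v4.6, thread_map__new_str() gained an @all_threads flag:
 * when no pid/uid is given and the flag is set, a system-wide map is built
 * instead of falling back to the tid string.
 */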
struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid, bool all_threads)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	if (all_threads)
		return thread_map__new_all_cpus();

	return thread_map__new_by_tid_str(tid);
}

size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", perf_thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}

static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		strim(*comm);
	}

	free(path);
	return err;
}

static void comm_init(struct perf_thread_map *map, int i)
{
	pid_t pid = perf_thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

void thread_map__read_comms(struct perf_thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}

static void thread_map__copy_event(struct perf_thread_map *threads,
				   struct perf_record_thread_map *event)
{
	unsigned i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	refcount_set(&threads->refcnt, 1);
}

struct perf_thread_map *thread_map__new_event(struct perf_record_thread_map *event)
{
	struct perf_thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}

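/*
 * Linear helpers over the map contents: a membership test by pid and a
 * removal by index that shifts the remaining entries down by one.
 */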
bool thread_map__has(struct perf_thread_map *threads, pid_t pid)
{
	int i;

	for (i = 0; i < threads->nr; ++i) {
		if (threads->map[i].pid == pid)
			return true;
	}

	return false;
}

int thread_map__remove(struct perf_thread_map *threads, int idx)
{
	int i;

	if (threads->nr < 1)
		return -EINVAL;

	if (idx >= threads->nr)
		return -EINVAL;

	/*
	 * Free the 'idx' item and shift the rest up.
	 */
	zfree(&threads->map[idx].comm);

	for (i = idx; i < threads->nr - 1; i++)
		threads->map[i] = threads->map[i + 1];

	threads->nr--;
	return 0;
}
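
As with v4.6, this code only builds inside the perf tree. A minimal sketch of the v5.4 flavour follows, assuming an in-tree build where "thread_map.h" is available and where libperf's perf_thread_map__put() (not shown in this file, and its header path here is an assumption) is used to drop the reference.

#include <stdio.h>
#include <sys/types.h>
#include "thread_map.h"		/* perf-internal header, assumed available in-tree */
#include <perf/threadmap.h>	/* assumed libperf header declaring perf_thread_map__put() */

/* Hypothetical helper: list all threads on the system except one tid. */
static void print_all_threads_except(pid_t tid)
{
	struct perf_thread_map *threads = thread_map__new_all_cpus();
	int i;

	if (threads == NULL)
		return;

	/* thread_map__remove() shifts the following entries down by one. */
	for (i = 0; i < threads->nr; i++) {
		if (perf_thread_map__pid(threads, i) == tid) {
			thread_map__remove(threads, i);
			break;
		}
	}

	thread_map__fprintf(threads, stdout);
	perf_thread_map__put(threads);	/* assumed libperf call dropping the reference */
}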