/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

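/*
 * Initialise a counter: no usage yet, an "unlimited" limit and soft limit
 * (RESOURCE_MAX), and an optional parent so counters can form a hierarchy.
 */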
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

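/*
 * Charge @val to a single counter with its lock already held.  If the new
 * usage would exceed the limit, the failure counter is bumped and -ENOMEM
 * is returned; with @force the charge is applied anyway, but the caller
 * still sees the error code.
 */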
int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
			      bool force)
{
	int ret = 0;

	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		ret = -ENOMEM;
		if (!force)
			return ret;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return ret;
}

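/*
 * Charge @val to @counter and to every ancestor up to the root.  Without
 * @force the walk stops at the first counter that hits its limit (reported
 * via @limit_fail_at) and the charges already applied below it are rolled
 * back; with @force every level is charged regardless.
 */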
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
				struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		if (r < 0 && !ret) {
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	if (ret < 0 && !force) {
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}

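/* Hierarchical charge that fails (and is undone) when any limit is hit. */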
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, false);
}

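/* Hierarchical charge that always succeeds but still reports limit hits. */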
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
			      struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, true);
}

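/*
 * Remove @val from a single counter with its lock held, clamping (and
 * warning) if the caller tries to uncharge more than is currently used.
 */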
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

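/*
 * Uncharge @val from @counter and from every ancestor below @top, so a
 * charge can be moved within a hierarchy without touching the common
 * ancestors.  @top itself is not uncharged; passing NULL walks to the root.
 */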
void res_counter_uncharge_until(struct res_counter *counter,
				struct res_counter *top,
				unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save(flags);
	for (c = counter; c != top; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
}

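/* Uncharge @val from @counter and from all of its ancestors. */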
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	res_counter_uncharge_until(counter, NULL, val);
}

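/*
 * Map a RES_* member index onto the corresponding field of the counter.
 * An unknown index is a programming error, hence the BUG().
 */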
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}

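/*
 * Format one member for a userspace read, either through the caller's
 * @read_strategy or as a plain decimal number, and copy it out with
 * simple_read_from_buffer().
 */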
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

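/*
 * On 32-bit kernels a 64-bit load is not atomic, so the member is read
 * under the counter lock; 64-bit kernels can read it directly.
 */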
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif

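/*
 * Parse a limit written by userspace: "-1" means unlimited (RESOURCE_MAX),
 * anything else goes through memparse() (so K/M/G suffixes work) and is
 * rounded up to a page boundary.
 */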
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	*res = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}

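/*
 * Store a value written from userspace into one member, using the caller's
 * @write_strategy to parse the string if one is supplied, otherwise
 * treating it as a plain decimal number.
 */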
int res_counter_write(struct res_counter *counter, int member,
		      const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}
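
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * controller might charge and uncharge one page against a parent/child
 * pair of counters.  The counter names and the PAGE_SIZE charge are
 * assumptions made for this example; the real callers were the memory and
 * hugetlb cgroup controllers.
 */
#if 0
static struct res_counter example_parent, example_child;

static int example_charge_one_page(void)
{
	struct res_counter *fail_at;
	int ret;

	res_counter_init(&example_parent, NULL);
	res_counter_init(&example_child, &example_parent);

	/* The charge propagates from the child up to the root; on failure
	 * fail_at points at the counter whose limit was hit and nothing
	 * remains charged. */
	ret = res_counter_charge(&example_child, PAGE_SIZE, &fail_at);
	if (ret)
		return ret;

	/* ... use the resource ... */

	res_counter_uncharge(&example_child, PAGE_SIZE);
	return 0;
}
#endif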