Loading...
1/*
2 * resource cgroups
3 *
4 * Copyright 2007 OpenVZ SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 *
8 */
9
10#include <linux/types.h>
11#include <linux/parser.h>
12#include <linux/fs.h>
13#include <linux/res_counter.h>
14#include <linux/uaccess.h>
15#include <linux/mm.h>
16
17void res_counter_init(struct res_counter *counter, struct res_counter *parent)
18{
19 spin_lock_init(&counter->lock);
20 counter->limit = RES_COUNTER_MAX;
21 counter->soft_limit = RES_COUNTER_MAX;
22 counter->parent = parent;
23}
24
25static u64 res_counter_uncharge_locked(struct res_counter *counter,
26 unsigned long val)
27{
28 if (WARN_ON(counter->usage < val))
29 val = counter->usage;
30
31 counter->usage -= val;
32 return counter->usage;
33}
34
35static int res_counter_charge_locked(struct res_counter *counter,
36 unsigned long val, bool force)
37{
38 int ret = 0;
39
40 if (counter->usage + val > counter->limit) {
41 counter->failcnt++;
42 ret = -ENOMEM;
43 if (!force)
44 return ret;
45 }
46
47 counter->usage += val;
48 if (counter->usage > counter->max_usage)
49 counter->max_usage = counter->usage;
50 return ret;
51}
52
/*
 * Charge @val against @counter and every ancestor up to the root.
 *
 * The whole walk runs with local IRQs disabled, taking each counter's
 * spinlock individually (child first, then parents).  On the first
 * level that is over its limit:
 *   - without @force, the walk stops and the charges already applied
 *     to [counter, c) are rolled back;
 *   - with @force, the walk continues so every level gets charged
 *     anyway, but the first failing counter and its error are still
 *     reported.
 *
 * *@limit_fail_at is set to the first counter over its limit, or NULL
 * when everything fits.  Returns 0 on full success, -ENOMEM otherwise.
 */
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
		struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		/* remember only the FIRST failure; keep walking if forced */
		if (r < 0 && !ret) {
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	/* roll back the partial charge: c is the level that failed */
	if (ret < 0 && !force) {
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}
86
87int res_counter_charge(struct res_counter *counter, unsigned long val,
88 struct res_counter **limit_fail_at)
89{
90 return __res_counter_charge(counter, val, limit_fail_at, false);
91}
92
93int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
94 struct res_counter **limit_fail_at)
95{
96 return __res_counter_charge(counter, val, limit_fail_at, true);
97}
98
99u64 res_counter_uncharge_until(struct res_counter *counter,
100 struct res_counter *top,
101 unsigned long val)
102{
103 unsigned long flags;
104 struct res_counter *c;
105 u64 ret = 0;
106
107 local_irq_save(flags);
108 for (c = counter; c != top; c = c->parent) {
109 u64 r;
110 spin_lock(&c->lock);
111 r = res_counter_uncharge_locked(c, val);
112 if (c == counter)
113 ret = r;
114 spin_unlock(&c->lock);
115 }
116 local_irq_restore(flags);
117 return ret;
118}
119
120u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
121{
122 return res_counter_uncharge_until(counter, NULL, val);
123}
124
125static inline unsigned long long *
126res_counter_member(struct res_counter *counter, int member)
127{
128 switch (member) {
129 case RES_USAGE:
130 return &counter->usage;
131 case RES_MAX_USAGE:
132 return &counter->max_usage;
133 case RES_LIMIT:
134 return &counter->limit;
135 case RES_FAILCNT:
136 return &counter->failcnt;
137 case RES_SOFT_LIMIT:
138 return &counter->soft_limit;
139 };
140
141 BUG();
142 return NULL;
143}
144
145ssize_t res_counter_read(struct res_counter *counter, int member,
146 const char __user *userbuf, size_t nbytes, loff_t *pos,
147 int (*read_strategy)(unsigned long long val, char *st_buf))
148{
149 unsigned long long *val;
150 char buf[64], *s;
151
152 s = buf;
153 val = res_counter_member(counter, member);
154 if (read_strategy)
155 s += read_strategy(*val, s);
156 else
157 s += sprintf(s, "%llu\n", *val);
158 return simple_read_from_buffer((void __user *)userbuf, nbytes,
159 pos, buf, s - buf);
160}
161
/*
 * Read one counter field as a u64.
 *
 * On 32-bit kernels a 64-bit load is not a single atomic access, so
 * the value is read under counter->lock; on 64-bit kernels the plain
 * load suffices and the lock is skipped.
 */
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
180
181int res_counter_memparse_write_strategy(const char *buf,
182 unsigned long long *resp)
183{
184 char *end;
185 unsigned long long res;
186
187 /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
188 if (*buf == '-') {
189 res = simple_strtoull(buf + 1, &end, 10);
190 if (res != 1 || *end != '\0')
191 return -EINVAL;
192 *resp = RES_COUNTER_MAX;
193 return 0;
194 }
195
196 res = memparse(buf, &end);
197 if (*end != '\0')
198 return -EINVAL;
199
200 if (PAGE_ALIGN(res) >= res)
201 res = PAGE_ALIGN(res);
202 else
203 res = RES_COUNTER_MAX;
204
205 *resp = res;
206
207 return 0;
208}
1/*
2 * resource cgroups
3 *
4 * Copyright 2007 OpenVZ SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 *
8 */
9
10#include <linux/types.h>
11#include <linux/parser.h>
12#include <linux/fs.h>
13#include <linux/res_counter.h>
14#include <linux/uaccess.h>
15#include <linux/mm.h>
16
17void res_counter_init(struct res_counter *counter, struct res_counter *parent)
18{
19 spin_lock_init(&counter->lock);
20 counter->limit = RESOURCE_MAX;
21 counter->soft_limit = RESOURCE_MAX;
22 counter->parent = parent;
23}
24
25int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
26{
27 if (counter->usage + val > counter->limit) {
28 counter->failcnt++;
29 return -ENOMEM;
30 }
31
32 counter->usage += val;
33 if (counter->usage > counter->max_usage)
34 counter->max_usage = counter->usage;
35 return 0;
36}
37
38int res_counter_charge(struct res_counter *counter, unsigned long val,
39 struct res_counter **limit_fail_at)
40{
41 int ret;
42 unsigned long flags;
43 struct res_counter *c, *u;
44
45 *limit_fail_at = NULL;
46 local_irq_save(flags);
47 for (c = counter; c != NULL; c = c->parent) {
48 spin_lock(&c->lock);
49 ret = res_counter_charge_locked(c, val);
50 spin_unlock(&c->lock);
51 if (ret < 0) {
52 *limit_fail_at = c;
53 goto undo;
54 }
55 }
56 ret = 0;
57 goto done;
58undo:
59 for (u = counter; u != c; u = u->parent) {
60 spin_lock(&u->lock);
61 res_counter_uncharge_locked(u, val);
62 spin_unlock(&u->lock);
63 }
64done:
65 local_irq_restore(flags);
66 return ret;
67}
68
69void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
70{
71 if (WARN_ON(counter->usage < val))
72 val = counter->usage;
73
74 counter->usage -= val;
75}
76
77void res_counter_uncharge(struct res_counter *counter, unsigned long val)
78{
79 unsigned long flags;
80 struct res_counter *c;
81
82 local_irq_save(flags);
83 for (c = counter; c != NULL; c = c->parent) {
84 spin_lock(&c->lock);
85 res_counter_uncharge_locked(c, val);
86 spin_unlock(&c->lock);
87 }
88 local_irq_restore(flags);
89}
90
91
92static inline unsigned long long *
93res_counter_member(struct res_counter *counter, int member)
94{
95 switch (member) {
96 case RES_USAGE:
97 return &counter->usage;
98 case RES_MAX_USAGE:
99 return &counter->max_usage;
100 case RES_LIMIT:
101 return &counter->limit;
102 case RES_FAILCNT:
103 return &counter->failcnt;
104 case RES_SOFT_LIMIT:
105 return &counter->soft_limit;
106 };
107
108 BUG();
109 return NULL;
110}
111
/*
 * Read handler: format the @member field of @counter into a small
 * stack buffer and copy it out to @userbuf via
 * simple_read_from_buffer().
 *
 * @read_strategy, when non-NULL, formats the value and returns the
 * number of bytes written; otherwise plain "%llu\n" is used.
 * NOTE(review): the field is read without counter->lock, so a torn
 * read is possible on 32-bit — confirm callers tolerate that.
 */
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}
128
/*
 * Read one counter field as a u64.
 *
 * On 32-bit kernels a 64-bit load is not a single atomic access, so
 * the value is read under counter->lock; on 64-bit kernels the plain
 * load suffices and the lock is skipped.
 */
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
147
148int res_counter_memparse_write_strategy(const char *buf,
149 unsigned long long *res)
150{
151 char *end;
152
153 /* return RESOURCE_MAX(unlimited) if "-1" is specified */
154 if (*buf == '-') {
155 *res = simple_strtoull(buf + 1, &end, 10);
156 if (*res != 1 || *end != '\0')
157 return -EINVAL;
158 *res = RESOURCE_MAX;
159 return 0;
160 }
161
162 /* FIXME - make memparse() take const char* args */
163 *res = memparse((char *)buf, &end);
164 if (*end != '\0')
165 return -EINVAL;
166
167 *res = PAGE_ALIGN(*res);
168 return 0;
169}
170
171int res_counter_write(struct res_counter *counter, int member,
172 const char *buf, write_strategy_fn write_strategy)
173{
174 char *end;
175 unsigned long flags;
176 unsigned long long tmp, *val;
177
178 if (write_strategy) {
179 if (write_strategy(buf, &tmp))
180 return -EINVAL;
181 } else {
182 tmp = simple_strtoull(buf, &end, 10);
183 if (*end != '\0')
184 return -EINVAL;
185 }
186 spin_lock_irqsave(&counter->lock, flags);
187 val = res_counter_member(counter, member);
188 *val = tmp;
189 spin_unlock_irqrestore(&counter->lock, flags);
190 return 0;
191}