about summary refs log tree commit diff
path: root/kernel/res_counter.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/res_counter.c')
-rw-r--r--  kernel/res_counter.c  182
1 files changed, 129 insertions, 53 deletions
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index efbfc0fc232..e791130f85a 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -12,52 +12,115 @@
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
-void res_counter_init(struct res_counter *counter)
+void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
spin_lock_init(&counter->lock);
- counter->limit = (unsigned long long)LLONG_MAX;
+ counter->limit = RES_COUNTER_MAX;
+ counter->soft_limit = RES_COUNTER_MAX;
+ counter->parent = parent;
}
-int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
+static u64 res_counter_uncharge_locked(struct res_counter *counter,
+ unsigned long val)
{
+ if (WARN_ON(counter->usage < val))
+ val = counter->usage;
+
+ counter->usage -= val;
+ return counter->usage;
+}
+
+static int res_counter_charge_locked(struct res_counter *counter,
+ unsigned long val, bool force)
+{
+ int ret = 0;
+
if (counter->usage + val > counter->limit) {
counter->failcnt++;
- return -ENOMEM;
+ ret = -ENOMEM;
+ if (!force)
+ return ret;
}
counter->usage += val;
- return 0;
+ if (counter->usage > counter->max_usage)
+ counter->max_usage = counter->usage;
+ return ret;
}
-int res_counter_charge(struct res_counter *counter, unsigned long val)
+static int __res_counter_charge(struct res_counter *counter, unsigned long val,
+ struct res_counter **limit_fail_at, bool force)
{
- int ret;
+ int ret, r;
unsigned long flags;
+ struct res_counter *c, *u;
+
+ r = ret = 0;
+ *limit_fail_at = NULL;
+ local_irq_save(flags);
+ for (c = counter; c != NULL; c = c->parent) {
+ spin_lock(&c->lock);
+ r = res_counter_charge_locked(c, val, force);
+ spin_unlock(&c->lock);
+ if (r < 0 && !ret) {
+ ret = r;
+ *limit_fail_at = c;
+ if (!force)
+ break;
+ }
+ }
+
+ if (ret < 0 && !force) {
+ for (u = counter; u != c; u = u->parent) {
+ spin_lock(&u->lock);
+ res_counter_uncharge_locked(u, val);
+ spin_unlock(&u->lock);
+ }
+ }
+ local_irq_restore(flags);
- spin_lock_irqsave(&counter->lock, flags);
- ret = res_counter_charge_locked(counter, val);
- spin_unlock_irqrestore(&counter->lock, flags);
return ret;
}
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
+int res_counter_charge(struct res_counter *counter, unsigned long val,
+ struct res_counter **limit_fail_at)
{
- if (WARN_ON(counter->usage < val))
- val = counter->usage;
+ return __res_counter_charge(counter, val, limit_fail_at, false);
+}
- counter->usage -= val;
+int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
+ struct res_counter **limit_fail_at)
+{
+ return __res_counter_charge(counter, val, limit_fail_at, true);
}
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val)
{
unsigned long flags;
-
- spin_lock_irqsave(&counter->lock, flags);
- res_counter_uncharge_locked(counter, val);
- spin_unlock_irqrestore(&counter->lock, flags);
+ struct res_counter *c;
+ u64 ret = 0;
+
+ local_irq_save(flags);
+ for (c = counter; c != top; c = c->parent) {
+ u64 r;
+ spin_lock(&c->lock);
+ r = res_counter_uncharge_locked(c, val);
+ if (c == counter)
+ ret = r;
+ spin_unlock(&c->lock);
+ }
+ local_irq_restore(flags);
+ return ret;
}
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+ return res_counter_uncharge_until(counter, NULL, val);
+}
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
@@ -65,10 +128,14 @@ res_counter_member(struct res_counter *counter, int member)
switch (member) {
case RES_USAGE:
return &counter->usage;
+ case RES_MAX_USAGE:
+ return &counter->max_usage;
case RES_LIMIT:
return &counter->limit;
case RES_FAILCNT:
return &counter->failcnt;
+ case RES_SOFT_LIMIT:
+ return &counter->soft_limit;
};
BUG();
@@ -92,44 +159,53 @@ ssize_t res_counter_read(struct res_counter *counter, int member,
pos, buf, s - buf);
}
-ssize_t res_counter_write(struct res_counter *counter, int member,
- const char __user *userbuf, size_t nbytes, loff_t *pos,
- int (*write_strategy)(char *st_buf, unsigned long long *val))
+#if BITS_PER_LONG == 32
+u64 res_counter_read_u64(struct res_counter *counter, int member)
{
- int ret;
- char *buf, *end;
unsigned long flags;
- unsigned long long tmp, *val;
-
- buf = kmalloc(nbytes + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (buf == NULL)
- goto out;
+ u64 ret;
- buf[nbytes] = '\0';
- ret = -EFAULT;
- if (copy_from_user(buf, userbuf, nbytes))
- goto out_free;
-
- ret = -EINVAL;
-
- strstrip(buf);
- if (write_strategy) {
- if (write_strategy(buf, &tmp)) {
- goto out_free;
- }
- } else {
- tmp = simple_strtoull(buf, &end, 10);
- if (*end != '\0')
- goto out_free;
- }
spin_lock_irqsave(&counter->lock, flags);
- val = res_counter_member(counter, member);
- *val = tmp;
+ ret = *res_counter_member(counter, member);
spin_unlock_irqrestore(&counter->lock, flags);
- ret = nbytes;
-out_free:
- kfree(buf);
-out:
+
return ret;
}
+#else
+u64 res_counter_read_u64(struct res_counter *counter, int member)
+{
+ return *res_counter_member(counter, member);
+}
+#endif
+
+int res_counter_memparse_write_strategy(const char *buf,
+ unsigned long long *resp)
+{
+ char *end;
+ unsigned long long res;
+
+ /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
+ if (*buf == '-') {
+ int rc = kstrtoull(buf + 1, 10, &res);
+
+ if (rc)
+ return rc;
+ if (res != 1)
+ return -EINVAL;
+ *resp = RES_COUNTER_MAX;
+ return 0;
+ }
+
+ res = memparse(buf, &end);
+ if (*end != '\0')
+ return -EINVAL;
+
+ if (PAGE_ALIGN(res) >= res)
+ res = PAGE_ALIGN(res);
+ else
+ res = RES_COUNTER_MAX;
+
+ *resp = res;
+
+ return 0;
+}