Diffstat (limited to 'include/linux/res_counter.h')
-rw-r--r--  include/linux/res_counter.h | 140
1 file changed, 118 insertions, 22 deletions
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 61363ce896d..56b7bc32db4 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -9,9 +9,12 @@
*
* Author: Pavel Emelianov <xemul@openvz.org>
*
+ * See Documentation/cgroups/resource_counter.txt for more
+ * info about what this counter is.
*/
-#include <linux/cgroup.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
/*
* The core object. the cgroup that wishes to account for some
@@ -25,10 +28,18 @@ struct res_counter {
*/
unsigned long long usage;
/*
+ * the maximal value of the usage since the counter was created
+ */
+ unsigned long long max_usage;
+ /*
* the limit that usage cannot exceed
*/
unsigned long long limit;
/*
+ * the limit that usage can exceed
+ */
+ unsigned long long soft_limit;
+ /*
* the number of unsuccessful attempts to consume the resource
*/
unsigned long long failcnt;
@@ -37,10 +48,17 @@ struct res_counter {
* the routines below consider this to be IRQ-safe
*/
spinlock_t lock;
+ /*
+ * Parent counter, used for hierarchical resource accounting
+ */
+ struct res_counter *parent;
};
-/*
+#define RES_COUNTER_MAX ULLONG_MAX
+
+/**
* Helpers to interact with userspace
+ * res_counter_read_u64() - returns the value of the specified member.
* res_counter_read/_write - put/get the specified fields from the
* res_counter struct to/from the user
*
@@ -51,12 +69,14 @@ struct res_counter {
* @pos: and the offset.
*/
+u64 res_counter_read_u64(struct res_counter *counter, int member);
+
ssize_t res_counter_read(struct res_counter *counter, int member,
const char __user *buf, size_t nbytes, loff_t *pos,
int (*read_strategy)(unsigned long long val, char *s));
-ssize_t res_counter_write(struct res_counter *counter, int member,
- const char __user *buf, size_t nbytes, loff_t *pos,
- int (*write_strategy)(char *buf, unsigned long long *val));
+
+int res_counter_memparse_write_strategy(const char *buf,
+ unsigned long long *res);
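
A rough sketch of how a controller might wire these helpers into its cgroup
file handlers; the mycg_* functions below are hypothetical, only
res_counter_read_u64(), res_counter_memparse_write_strategy() and
res_counter_set_limit() come from this header:

static u64 mycg_usage_read(struct res_counter *cnt)
{
	/* report the current usage to userspace */
	return res_counter_read_u64(cnt, RES_USAGE);
}

static int mycg_limit_write(struct res_counter *cnt, const char *buf)
{
	unsigned long long val;
	int ret;

	/* parse strings such as "64M" or "1G" written by userspace */
	ret = res_counter_memparse_write_strategy(buf, &val);
	if (ret)
		return ret;
	return res_counter_set_limit(cnt, val);
}
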
/*
* the field descriptors. one for each member of res_counter
@@ -64,15 +84,17 @@ ssize_t res_counter_write(struct res_counter *counter, int member,
enum {
RES_USAGE,
+ RES_MAX_USAGE,
RES_LIMIT,
RES_FAILCNT,
+ RES_SOFT_LIMIT,
};
/*
* helpers for accounting
*/
-void res_counter_init(struct res_counter *counter);
+void res_counter_init(struct res_counter *counter, struct res_counter *parent);
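
A minimal sketch of hierarchical setup, assuming a hypothetical per-cgroup
state structure; passing the parent's counter to res_counter_init() is what
links a child's charges to its ancestors:

/* hypothetical controller state embedding a res_counter */
struct mycg_state {
	struct res_counter res;
};

static void mycg_counter_init(struct mycg_state *cg, struct mycg_state *parent)
{
	/* NULL for the root group, otherwise chain to the parent's counter */
	res_counter_init(&cg->res, parent ? &parent->res : NULL);
}
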
/*
* charge - try to consume more resource.
@@ -82,11 +104,17 @@ void res_counter_init(struct res_counter *counter);
* units, e.g. numbers, bytes, Kbytes, etc
*
* returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit _locked call expects the counter->lock to be taken
+ * counter->limit
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if after the current
+ * charge we are over the limit.
*/
-int res_counter_charge_locked(struct res_counter *counter, unsigned long val);
-int res_counter_charge(struct res_counter *counter, unsigned long val);
+int __must_check res_counter_charge(struct res_counter *counter,
+ unsigned long val, struct res_counter **limit_fail_at);
+int res_counter_charge_nofail(struct res_counter *counter,
+ unsigned long val, struct res_counter **limit_fail_at);
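
A hedged example of the charge path; mycg_try_charge() is hypothetical, but
it shows how @limit_fail_at reports the counter in the hierarchy whose limit
was hit:

static int mycg_try_charge(struct res_counter *cnt, unsigned long nr)
{
	struct res_counter *fail_res;
	int ret;

	ret = res_counter_charge(cnt, nr, &fail_res);
	if (ret) {
		/*
		 * fail_res points at the counter (possibly an ancestor)
		 * that refused the charge; a real controller would try to
		 * reclaim against it, or fall back to
		 * res_counter_charge_nofail() when failure is not an option.
		 */
		return ret;
	}
	return 0;
}
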
/*
* uncharge - tell that some portion of the resource is released
@@ -95,33 +123,101 @@ int res_counter_charge(struct res_counter *counter, unsigned long val);
* @val: the amount of the resource
*
* these calls check for usage underflow and show a warning on the console
- * _locked call expects the counter->lock to be taken
+ *
+ * returns the total charges still present in @counter.
*/
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
-static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val);
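
A short sketch of the release side; mycg_release() is hypothetical, and
res_counter_uncharge_until() is the variant to use when charges should only
be dropped from @counter up to a given ancestor @top:

static u64 mycg_release(struct res_counter *cnt, unsigned long nr)
{
	/*
	 * give the resource back; the returned value is the total amount
	 * still charged to @cnt, useful to decide whether the group can
	 * be torn down
	 */
	return res_counter_uncharge(cnt, nr);
}
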
+/**
+ * res_counter_margin - calculate chargeable space of a counter
+ * @cnt: the counter
+ *
+ * Returns the difference between the hard limit and the current usage
+ * of resource counter @cnt.
+ */
+static inline unsigned long long res_counter_margin(struct res_counter *cnt)
{
- if (cnt->usage < cnt->limit)
- return true;
+ unsigned long long margin;
+ unsigned long flags;
- return false;
+ spin_lock_irqsave(&cnt->lock, flags);
+ if (cnt->limit > cnt->usage)
+ margin = cnt->limit - cnt->usage;
+ else
+ margin = 0;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return margin;
}
-/*
- * Helper function to detect if the cgroup is within it's limit or
- * not. It's currently called from cgroup_rss_prepare()
+/**
+ * Get the difference between the usage and the soft limit
+ * @cnt: The counter
+ *
+ * Returns 0 if usage is less than or equal to soft limit
+ * The difference between usage and soft limit, otherwise.
*/
-static inline bool res_counter_check_under_limit(struct res_counter *cnt)
+static inline unsigned long long
+res_counter_soft_limit_excess(struct res_counter *cnt)
+{
+ unsigned long long excess;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ if (cnt->usage <= cnt->soft_limit)
+ excess = 0;
+ else
+ excess = cnt->usage - cnt->soft_limit;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return excess;
+}
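
For illustration only, a hypothetical reclaim heuristic built on the two
inline helpers above; nothing in this header mandates this policy:

static unsigned long long mycg_reclaim_target(struct res_counter *cnt)
{
	/* still chargeable room under the hard limit: nothing to do */
	if (res_counter_margin(cnt))
		return 0;
	/* otherwise aim to push usage back down to the soft limit */
	return res_counter_soft_limit_excess(cnt);
}
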
+
+static inline void res_counter_reset_max(struct res_counter *cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ cnt->max_usage = cnt->usage;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+}
+
+static inline void res_counter_reset_failcnt(struct res_counter *cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ cnt->failcnt = 0;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+}
+
+static inline int res_counter_set_limit(struct res_counter *cnt,
+ unsigned long long limit)
{
- bool ret;
unsigned long flags;
+ int ret = -EBUSY;
spin_lock_irqsave(&cnt->lock, flags);
- ret = res_counter_limit_check_locked(cnt);
+ if (cnt->usage <= limit) {
+ cnt->limit = limit;
+ ret = 0;
+ }
spin_unlock_irqrestore(&cnt->lock, flags);
return ret;
}
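
A brief usage note, with a hypothetical wrapper: res_counter_set_limit()
refuses to shrink the limit below the current usage, so callers are expected
to handle -EBUSY.

static int mycg_update_limit(struct res_counter *cnt, unsigned long long new_limit)
{
	/* -EBUSY means usage is already above @new_limit; reclaim and retry */
	return res_counter_set_limit(cnt, new_limit);
}
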
+static inline int
+res_counter_set_soft_limit(struct res_counter *cnt,
+ unsigned long long soft_limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ cnt->soft_limit = soft_limit;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return 0;
+}
+
#endif