Diffstat (limited to 'include/linux/res_counter.h')
-rw-r--r--   include/linux/res_counter.h   98
1 file changed, 72 insertions(+), 26 deletions(-)
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 271c1c2c9f6..56b7bc32db4 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -9,11 +9,12 @@
  *
  * Author: Pavel Emelianov <xemul@openvz.org>
  *
- * See Documentation/controllers/resource_counter.txt for more
+ * See Documentation/cgroups/resource_counter.txt for more
  * info about what this counter is.
  */
 
-#include <linux/cgroup.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
 
 /*
  * The core object. the cgroup that wishes to account for some
@@ -35,6 +36,10 @@ struct res_counter {
 	 */
 	unsigned long long limit;
 	/*
+	 * the limit that usage can be exceed
+	 */
+	unsigned long long soft_limit;
+	/*
 	 * the number of unsuccessful attempts to consume the resource
 	 */
 	unsigned long long failcnt;
@@ -43,8 +48,14 @@ struct res_counter {
 	 * the routines below consider this to be IRQ-safe
 	 */
 	spinlock_t lock;
+	/*
+	 * Parent counter, used for hierarchial resource accounting
+	 */
+	struct res_counter *parent;
 };
 
+#define RES_COUNTER_MAX ULLONG_MAX
+
 /**
  * Helpers to interact with userspace
  * res_counter_read_u64() - returns the value of the specified member.
@@ -64,14 +75,9 @@ ssize_t res_counter_read(struct res_counter *counter, int member,
 		const char __user *buf, size_t nbytes, loff_t *pos,
 		int (*read_strategy)(unsigned long long val, char *s));
 
-typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);
-
 int res_counter_memparse_write_strategy(const char *buf,
 					unsigned long long *res);
 
-int res_counter_write(struct res_counter *counter, int member,
-		      const char *buffer, write_strategy_fn write_strategy);
-
 /*
  * the field descriptors. one for each member of res_counter
  */
@@ -81,13 +87,14 @@ enum {
 	RES_MAX_USAGE,
 	RES_LIMIT,
 	RES_FAILCNT,
+	RES_SOFT_LIMIT,
 };
 
 /*
  * helpers for accounting
  */
 
-void res_counter_init(struct res_counter *counter);
+void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 
 /*
  * charge - try to consume more resource.
@@ -97,13 +104,17 @@ void res_counter_init(struct res_counter *counter);
  * units, e.g. numbers, bytes, Kbytes, etc
  *
  * returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit _locked call expects the counter->lock to be taken
+ * counter->limit
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if the after the current
+ * charge we are over limit.
  */
 
-int __must_check res_counter_charge_locked(struct res_counter *counter,
-		unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-		unsigned long val);
+		unsigned long val, struct res_counter **limit_fail_at);
+int res_counter_charge_nofail(struct res_counter *counter,
+		unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -112,33 +123,56 @@ int __must_check res_counter_charge(struct res_counter *counter,
  * @val: the amount of the resource
  *
  * these calls check for usage underflow and show a warning on the console
- * _locked call expects the counter->lock to be taken
+ *
+ * returns the total charges still present in @counter.
  */
 
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
-static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+			       struct res_counter *top,
+			       unsigned long val);
+/**
+ * res_counter_margin - calculate chargeable space of a counter
+ * @cnt: the counter
+ *
+ * Returns the difference between the hard limit and the current usage
+ * of resource counter @cnt.
+ */
+static inline unsigned long long res_counter_margin(struct res_counter *cnt)
 {
-	if (cnt->usage < cnt->limit)
-		return true;
+	unsigned long long margin;
+	unsigned long flags;
 
-	return false;
+	spin_lock_irqsave(&cnt->lock, flags);
+	if (cnt->limit > cnt->usage)
+		margin = cnt->limit - cnt->usage;
+	else
+		margin = 0;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return margin;
 }
 
-/*
- * Helper function to detect if the cgroup is within it's limit or
- * not. It's currently called from cgroup_rss_prepare()
+/**
+ * Get the difference between the usage and the soft limit
+ * @cnt: The counter
+ *
+ * Returns 0 if usage is less than or equal to soft limit
+ * The difference between usage and soft limit, otherwise.
  */
-static inline bool res_counter_check_under_limit(struct res_counter *cnt)
+static inline unsigned long long
+res_counter_soft_limit_excess(struct res_counter *cnt)
 {
-	bool ret;
+	unsigned long long excess;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cnt->lock, flags);
-	ret = res_counter_limit_check_locked(cnt);
+	if (cnt->usage <= cnt->soft_limit)
+		excess = 0;
+	else
+		excess = cnt->usage - cnt->soft_limit;
 	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
+	return excess;
 }
 
 static inline void res_counter_reset_max(struct res_counter *cnt)
@@ -174,4 +208,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
 	return ret;
 }
 
+static inline int
+res_counter_set_soft_limit(struct res_counter *cnt,
+			   unsigned long long soft_limit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	cnt->soft_limit = soft_limit;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return 0;
+}
+
 #endif
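The hunks above rework the charging API as well as the data layout, so a brief usage sketch may help. The following example is illustrative and hypothetical, not taken from the patch or the kernel tree: it relies only on the declarations visible in the new header (res_counter_init() with a parent, res_counter_charge() with its limit_fail_at out-parameter, res_counter_uncharge(), and the soft-limit helpers). The counter names, byte values, and the surrounding function are invented.

/*
 * Illustrative sketch only -- not part of the patch above.  Shows how a
 * hypothetical controller might drive the hierarchical charge/uncharge
 * API declared in the new include/linux/res_counter.h.
 */
#include <linux/res_counter.h>
#include <linux/printk.h>

static struct res_counter parent_counter;
static struct res_counter child_counter;

static int example_charge_path(void)
{
	struct res_counter *fail_at;
	int ret;

	/* Two-level hierarchy: a charge against the child is also
	 * propagated to (and limited by) the parent. */
	res_counter_init(&parent_counter, NULL);
	res_counter_init(&child_counter, &parent_counter);

	res_counter_set_limit(&child_counter, 1024 * 1024);      /* hard limit: 1 MiB   */
	res_counter_set_soft_limit(&child_counter, 512 * 1024);  /* soft limit: 512 KiB */

	/* Try to charge 4 KiB; on failure, @fail_at points at the counter
	 * in the hierarchy whose hard limit was exceeded. */
	ret = res_counter_charge(&child_counter, 4096, &fail_at);
	if (ret < 0)
		return ret;

	/* Soft limits are advisory: the excess is reported, not enforced. */
	if (res_counter_soft_limit_excess(&child_counter))
		pr_info("res_counter example: %llu bytes over soft limit\n",
			res_counter_soft_limit_excess(&child_counter));

	/* Drop the charge again; the uncharge propagates up to the root
	 * and returns the charges still present in the child. */
	res_counter_uncharge(&child_counter, 4096);
	return 0;
}

Note how the limit_fail_at out-parameter lets a caller report which level of the hierarchy rejected the charge, while res_counter_charge_nofail() (not used above) charges unconditionally and only signals that the limit was exceeded after the fact.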
