Diffstat (limited to 'include/linux/res_counter.h')
-rw-r--r--	include/linux/res_counter.h	84
1 files changed, 33 insertions, 51 deletions
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fcb9884df61..56b7bc32db4 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -13,7 +13,8 @@
  * info about what this counter is.
  */
 
-#include <linux/cgroup.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
 
 /*
  * The core object. the cgroup that wishes to account for some
@@ -53,7 +54,7 @@ struct res_counter {
 	struct res_counter *parent;
 };
 
-#define RESOURCE_MAX (unsigned long long)LLONG_MAX
+#define RES_COUNTER_MAX ULLONG_MAX
 
 /**
  * Helpers to interact with userspace
@@ -74,14 +75,9 @@ ssize_t res_counter_read(struct res_counter *counter, int member,
 		const char __user *buf, size_t nbytes, loff_t *pos,
 		int (*read_strategy)(unsigned long long val, char *s));
 
-typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);
-
 int res_counter_memparse_write_strategy(const char *buf,
 					unsigned long long *res);
 
-int res_counter_write(struct res_counter *counter, int member,
-		      const char *buffer, write_strategy_fn write_strategy);
-
 /*
  * the field descriptors. one for each member of res_counter
  */
@@ -108,13 +104,17 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
  * units, e.g. numbers, bytes, Kbytes, etc
  *
  * returns 0 on success and <0 if the counter->usage will exceed the
- * counter->limit _locked call expects the counter->lock to be taken
+ * counter->limit
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if the after the current
+ * charge we are over limit.
  */
 
-int __must_check res_counter_charge_locked(struct res_counter *counter,
-		unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
 		unsigned long val, struct res_counter **limit_fail_at);
+int res_counter_charge_nofail(struct res_counter *counter,
+		unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -123,26 +123,34 @@ int __must_check res_counter_charge(struct res_counter *counter,
  * @val: the amount of the resource
  *
  * these calls check for usage underflow and show a warning on the console
- * _locked call expects the counter->lock to be taken
+ *
+ * returns the total charges still present in @counter.
  */
 
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
-
-static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
-{
-	if (cnt->usage < cnt->limit)
-		return true;
-
-	return false;
-}
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
-static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+			       struct res_counter *top,
+			       unsigned long val);
+/**
+ * res_counter_margin - calculate chargeable space of a counter
+ * @cnt: the counter
+ *
+ * Returns the difference between the hard limit and the current usage
+ * of resource counter @cnt.
+ */
+static inline unsigned long long res_counter_margin(struct res_counter *cnt)
 {
-	if (cnt->usage < cnt->soft_limit)
-		return true;
+	unsigned long long margin;
+	unsigned long flags;
 
-	return false;
+	spin_lock_irqsave(&cnt->lock, flags);
+	if (cnt->limit > cnt->usage)
+		margin = cnt->limit - cnt->usage;
+	else
+		margin = 0;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return margin;
 }
 
 /**
@@ -167,32 +175,6 @@ res_counter_soft_limit_excess(struct res_counter *cnt)
 	return excess;
 }
 
-/*
- * Helper function to detect if the cgroup is within it's limit or
- * not. It's currently called from cgroup_rss_prepare()
- */
-static inline bool res_counter_check_under_limit(struct res_counter *cnt)
-{
-	bool ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	ret = res_counter_limit_check_locked(cnt);
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
-}
-
-static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
-{
-	bool ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	ret = res_counter_soft_limit_check_locked(cnt);
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
-}
-
 static inline void res_counter_reset_max(struct res_counter *cnt)
 {
 	unsigned long flags;
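For readers following the interface change, the sketch below shows how a caller might use the revised API: res_counter_charge() still returns 0 on success and reports the counter that ran into its limit through limit_fail_at, res_counter_uncharge() now returns the charges left in the counter, and res_counter_margin() gives the room remaining under the hard limit. The mycg_* names and struct mycg are hypothetical and purely illustrative; this is a minimal sketch of a caller, not code from the patch.

#include <linux/res_counter.h>
#include <linux/printk.h>

/* Hypothetical controller state, for illustration only. */
struct mycg {
	struct res_counter res;
};

/* Try to account @bytes against @cg's counter (and its parents). */
static int mycg_try_charge(struct mycg *cg, unsigned long bytes)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(&cg->res, bytes, &fail_at);
	if (ret < 0) {
		/* fail_at is the counter (possibly a parent) that hit its limit. */
		pr_debug("charge of %lu denied, margin %llu\n",
			 bytes, res_counter_margin(fail_at));
	}
	return ret;
}

/* Release @bytes; the return value is the charge still outstanding. */
static u64 mycg_uncharge(struct mycg *cg, unsigned long bytes)
{
	return res_counter_uncharge(&cg->res, bytes);
}

If the caller cannot afford to fail at this point, the patch's res_counter_charge_nofail() can be used instead: it always applies the charge and only reports, via its return value, that the limit was exceeded.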
