diff options
Diffstat (limited to 'mm/huge_memory.c')
| -rw-r--r-- | mm/huge_memory.c | 49 | 
1 file changed, 35 insertions(+), 14 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0..470dcda10ad 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *buf,
 				enum transparent_hugepage_flag flag)
 {
-	if (test_bit(flag, &transparent_hugepage_flags))
-		return sprintf(buf, "[yes] no\n");
-	else
-		return sprintf(buf, "yes [no]\n");
+	return sprintf(buf, "%d\n",
+		       !!test_bit(flag, &transparent_hugepage_flags));
 }
+
 static ssize_t single_flag_store(struct kobject *kobj,
 				 struct kobj_attribute *attr,
 				 const char *buf, size_t count,
 				 enum transparent_hugepage_flag flag)
 {
-	if (!memcmp("yes", buf,
-		    min(sizeof("yes")-1, count))) {
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &value);
+	if (ret < 0)
+		return ret;
+	if (value > 1)
+		return -EINVAL;
+
+	if (value)
 		set_bit(flag, &transparent_hugepage_flags);
-	} else if (!memcmp("no", buf,
-			   min(sizeof("no")-1, count))) {
+	else
 		clear_bit(flag, &transparent_hugepage_flags);
-	} else
-		return -EINVAL;
 
 	return count;
 }
@@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page))
+		if (unlikely(!page)) {
+			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
+		}
+		count_vm_event(THP_FAULT_ALLOC);
 		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 			put_page(page);
 			goto out;
@@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 						   pmd, orig_pmd, page, haddr);
 		put_page(page);
 		goto out;
 	}
+	count_vm_event(THP_FAULT_ALLOC);
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		put_page(new_page);
@@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page)
 	BUG_ON(!PageSwapBacked(page));
 
 	__split_huge_page(page, anon_vma);
+	count_vm_event(THP_SPLIT);
 
 	BUG_ON(PageCompound(page));
 out_unlock:
@@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm,
 				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 		up_read(&mm->mmap_sem);
 		put_page(new_page);
@@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
 #ifndef CONFIG_NUMA
 		if (!*hpage) {
 			*hpage = alloc_hugepage(khugepaged_defrag());
-			if (unlikely(!*hpage))
+			if (unlikely(!*hpage)) {
+				count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 				break;
+			}
+			count_vm_event(THP_COLLAPSE_ALLOC);
 		}
 #else
 		if (IS_ERR(*hpage))
@@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
 
 	do {
 		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage)
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
 	} while (unlikely(!hpage) &&
 		 likely(khugepaged_enabled()));
 	return hpage;
@@ -2210,8 +2228,11 @@ static void khugepaged_loop(void)
 	while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
 		hpage = khugepaged_alloc_hugepage();
-		if (unlikely(!hpage))
+		if (unlikely(!hpage)) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 			break;
+		}
+		count_vm_event(THP_COLLAPSE_ALLOC);
 #else
 		if (IS_ERR(hpage)) {
 			khugepaged_alloc_sleep();
