Diffstat (limited to 'drivers/char/mspec.c')
-rw-r--r--  drivers/char/mspec.c  62
1 file changed, 34 insertions(+), 28 deletions(-)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index ff146c2b08f..f1d7fa45c27 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -44,9 +44,8 @@
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
#include <asm/sn/addrs.h>
@@ -180,7 +179,7 @@ mspec_close(struct vm_area_struct *vma)
my_page = vdata->maddr[index];
vdata->maddr[index] = 0;
if (!mspec_zero_block(my_page, PAGE_SIZE))
- uncached_free_page(my_page);
+ uncached_free_page(my_page, 1);
else
printk(KERN_WARNING "mspec_close(): "
"failed to zero page %ld\n", my_page);
@@ -193,32 +192,30 @@ mspec_close(struct vm_area_struct *vma)
}
/*
- * mspec_nopfn
+ * mspec_fault
*
* Creates a mspec page and maps it to user space.
*/
-static unsigned long
-mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
+static int
+mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
unsigned long paddr, maddr;
unsigned long pfn;
- int index;
+ pgoff_t index = vmf->pgoff;
struct vma_data *vdata = vma->vm_private_data;
- BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
- index = (address - vdata->vm_start) >> PAGE_SHIFT;
maddr = (volatile unsigned long) vdata->maddr[index];
if (maddr == 0) {
- maddr = uncached_alloc_page(numa_node_id());
+ maddr = uncached_alloc_page(numa_node_id(), 1);
if (maddr == 0)
- return NOPFN_OOM;
+ return VM_FAULT_OOM;
spin_lock(&vdata->lock);
if (vdata->maddr[index] == 0) {
vdata->count++;
vdata->maddr[index] = maddr;
} else {
- uncached_free_page(maddr);
+ uncached_free_page(maddr, 1);
maddr = vdata->maddr[index];
}
spin_unlock(&vdata->lock);
@@ -231,19 +228,26 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
pfn = paddr >> PAGE_SHIFT;
- return pfn;
+ /*
+ * vm_insert_pfn can fail with -EBUSY, but in that case it will
+ * be because another thread has installed the pte first, so it
+ * is no problem.
+ */
+ vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+ return VM_FAULT_NOPAGE;
}
-static struct vm_operations_struct mspec_vm_ops = {
+static const struct vm_operations_struct mspec_vm_ops = {
.open = mspec_open,
.close = mspec_close,
- .nopfn = mspec_nopfn
+ .fault = mspec_fault,
};
/*
* mspec_mmap
*
- * Called when mmaping the device. Initializes the vma with a fault handler
+ * Called when mmapping the device. Initializes the vma with a fault handler
* and private data structure necessary to allocate, track, and free the
* underlying pages.
*/
@@ -263,27 +267,26 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
if ((vma->vm_flags & VM_WRITE) == 0)
return -EPERM;
- pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ pages = vma_pages(vma);
vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
if (vdata_size <= PAGE_SIZE)
- vdata = kmalloc(vdata_size, GFP_KERNEL);
+ vdata = kzalloc(vdata_size, GFP_KERNEL);
else {
- vdata = vmalloc(vdata_size);
+ vdata = vzalloc(vdata_size);
flags = VMD_VMALLOCED;
}
if (!vdata)
return -ENOMEM;
- memset(vdata, 0, vdata_size);
vdata->vm_start = vma->vm_start;
vdata->vm_end = vma->vm_end;
vdata->flags = flags;
vdata->type = type;
spin_lock_init(&vdata->lock);
- vdata->refcnt = ATOMIC_INIT(1);
+ atomic_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
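The mmap-path hunk above bundles a few idiom updates: the zeroing allocators kzalloc()/vzalloc() replace the separate memset(), atomic_set() replaces the misuse of ATOMIC_INIT() for runtime initialisation, and VM_DONTDUMP takes over from the retired VM_RESERVED. A minimal sketch of the allocation idiom, using hypothetical example_* names rather than the driver's own:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/mm.h>

/* Hypothetical flag mirroring the VMD_VMALLOCED idea. */
#define EXAMPLE_VMALLOCED	0x1

struct example_data {
	int flags;
	atomic_t refcnt;
};

static struct example_data *example_alloc(size_t size)
{
	struct example_data *d;
	int flags = 0;

	/* Zeroing allocators replace a separate memset(): small
	 * allocations from the slab, large ones from vmalloc space. */
	if (size <= PAGE_SIZE) {
		d = kzalloc(size, GFP_KERNEL);
	} else {
		d = vzalloc(size);
		flags = EXAMPLE_VMALLOCED;
	}
	if (!d)
		return NULL;

	d->flags = flags;
	/* atomic_set() for runtime init; ATOMIC_INIT() is only meant
	 * for static initializers. */
	atomic_set(&d->refcnt, 1);
	return d;
}

static void example_free(struct example_data *d)
{
	if (d->flags & EXAMPLE_VMALLOCED)
		vfree(d);
	else
		kfree(d);
}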
@@ -311,7 +314,8 @@ uncached_mmap(struct file *file, struct vm_area_struct *vma)
static const struct file_operations fetchop_fops = {
.owner = THIS_MODULE,
- .mmap = fetchop_mmap
+ .mmap = fetchop_mmap,
+ .llseek = noop_llseek,
};
static struct miscdevice fetchop_miscdev = {
@@ -322,7 +326,8 @@ static struct miscdevice fetchop_miscdev = {
static const struct file_operations cached_fops = {
.owner = THIS_MODULE,
- .mmap = cached_mmap
+ .mmap = cached_mmap,
+ .llseek = noop_llseek,
};
static struct miscdevice cached_miscdev = {
@@ -333,7 +338,8 @@ static struct miscdevice cached_miscdev = {
static const struct file_operations uncached_fops = {
.owner = THIS_MODULE,
- .mmap = uncached_mmap
+ .mmap = uncached_mmap,
+ .llseek = noop_llseek,
};
static struct miscdevice uncached_miscdev = {
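All three mmap-only device nodes above gain an explicit .llseek. noop_llseek() accepts a seek but leaves the file position untouched, which suits character devices whose position is meaningless. A small sketch with placeholder names (example_mmap and example_fops are not part of this driver):

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/module.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENODEV;		/* placeholder body */
}

/* Wiring .llseek explicitly documents that seeking is accepted but has
 * no effect on this device node. */
static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.mmap	= example_mmap,
	.llseek	= noop_llseek,
};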
@@ -367,7 +373,7 @@ mspec_init(void)
int nasid;
unsigned long phys;
- scratch_page[nid] = uncached_alloc_page(nid);
+ scratch_page[nid] = uncached_alloc_page(nid, 1);
if (scratch_page[nid] == 0)
goto free_scratch_pages;
phys = __pa(scratch_page[nid]);
@@ -414,7 +420,7 @@ mspec_init(void)
free_scratch_pages:
for_each_node(nid) {
if (scratch_page[nid] != 0)
- uncached_free_page(scratch_page[nid]);
+ uncached_free_page(scratch_page[nid], 1);
}
return ret;
}
@@ -431,7 +437,7 @@ mspec_exit(void)
for_each_node(nid) {
if (scratch_page[nid] != 0)
- uncached_free_page(scratch_page[nid]);
+ uncached_free_page(scratch_page[nid], 1);
}
}
}