Diffstat (limited to 'arch/powerpc/platforms/cell/spufs')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile      |  12
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c |   1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c     |   9
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c    | 133
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c       |   3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c        | 287
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c      |   1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c       | 307
-rw-r--r--  arch/powerpc/platforms/cell/spufs/lscsa_alloc.c |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c         |  21
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c       |  81
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spu_restore.c |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h       |  26
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c    | 240
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.h    |  39
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c      |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c    |  33
17 files changed, 492 insertions, 711 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 99610a6361f..52a7d2596d3 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,10 +1,12 @@
obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
-obj-$(CONFIG_SPU_TRACE) += sputrace.o
+# magic for the trace events
+CFLAGS_sched.o := -I$(src)
# Rules to build switch.o with the help of SPU tool chain
SPU_CROSS := spu-
@@ -12,10 +14,8 @@ SPU_CC := $(SPU_CROSS)gcc
SPU_AS := $(SPU_CROSS)gcc
SPU_LD := $(SPU_CROSS)ld
SPU_OBJCOPY := $(SPU_CROSS)objcopy
-SPU_CFLAGS := -O2 -Wall -I$(srctree)/include \
- -I$(objtree)/include2 -D__KERNEL__
-SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include \
- -I$(objtree)/include2 -D__KERNEL__
+SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -D__KERNEL__
+SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -D__KERNEL__
SPU_LDFLAGS := -N -Ttext=0x0
$(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 64eb15b2204..6e8a9ef8590 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -21,7 +21,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 6653ddbed04..9c6790d17ed 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -22,12 +22,13 @@
#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
+#include "sputrace.h"
atomic_t nr_spu_contexts = ATOMIC_INIT(0);
@@ -35,6 +36,8 @@ atomic_t nr_spu_contexts = ATOMIC_INIT(0);
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
struct spu_context *ctx;
+ struct timespec ts;
+
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
goto out;
@@ -64,6 +67,8 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
__spu_update_sched_info(ctx);
spu_set_timeslice(ctx);
ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
+ ktime_get_ts(&ts);
+ ctx->stats.tstamp = timespec_to_ns(&ts);
atomic_inc(&nr_spu_contexts);
goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index af116aadba1..be6212ddbf0 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -24,9 +24,11 @@
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/syscalls.h>
+#include <linux/coredump.h>
+#include <linux/binfmts.h>
#include <asm/uaccess.h>
@@ -42,50 +44,12 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer,
return spufs_coredump_read[num].read(ctx, buffer, size, off);
data = spufs_coredump_read[num].get(ctx);
- ret = snprintf(buffer, size, "0x%.16lx", data);
+ ret = snprintf(buffer, size, "0x%.16llx", data);
if (ret >= size)
return size;
return ++ret; /* count trailing NULL */
}
-/*
- * These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
- */
-static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
-{
- unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
- ssize_t written;
-
- if (*foffset + nr > limit)
- return -EIO;
-
- written = file->f_op->write(file, addr, nr, &file->f_pos);
- *foffset += written;
-
- if (written != nr)
- return -EIO;
-
- return 0;
-}
-
-static int spufs_dump_align(struct file *file, char *buf, loff_t new_off,
- loff_t *foffset)
-{
- int rc, size;
-
- size = min((loff_t)PAGE_SIZE, new_off - *foffset);
- memset(buf, 0, size);
-
- rc = 0;
- while (rc == 0 && new_off > *foffset) {
- size = min((loff_t)PAGE_SIZE, new_off - *foffset);
- rc = spufs_dump_write(file, buf, size, foffset);
- }
-
- return rc;
-}
-
static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
{
int i, sz, total = 0;
@@ -106,6 +70,17 @@ static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
return total;
}
+static int match_context(const void *v, struct file *file, unsigned fd)
+{
+ struct spu_context *ctx;
+ if (file->f_op != &spufs_context_fops)
+ return 0;
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
+ if (ctx->flags & SPU_CREATE_NOSCHED)
+ return 0;
+ return fd + 1;
+}
+
/*
* The additional architecture-specific notes for Cell are various
* context files in the spu context.
@@ -115,29 +90,18 @@ static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
* internal functionality to dump them without needing to actually
* open the files.
*/
+/*
+ * descriptor table is not shared, so files can't change or go away.
+ */
static struct spu_context *coredump_next_context(int *fd)
{
- struct fdtable *fdt = files_fdtable(current->files);
struct file *file;
- struct spu_context *ctx = NULL;
-
- for (; *fd < fdt->max_fds; (*fd)++) {
- if (!FD_ISSET(*fd, fdt->open_fds))
- continue;
-
- file = fcheck(*fd);
-
- if (!file || file->f_op != &spufs_context_fops)
- continue;
-
- ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
- if (ctx->flags & SPU_CREATE_NOSCHED)
- continue;
-
- break;
- }
-
- return ctx;
+ int n = iterate_fd(current->files, *fd, match_context, NULL);
+ if (!n)
+ return NULL;
+ *fd = n - 1;
+ file = fcheck(*fd);
+ return SPUFS_I(file_inode(file))->i_ctx;
}
int spufs_coredump_extra_notes_size(void)
@@ -165,10 +129,10 @@ int spufs_coredump_extra_notes_size(void)
}
static int spufs_arch_write_note(struct spu_context *ctx, int i,
- struct file *file, int dfd, loff_t *foffset)
+ struct coredump_params *cprm, int dfd)
{
loff_t pos = 0;
- int sz, rc, nread, total = 0;
+ int sz, rc, total = 0;
const int bufsz = PAGE_SIZE;
char *name;
char fullname[80], *buf;
@@ -186,42 +150,39 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
en.n_descsz = sz;
en.n_type = NT_SPU;
- rc = spufs_dump_write(file, &en, sizeof(en), foffset);
- if (rc)
- goto out;
+ if (!dump_emit(cprm, &en, sizeof(en)))
+ goto Eio;
- rc = spufs_dump_write(file, fullname, en.n_namesz, foffset);
- if (rc)
- goto out;
+ if (!dump_emit(cprm, fullname, en.n_namesz))
+ goto Eio;
- rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset);
- if (rc)
- goto out;
+ if (!dump_align(cprm, 4))
+ goto Eio;
do {
- nread = do_coredump_read(i, ctx, buf, bufsz, &pos);
- if (nread > 0) {
- rc = spufs_dump_write(file, buf, nread, foffset);
- if (rc)
- goto out;
- total += nread;
+ rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
+ if (rc > 0) {
+ if (!dump_emit(cprm, buf, rc))
+ goto Eio;
+ total += rc;
}
- } while (nread == bufsz && total < sz);
+ } while (rc == bufsz && total < sz);
- if (nread < 0) {
- rc = nread;
+ if (rc < 0)
goto out;
- }
-
- rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4),
- foffset);
+ if (!dump_skip(cprm,
+ roundup(cprm->written - total + sz, 4) - cprm->written))
+ goto Eio;
out:
free_page((unsigned long)buf);
return rc;
+Eio:
+ free_page((unsigned long)buf);
+ return -EIO;
}
-int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
+int spufs_coredump_extra_notes_write(struct coredump_params *cprm)
{
struct spu_context *ctx;
int fd, j, rc;
@@ -233,7 +194,7 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
return rc;
for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
- rc = spufs_arch_write_note(ctx, j, file, fd, foffset);
+ rc = spufs_arch_write_note(ctx, j, cprm, fd);
if (rc) {
spu_release_saved(ctx);
return rc;
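
For reference, a minimal sketch (not part of the patch) of the dump_emit()-based note writing that the converted spufs_arch_write_note() above relies on. spufs_dump_note() and its payload-already-in-memory assumption are invented for illustration; the real code streams the payload in PAGE_SIZE chunks and aligns the end of the descriptor with dump_skip().

/*
 * Illustrative sketch only.  Writes one ELF note header, name and
 * in-memory payload through the coredump helpers, with the 4-byte
 * padding the note format requires.
 */
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/elf.h>
#include <linux/string.h>
#include <linux/types.h>

static int spufs_dump_note(struct coredump_params *cprm, const char *name,
			   u32 type, const void *desc, size_t descsz)
{
	struct elf_note en = {
		.n_namesz = strlen(name) + 1,
		.n_descsz = descsz,
		.n_type   = type,
	};

	if (!dump_emit(cprm, &en, sizeof(en)))
		return -EIO;
	if (!dump_emit(cprm, name, en.n_namesz))
		return -EIO;
	if (!dump_align(cprm, 4))		/* note name is 4-byte padded */
		return -EIO;
	if (!dump_emit(cprm, desc, descsz))
		return -EIO;
	if (!dump_align(cprm, 4))		/* so is the descriptor */
		return -EIO;
	return 0;
}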
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index f093a581ac7..8cb6260cc80 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -21,7 +21,6 @@
*/
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
@@ -132,7 +131,7 @@ int spufs_handle_class1(struct spu_context *ctx)
spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);
- pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
+ pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
dsisr, ctx->state);
ctx->stats.hash_flt++;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 010a51f5979..90986923a53 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -24,12 +24,12 @@
#include <linux/fs.h>
#include <linux/ioctl.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
-#include <linux/marker.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <asm/time.h>
@@ -38,6 +38,7 @@
#include <asm/uaccess.h>
#include "spufs.h"
+#include "sputrace.h"
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
@@ -147,12 +148,12 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
__simple_attr_check_format(__fmt, 0ull); \
return spufs_attr_open(inode, file, __get, __set, __fmt); \
} \
-static struct file_operations __fops = { \
- .owner = THIS_MODULE, \
+static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = spufs_attr_release, \
.read = spufs_attr_read, \
.write = spufs_attr_write, \
+ .llseek = generic_file_llseek, \
};
@@ -217,24 +218,17 @@ spufs_mem_write(struct file *file, const char __user *buffer,
loff_t pos = *ppos;
int ret;
- if (pos < 0)
- return -EINVAL;
if (pos > LS_SIZE)
return -EFBIG;
- if (size > LS_SIZE - pos)
- size = LS_SIZE - pos;
ret = spu_acquire(ctx);
if (ret)
return ret;
local_store = ctx->ops->get_ls(ctx);
- ret = copy_from_user(local_store + pos, buffer, size);
+ size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
spu_release(ctx);
- if (ret)
- return -EFAULT;
- *ppos = pos + size;
return size;
}
@@ -273,12 +267,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
if (ctx->state == SPU_STATE_SAVED) {
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- & ~_PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
} else {
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
}
vm_insert_pfn(vma, address, pfn);
@@ -311,7 +303,7 @@ static int spufs_mem_mmap_access(struct vm_area_struct *vma,
return len;
}
-static struct vm_operations_struct spufs_mem_mmap_vmops = {
+static const struct vm_operations_struct spufs_mem_mmap_vmops = {
.fault = spufs_mem_mmap_fault,
.access = spufs_mem_mmap_access,
};
@@ -338,8 +330,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma->vm_ops = &spufs_mem_mmap_vmops;
return 0;
@@ -360,7 +351,7 @@ static unsigned long spufs_get_unmapped_area(struct file *file,
/* Else, try to obtain a 64K pages slice */
return slice_get_unmapped_area(addr, len, flags,
- MMU_PAGE_64K, 1, 0);
+ MMU_PAGE_64K, 1);
}
#endif /* CONFIG_SPU_FS_64K_LS */
@@ -390,6 +381,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
if (offset >= ps_size)
return VM_FAULT_SIGBUS;
+ if (fatal_signal_pending(current))
+ return VM_FAULT_SIGBUS;
+
/*
* Because we release the mmap_sem, the context may be destroyed while
* we're in spu_wait. Grab an extra reference so it isn't destroyed
@@ -436,7 +430,7 @@ static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}
-static struct vm_operations_struct spufs_cntl_mmap_vmops = {
+static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
.fault = spufs_cntl_mmap_fault,
};
@@ -449,8 +443,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_cntl_mmap_vmops;
return 0;
@@ -521,6 +514,7 @@ static const struct file_operations spufs_cntl_fops = {
.release = spufs_cntl_release,
.read = simple_attr_read,
.write = simple_attr_write,
+ .llseek = generic_file_llseek,
.mmap = spufs_cntl_mmap,
};
@@ -548,6 +542,11 @@ spufs_regs_read(struct file *file, char __user *buffer,
int ret;
struct spu_context *ctx = file->private_data;
+ /* pre-check for file position: if we'd return EOF, there's no point
+ * causing a deschedule */
+ if (*pos >= sizeof(ctx->csa.lscsa->gprs))
+ return 0;
+
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
@@ -564,20 +563,18 @@ spufs_regs_write(struct file *file, const char __user *buffer,
struct spu_lscsa *lscsa = ctx->csa.lscsa;
int ret;
- size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
- if (size <= 0)
+ if (*pos >= sizeof(lscsa->gprs))
return -EFBIG;
- *pos += size;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- ret = copy_from_user(lscsa->gprs + *pos - size,
- buffer, size) ? -EFAULT : size;
+ size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
+ buffer, size);
spu_release_saved(ctx);
- return ret;
+ return size;
}
static const struct file_operations spufs_regs_fops = {
@@ -619,20 +616,18 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
struct spu_lscsa *lscsa = ctx->csa.lscsa;
int ret;
- size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
- if (size <= 0)
+ if (*pos >= sizeof(lscsa->fpcr))
return -EFBIG;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- *pos += size;
- ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
- buffer, size) ? -EFAULT : size;
+ size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
+ buffer, size);
spu_release_saved(ctx);
- return ret;
+ return size;
}
static const struct file_operations spufs_fpcr_fops = {
@@ -707,6 +702,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_read,
+ .llseek = no_llseek,
};
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
@@ -736,6 +732,7 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_stat_read,
+ .llseek = no_llseek,
};
/* low-level ibox access function */
@@ -856,6 +853,7 @@ static const struct file_operations spufs_ibox_fops = {
.read = spufs_ibox_read,
.poll = spufs_ibox_poll,
.fasync = spufs_ibox_fasync,
+ .llseek = no_llseek,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
@@ -883,6 +881,7 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_ibox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_ibox_stat_read,
+ .llseek = no_llseek,
};
/* low-level mailbox write */
@@ -1004,6 +1003,7 @@ static const struct file_operations spufs_wbox_fops = {
.write = spufs_wbox_write,
.poll = spufs_wbox_poll,
.fasync = spufs_wbox_fasync,
+ .llseek = no_llseek,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
@@ -1031,6 +1031,7 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_wbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_wbox_stat_read,
+ .llseek = no_llseek,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
@@ -1137,7 +1138,7 @@ spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
}
-static struct vm_operations_struct spufs_signal1_mmap_vmops = {
+static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
.fault = spufs_signal1_mmap_fault,
};
@@ -1147,8 +1148,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal1_mmap_vmops;
return 0;
@@ -1160,6 +1160,7 @@ static const struct file_operations spufs_signal1_fops = {
.read = spufs_signal1_read,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
+ .llseek = no_llseek,
};
static const struct file_operations spufs_signal1_nosched_fops = {
@@ -1167,6 +1168,7 @@ static const struct file_operations spufs_signal1_nosched_fops = {
.release = spufs_signal1_release,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
+ .llseek = no_llseek,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
@@ -1274,7 +1276,7 @@ spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
}
-static struct vm_operations_struct spufs_signal2_mmap_vmops = {
+static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
.fault = spufs_signal2_mmap_fault,
};
@@ -1284,8 +1286,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal2_mmap_vmops;
return 0;
@@ -1300,6 +1301,7 @@ static const struct file_operations spufs_signal2_fops = {
.read = spufs_signal2_read,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
+ .llseek = no_llseek,
};
static const struct file_operations spufs_signal2_nosched_fops = {
@@ -1307,6 +1309,7 @@ static const struct file_operations spufs_signal2_nosched_fops = {
.release = spufs_signal2_release,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
+ .llseek = no_llseek,
};
/*
@@ -1393,7 +1396,7 @@ spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}
-static struct vm_operations_struct spufs_mss_mmap_vmops = {
+static const struct vm_operations_struct spufs_mss_mmap_vmops = {
.fault = spufs_mss_mmap_fault,
};
@@ -1406,8 +1409,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mss_mmap_vmops;
return 0;
@@ -1447,6 +1449,7 @@ static const struct file_operations spufs_mss_fops = {
.open = spufs_mss_open,
.release = spufs_mss_release,
.mmap = spufs_mss_mmap,
+ .llseek = no_llseek,
};
static int
@@ -1455,7 +1458,7 @@ spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}
-static struct vm_operations_struct spufs_psmap_mmap_vmops = {
+static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
.fault = spufs_psmap_mmap_fault,
};
@@ -1468,8 +1471,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_psmap_mmap_vmops;
return 0;
@@ -1505,6 +1507,7 @@ static const struct file_operations spufs_psmap_fops = {
.open = spufs_psmap_open,
.release = spufs_psmap_release,
.mmap = spufs_psmap_mmap,
+ .llseek = no_llseek,
};
@@ -1515,7 +1518,7 @@ spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}
-static struct vm_operations_struct spufs_mfc_mmap_vmops = {
+static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
.fault = spufs_mfc_mmap_fault,
};
@@ -1528,8 +1531,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mfc_mmap_vmops;
return 0;
@@ -1655,7 +1657,7 @@ out:
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
- pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
+ pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
cmd->ea, cmd->size, cmd->tag, cmd->cmd);
switch (cmd->cmd) {
@@ -1672,7 +1674,7 @@ static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
}
if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
- pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
+ pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
cmd->ea, cmd->lsa);
return -EIO;
}
@@ -1847,10 +1849,16 @@ out:
return ret;
}
-static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- return spufs_mfc_flush(file, NULL);
+ struct inode *inode = file_inode(file);
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (!err) {
+ mutex_lock(&inode->i_mutex);
+ err = spufs_mfc_flush(file, NULL);
+ mutex_unlock(&inode->i_mutex);
+ }
+ return err;
}
static int spufs_mfc_fasync(int fd, struct file *file, int on)
@@ -1870,6 +1878,7 @@ static const struct file_operations spufs_mfc_fops = {
.fsync = spufs_mfc_fsync,
.fasync = spufs_mfc_fasync,
.mmap = spufs_mfc_mmap,
+ .llseek = no_llseek,
};
static int spufs_npc_set(void *data, u64 val)
@@ -2245,6 +2254,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_dma_info_fops = {
.open = spufs_info_open,
.read = spufs_dma_info_read,
+ .llseek = no_llseek,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
@@ -2298,6 +2308,7 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_proxydma_info_fops = {
.open = spufs_info_open,
.read = spufs_proxydma_info_read,
+ .llseek = no_llseek,
};
static int spufs_show_tid(struct seq_file *s, void *private)
@@ -2426,38 +2437,49 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx)
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ int rc;
+
+ rc = spu_acquire(ctx);
+ if (rc)
+ return rc;
- /*
- * We (ab-)use the mapping_lock here because it serves the similar
- * purpose for synchronizing open/close elsewhere. Maybe it should
- * be renamed eventually.
- */
- mutex_lock(&ctx->mapping_lock);
if (ctx->switch_log) {
- spin_lock(&ctx->switch_log->lock);
- ctx->switch_log->head = 0;
- ctx->switch_log->tail = 0;
- spin_unlock(&ctx->switch_log->lock);
- } else {
- /*
- * We allocate the switch log data structures on first open.
- * They will never be free because we assume a context will
- * be traced until it goes away.
- */
- ctx->switch_log = kzalloc(sizeof(struct switch_log) +
- SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
- GFP_KERNEL);
- if (!ctx->switch_log)
- goto out;
- spin_lock_init(&ctx->switch_log->lock);
- init_waitqueue_head(&ctx->switch_log->wait);
+ rc = -EBUSY;
+ goto out;
}
- mutex_unlock(&ctx->mapping_lock);
+
+ ctx->switch_log = kmalloc(sizeof(struct switch_log) +
+ SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+ GFP_KERNEL);
+
+ if (!ctx->switch_log) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->switch_log->head = ctx->switch_log->tail = 0;
+ init_waitqueue_head(&ctx->switch_log->wait);
+ rc = 0;
+
+out:
+ spu_release(ctx);
+ return rc;
+}
+
+static int spufs_switch_log_release(struct inode *inode, struct file *file)
+{
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ int rc;
+
+ rc = spu_acquire(ctx);
+ if (rc)
+ return rc;
+
+ kfree(ctx->switch_log);
+ ctx->switch_log = NULL;
+ spu_release(ctx);
return 0;
- out:
- mutex_unlock(&ctx->mapping_lock);
- return -ENOMEM;
}
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
@@ -2478,49 +2500,61 @@ static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int error = 0, cnt = 0;
- if (!buf || len < 0)
+ if (!buf)
return -EINVAL;
+ error = spu_acquire(ctx);
+ if (error)
+ return error;
+
while (cnt < len) {
char tbuf[128];
int width;
- if (file->f_flags & O_NONBLOCK) {
- if (spufs_switch_log_used(ctx) <= 0)
- return cnt ? cnt : -EAGAIN;
- } else {
- /* Wait for data in buffer */
- error = wait_event_interruptible(ctx->switch_log->wait,
- spufs_switch_log_used(ctx) > 0);
- if (error)
+ if (spufs_switch_log_used(ctx) == 0) {
+ if (cnt > 0) {
+ /* If there's data ready to go, we can
+ * just return straight away */
+ break;
+
+ } else if (file->f_flags & O_NONBLOCK) {
+ error = -EAGAIN;
break;
- }
- spin_lock(&ctx->switch_log->lock);
- if (ctx->switch_log->head == ctx->switch_log->tail) {
- /* multiple readers race? */
- spin_unlock(&ctx->switch_log->lock);
- continue;
+ } else {
+ /* spufs_wait will drop the mutex and
+ * re-acquire, but since we're in read(), the
+ * file cannot be _released (and so
+ * ctx->switch_log is stable).
+ */
+ error = spufs_wait(ctx->switch_log->wait,
+ spufs_switch_log_used(ctx) > 0);
+
+ /* On error, spufs_wait returns without the
+ * state mutex held */
+ if (error)
+ return error;
+
+ /* We may have had entries read from underneath
+ * us while we dropped the mutex in spufs_wait,
+ * so re-check */
+ if (spufs_switch_log_used(ctx) == 0)
+ continue;
+ }
}
width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
- if (width < len) {
+ if (width < len)
ctx->switch_log->tail =
(ctx->switch_log->tail + 1) %
SWITCH_LOG_BUFSIZE;
- }
-
- spin_unlock(&ctx->switch_log->lock);
-
- /*
- * If the record is greater than space available return
- * partial buffer (so far)
- */
- if (width >= len)
+ else
+ /* If the record is greater than space available return
+ * partial buffer (so far) */
break;
error = copy_to_user(buf + cnt, tbuf, width);
@@ -2529,37 +2563,51 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
cnt += width;
}
+ spu_release(ctx);
+
return cnt == 0 ? error : cnt;
}
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
unsigned int mask = 0;
+ int rc;
poll_wait(file, &ctx->switch_log->wait, wait);
+ rc = spu_acquire(ctx);
+ if (rc)
+ return rc;
+
if (spufs_switch_log_used(ctx) > 0)
mask |= POLLIN;
+ spu_release(ctx);
+
return mask;
}
static const struct file_operations spufs_switch_log_fops = {
- .owner = THIS_MODULE,
- .open = spufs_switch_log_open,
- .read = spufs_switch_log_read,
- .poll = spufs_switch_log_poll,
+ .open = spufs_switch_log_open,
+ .read = spufs_switch_log_read,
+ .poll = spufs_switch_log_poll,
+ .release = spufs_switch_log_release,
+ .llseek = no_llseek,
};
+/**
+ * Log a context switch event to a switch log reader.
+ *
+ * Must be called with ctx->state_mutex held.
+ */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val)
{
if (!ctx->switch_log)
return;
- spin_lock(&ctx->switch_log->lock);
if (spufs_switch_log_avail(ctx) > 1) {
struct switch_log_entry *p;
@@ -2573,7 +2621,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
ctx->switch_log->head =
(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
}
- spin_unlock(&ctx->switch_log->lock);
wake_up(&ctx->switch_log->wait);
}
@@ -2598,7 +2645,7 @@ static int spufs_show_ctx(struct seq_file *s, void *private)
}
seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
- " %c %lx %lx %lx %lx %x %x\n",
+ " %c %llx %llx %llx %llx %x %x\n",
ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
ctx->flags,
ctx->sched_flags,
@@ -2630,7 +2677,7 @@ static const struct file_operations spufs_ctx_fops = {
.release = single_release,
};
-struct spufs_tree_descr spufs_dir_contents[] = {
+const struct spufs_tree_descr spufs_dir_contents[] = {
{ "capabilities", &spufs_caps_fops, 0444, },
{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
@@ -2671,7 +2718,7 @@ struct spufs_tree_descr spufs_dir_contents[] = {
{},
};
-struct spufs_tree_descr spufs_dir_nosched_contents[] = {
+const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
{ "capabilities", &spufs_caps_fops, 0444, },
{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
{ "mbox", &spufs_mbox_fops, 0444, },
@@ -2696,12 +2743,12 @@ struct spufs_tree_descr spufs_dir_nosched_contents[] = {
{},
};
-struct spufs_tree_descr spufs_dir_debug_contents[] = {
+const struct spufs_tree_descr spufs_dir_debug_contents[] = {
{ ".ctx", &spufs_ctx_fops, 0444, },
{},
};
-struct spufs_coredump_reader spufs_coredump_read[] = {
+const struct spufs_coredump_reader spufs_coredump_read[] = {
{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
{ "lslr", NULL, spufs_lslr_get, 19 },
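
The spufs_mem_write(), spufs_regs_write() and spufs_fpcr_write() conversions above all follow the same simple_write_to_buffer() idiom. A minimal sketch of that idiom, outside the patch, with made-up names (demo_buf, demo_write):

/*
 * Illustrative sketch only.  simple_write_to_buffer() clamps the copy
 * to the buffer size, advances *ppos, and returns the number of bytes
 * copied (or -EFAULT), so the open-coded bounds checks and
 * copy_from_user() calls become unnecessary.
 */
#include <linux/fs.h>
#include <linux/uaccess.h>

static char demo_buf[256];

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t len, loff_t *ppos)
{
	if (*ppos >= sizeof(demo_buf))	/* nothing left to write */
		return -EFBIG;

	return simple_write_to_buffer(demo_buf, sizeof(demo_buf), ppos,
				      ubuf, len);
}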
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 64f8540b832..8655c4cbefc 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 690ca7b0dcf..87ba7cf99cd 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,12 +71,17 @@ spufs_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
-static void
-spufs_destroy_inode(struct inode *inode)
+static void spufs_i_callback(struct rcu_head *head)
{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}
+static void spufs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, spufs_i_callback);
+}
+
static void
spufs_init_once(void *p)
{
@@ -86,7 +91,7 @@ spufs_init_once(void *p)
}
static struct inode *
-spufs_new_inode(struct super_block *sb, int mode)
+spufs_new_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode;
@@ -94,10 +99,10 @@ spufs_new_inode(struct super_block *sb, int mode)
if (!inode)
goto out;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
- inode->i_blocks = 0;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
return inode;
@@ -111,16 +116,18 @@ spufs_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
(attr->ia_size != inode->i_size))
return -EINVAL;
- return inode_setattr(inode, attr);
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
- const struct file_operations *fops, int mode,
+ const struct file_operations *fops, umode_t mode,
size_t size, struct spu_context *ctx)
{
- static struct inode_operations spufs_file_iops = {
+ static const struct inode_operations spufs_file_iops = {
.setattr = spufs_setattr,
};
struct inode *inode;
@@ -142,15 +149,14 @@ out:
}
static void
-spufs_delete_inode(struct inode *inode)
+spufs_evict_inode(struct inode *inode)
{
struct spufs_inode_info *ei = SPUFS_I(inode);
-
+ clear_inode(inode);
if (ei->i_ctx)
put_spu_context(ei->i_ctx);
if (ei->i_gang)
put_spu_gang(ei->i_gang);
- clear_inode(inode);
}
static void spufs_prune_dir(struct dentry *dir)
@@ -159,18 +165,18 @@ static void spufs_prune_dir(struct dentry *dir)
mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry)) && dentry->d_inode) {
- dget_locked(dentry);
+ dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
simple_unlink(dir->d_inode, dentry);
- spin_unlock(&dcache_lock);
+ /* XXX: what was dcache_lock protecting here? Other
+ * filesystems (IB, configfs) release dcache_lock
+ * before unlink */
dput(dentry);
} else {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
}
shrink_dcache_parent(dir);
@@ -181,46 +187,31 @@ static void spufs_prune_dir(struct dentry *dir)
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
/* remove all entries */
+ int res;
spufs_prune_dir(dir);
d_drop(dir);
-
- return simple_rmdir(parent, dir);
+ res = simple_rmdir(parent, dir);
+ /* We have to give up the mm_struct */
+ spu_forget(SPUFS_I(dir->d_inode)->i_ctx);
+ return res;
}
-static int spufs_fill_dir(struct dentry *dir, struct spufs_tree_descr *files,
- int mode, struct spu_context *ctx)
+static int spufs_fill_dir(struct dentry *dir,
+ const struct spufs_tree_descr *files, umode_t mode,
+ struct spu_context *ctx)
{
- struct dentry *dentry, *tmp;
- int ret;
-
while (files->name && files->name[0]) {
- ret = -ENOMEM;
- dentry = d_alloc_name(dir, files->name);
+ int ret;
+ struct dentry *dentry = d_alloc_name(dir, files->name);
if (!dentry)
- goto out;
+ return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
- goto out;
+ return ret;
files++;
}
return 0;
-out:
- /*
- * remove all children from dir. dir->inode is not set so don't
- * just simply use spufs_prune_dir() and panic afterwards :)
- * dput() looks like it will do the right thing:
- * - dec parent's ref counter
- * - remove child from parent's child list
- * - free child's inode if possible
- * - free child
- */
- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
- dput(dentry);
- }
-
- shrink_dcache_parent(dir);
- return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
@@ -239,9 +230,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
mutex_unlock(&parent->i_mutex);
WARN_ON(ret);
- /* We have to give up the mm_struct */
- spu_forget(ctx);
-
return dcache_dir_close(inode, file);
}
@@ -250,23 +238,22 @@ const struct file_operations spufs_context_fops = {
.release = spufs_dir_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
- .readdir = dcache_readdir,
- .fsync = simple_sync_file,
+ .iterate = dcache_readdir,
+ .fsync = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
- int mode)
+ umode_t mode)
{
int ret;
struct inode *inode;
struct spu_context *ctx;
- ret = -ENOSPC;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
- goto out;
+ return -ENOSPC;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
@@ -274,65 +261,58 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
}
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
- if (!ctx)
- goto out_iput;
+ if (!ctx) {
+ iput(inode);
+ return -ENOSPC;
+ }
ctx->flags = flags;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
+
+ mutex_lock(&inode->i_mutex);
+
+ dget(dentry);
+ inc_nlink(dir);
+ inc_nlink(inode);
+
+ d_instantiate(dentry, inode);
+
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
mode, ctx);
else
ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
- if (ret)
- goto out_free_ctx;
-
- if (spufs_get_sb_info(dir->i_sb)->debug)
+ if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
mode, ctx);
if (ret)
- goto out_free_ctx;
+ spufs_rmdir(dir, dentry);
- d_instantiate(dentry, inode);
- dget(dentry);
- dir->i_nlink++;
- dentry->d_inode->i_nlink++;
- goto out;
+ mutex_unlock(&inode->i_mutex);
-out_free_ctx:
- spu_forget(ctx);
- put_spu_context(ctx);
-out_iput:
- iput(inode);
-out:
return ret;
}
-static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
+static int spufs_context_open(struct path *path)
{
int ret;
struct file *filp;
ret = get_unused_fd();
- if (ret < 0) {
- dput(dentry);
- mntput(mnt);
- goto out;
- }
+ if (ret < 0)
+ return ret;
- filp = dentry_open(dentry, mnt, O_RDONLY);
+ filp = dentry_open(path, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
- ret = PTR_ERR(filp);
- goto out;
+ return PTR_ERR(filp);
}
filp->f_op = &spufs_context_fops;
fd_install(ret, filp);
-out:
return ret;
}
@@ -367,7 +347,7 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
return ERR_PTR(-EINVAL);
neighbor = get_spu_context(
- SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
+ SPUFS_I(file_inode(filp))->i_ctx);
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
@@ -440,36 +420,33 @@ spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
- struct vfsmount *mnt, int flags, int mode,
+ struct vfsmount *mnt, int flags, umode_t mode,
struct file *aff_filp)
{
int ret;
int affinity;
struct spu_gang *gang;
struct spu_context *neighbor;
+ struct path path = {.mnt = mnt, .dentry = dentry};
- ret = -EPERM;
if ((flags & SPU_CREATE_NOSCHED) &&
!capable(CAP_SYS_NICE))
- goto out_unlock;
+ return -EPERM;
- ret = -EINVAL;
if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
== SPU_CREATE_ISOLATE)
- goto out_unlock;
+ return -EINVAL;
- ret = -ENODEV;
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
- goto out_unlock;
+ return -ENODEV;
gang = NULL;
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
gang = SPUFS_I(inode)->i_gang;
- ret = -EINVAL;
if (!gang)
- goto out_unlock;
+ return -EINVAL;
mutex_lock(&gang->aff_mutex);
neighbor = spufs_assert_affinity(flags, gang, aff_filp);
if (IS_ERR(neighbor)) {
@@ -489,30 +466,18 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
put_spu_context(neighbor);
}
- /*
- * get references for dget and mntget, will be released
- * in error path of *_open().
- */
- ret = spufs_context_open(dget(dentry), mntget(mnt));
- if (ret < 0) {
+ ret = spufs_context_open(&path);
+ if (ret < 0)
WARN_ON(spufs_rmdir(inode, dentry));
- mutex_unlock(&inode->i_mutex);
- spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
- goto out;
- }
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
-out_unlock:
- mutex_unlock(&inode->i_mutex);
-out:
- dput(dentry);
return ret;
}
static int
-spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
+spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int ret;
struct inode *inode;
@@ -538,8 +503,8 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
- dir->i_nlink++;
- dentry->d_inode->i_nlink++;
+ inc_nlink(dir);
+ inc_nlink(dentry->d_inode);
return ret;
out_iput:
@@ -548,109 +513,80 @@ out:
return ret;
}
-static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
+static int spufs_gang_open(struct path *path)
{
int ret;
struct file *filp;
ret = get_unused_fd();
- if (ret < 0) {
- dput(dentry);
- mntput(mnt);
- goto out;
- }
+ if (ret < 0)
+ return ret;
- filp = dentry_open(dentry, mnt, O_RDONLY);
+ /*
+ * get references for dget and mntget, will be released
+ * in error path of *_open().
+ */
+ filp = dentry_open(path, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
- ret = PTR_ERR(filp);
- goto out;
+ return PTR_ERR(filp);
}
filp->f_op = &simple_dir_operations;
fd_install(ret, filp);
-out:
return ret;
}
static int spufs_create_gang(struct inode *inode,
struct dentry *dentry,
- struct vfsmount *mnt, int mode)
+ struct vfsmount *mnt, umode_t mode)
{
+ struct path path = {.mnt = mnt, .dentry = dentry};
int ret;
ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
- if (ret)
- goto out;
-
- /*
- * get references for dget and mntget, will be released
- * in error path of *_open().
- */
- ret = spufs_gang_open(dget(dentry), mntget(mnt));
- if (ret < 0) {
- int err = simple_rmdir(inode, dentry);
- WARN_ON(err);
+ if (!ret) {
+ ret = spufs_gang_open(&path);
+ if (ret < 0) {
+ int err = simple_rmdir(inode, dentry);
+ WARN_ON(err);
+ }
}
-
-out:
- mutex_unlock(&inode->i_mutex);
- dput(dentry);
return ret;
}
static struct file_system_type spufs_type;
-long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
- struct file *filp)
+long spufs_create(struct path *path, struct dentry *dentry,
+ unsigned int flags, umode_t mode, struct file *filp)
{
- struct dentry *dentry;
+ struct inode *dir = path->dentry->d_inode;
int ret;
- ret = -EINVAL;
/* check if we are on spufs */
- if (nd->path.dentry->d_sb->s_type != &spufs_type)
- goto out;
+ if (path->dentry->d_sb->s_type != &spufs_type)
+ return -EINVAL;
/* don't accept undefined flags */
if (flags & (~SPU_CREATE_FLAG_ALL))
- goto out;
+ return -EINVAL;
/* only threads can be underneath a gang */
- if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
- if ((flags & SPU_CREATE_GANG) ||
- !SPUFS_I(nd->path.dentry->d_inode)->i_gang)
- goto out;
- }
+ if (path->dentry != path->dentry->d_sb->s_root)
+ if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
+ return -EINVAL;
- dentry = lookup_create(nd, 1);
- ret = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_dir;
-
- ret = -EEXIST;
- if (dentry->d_inode)
- goto out_dput;
-
- mode &= ~current->fs->umask;
+ mode &= ~current_umask();
if (flags & SPU_CREATE_GANG)
- ret = spufs_create_gang(nd->path.dentry->d_inode,
- dentry, nd->path.mnt, mode);
+ ret = spufs_create_gang(dir, dentry, path->mnt, mode);
else
- ret = spufs_create_context(nd->path.dentry->d_inode,
- dentry, nd->path.mnt, flags, mode,
+ ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
filp);
if (ret >= 0)
- fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
- return ret;
+ fsnotify_mkdir(dir, dentry);
-out_dput:
- dput(dentry);
-out_dir:
- mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
-out:
return ret;
}
@@ -659,7 +595,7 @@ enum {
Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
};
-static match_table_t spufs_tokens = {
+static const match_table_t spufs_tokens = {
{ Opt_uid, "uid=%d" },
{ Opt_gid, "gid=%d" },
{ Opt_mode, "mode=%o" },
@@ -684,12 +620,16 @@ spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
case Opt_uid:
if (match_int(&args[0], &option))
return 0;
- root->i_uid = option;
+ root->i_uid = make_kuid(current_user_ns(), option);
+ if (!uid_valid(root->i_uid))
+ return 0;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return 0;
- root->i_gid = option;
+ root->i_gid = make_kgid(current_user_ns(), option);
+ if (!gid_valid(root->i_gid))
+ return 0;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
@@ -755,15 +695,16 @@ spufs_create_root(struct super_block *sb, void *data)
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
SPUFS_I(inode)->i_ctx = NULL;
+ inc_nlink(inode);
ret = -EINVAL;
if (!spufs_parse_options(sb, data, inode))
goto out_iput;
ret = -ENOMEM;
- sb->s_root = d_alloc_root(inode);
+ sb->s_root = d_make_root(inode);
if (!sb->s_root)
- goto out_iput;
+ goto out;
return 0;
out_iput:
@@ -776,12 +717,11 @@ static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct spufs_sb_info *info;
- static struct super_operations s_ops = {
+ static const struct super_operations s_ops = {
.alloc_inode = spufs_alloc_inode,
.destroy_inode = spufs_destroy_inode,
.statfs = simple_statfs,
- .delete_inode = spufs_delete_inode,
- .drop_inode = generic_delete_inode,
+ .evict_inode = spufs_evict_inode,
.show_options = generic_show_options,
};
@@ -801,19 +741,20 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return spufs_create_root(sb, data);
}
-static int
-spufs_get_sb(struct file_system_type *fstype, int flags,
- const char *name, void *data, struct vfsmount *mnt)
+static struct dentry *
+spufs_mount(struct file_system_type *fstype, int flags,
+ const char *name, void *data)
{
- return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
+ return mount_single(fstype, flags, data, spufs_fill_super);
}
static struct file_system_type spufs_type = {
.owner = THIS_MODULE,
.name = "spufs",
- .get_sb = spufs_get_sb,
+ .mount = spufs_mount,
.kill_sb = kill_litter_super,
};
+MODULE_ALIAS_FS("spufs");
static int __init spufs_init(void)
{
@@ -833,19 +774,19 @@ static int __init spufs_init(void)
ret = spu_sched_init();
if (ret)
goto out_cache;
- ret = register_filesystem(&spufs_type);
+ ret = register_spu_syscalls(&spufs_calls);
if (ret)
goto out_sched;
- ret = register_spu_syscalls(&spufs_calls);
+ ret = register_filesystem(&spufs_type);
if (ret)
- goto out_fs;
+ goto out_syscalls;
spufs_init_isolated_loader();
return 0;
-out_fs:
- unregister_filesystem(&spufs_type);
+out_syscalls:
+ unregister_spu_syscalls(&spufs_calls);
out_sched:
spu_sched_exit();
out_cache:
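
Both spufs_context_open() and spufs_gang_open() above now use the struct path based dentry_open(), which takes its own references on the mount and dentry, so the old "get references for dget and mntget, released in the error path of *_open()" dance goes away. A hedged sketch of that fd-returning open idiom, with a made-up name (demo_open_fd):

/*
 * Illustrative sketch only.  Reserve an fd, open the path read-only
 * with the caller's credentials, and install the file; on failure the
 * reserved fd is put back and no path references leak.
 */
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>

static int demo_open_fd(struct path *path)
{
	struct file *filp;
	int fd;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	fd_install(fd, filp);
	return fd;
}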
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index 0e9f325c9ff..147069938cf 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/spu.h>
@@ -35,10 +36,9 @@ static int spu_alloc_lscsa_std(struct spu_state *csa)
struct spu_lscsa *lscsa;
unsigned char *p;
- lscsa = vmalloc(sizeof(struct spu_lscsa));
+ lscsa = vzalloc(sizeof(struct spu_lscsa));
if (!lscsa)
return -ENOMEM;
- memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;
/* Set LS pages reserved to allow for user-space mapping. */
@@ -90,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa)
*/
for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
/* XXX This is likely to fail, we should use a special pool
- * similiar to what hugetlbfs does.
+ * similar to what hugetlbfs does.
*/
csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
SPU_64K_PAGE_ORDER);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index f7edba6cb79..4ddf769a64e 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -117,6 +117,9 @@ static int spu_setup_isolated(struct spu_context *ctx)
cond_resched();
}
+ /* clear purge status */
+ out_be64(mfc_cntl, 0);
+
/* put the SPE in kernel mode to allow access to the loader */
sr1 = spu_mfc_sr1_get(ctx->spu);
sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
@@ -206,11 +209,6 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
if (runcntl == 0)
runcntl = SPU_RUNCNTL_RUNNABLE;
- }
-
- if (ctx->flags & SPU_CREATE_NOSCHED) {
- spuctx_switch_state(ctx, SPU_UTIL_USER);
- ctx->ops->runcntl_write(ctx, runcntl);
} else {
unsigned long privcntl;
@@ -219,9 +217,15 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
else
privcntl = SPU_PRIVCNTL_MODE_NORMAL;
- ctx->ops->npc_write(ctx, *npc);
ctx->ops->privcntl_write(ctx, privcntl);
- ctx->ops->runcntl_write(ctx, runcntl);
+ ctx->ops->npc_write(ctx, *npc);
+ }
+
+ ctx->ops->runcntl_write(ctx, runcntl);
+
+ if (ctx->flags & SPU_CREATE_NOSCHED) {
+ spuctx_switch_state(ctx, SPU_UTIL_USER);
+ } else {
if (ctx->state == SPU_STATE_SAVED) {
ret = spu_activate(ctx, 0);
@@ -248,6 +252,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
+ spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
spu_release(ctx);
if (signal_pending(current))
@@ -416,8 +421,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
ret = spu_run_fini(ctx, npc, &status);
spu_yield(ctx);
- spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
-
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
(((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
ctx->stats.libassist++;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2deeeba7ecc..4a0a64fe25d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -22,11 +22,12 @@
#undef DEBUG
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
@@ -39,7 +40,6 @@
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/marker.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -47,6 +47,8 @@
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
+#define CREATE_TRACE_POINTS
+#include "sputrace.h"
struct spu_prio_array {
DECLARE_BITMAP(bitmap, MAX_PRIO);
@@ -81,7 +83,6 @@ static struct timer_list spuloadavg_timer;
#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
-#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
@@ -139,7 +140,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
- ctx->cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
/* Save the current cpu id for spu interrupt routing. */
ctx->last_ran = raw_smp_processor_id();
@@ -166,9 +167,9 @@ void spu_update_sched_info(struct spu_context *ctx)
static int __node_allowed(struct spu_context *ctx, int node)
{
if (nr_cpus_node(node)) {
- cpumask_t mask = node_to_cpumask(node);
+ const struct cpumask *mask = cpumask_of_node(node);
- if (cpus_intersects(mask, ctx->cpus_allowed))
+ if (cpumask_intersects(mask, &ctx->cpus_allowed))
return 1;
}
@@ -312,6 +313,15 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
*/
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
+ /*
+ * "available_spus" counts how many spus are not potentially
+ * going to be used by other affinity gangs whose reference
+ * context is already in place. Although this code seeks to
+ * avoid having affinity gangs with a summed amount of
+ * contexts bigger than the amount of spus in the node,
+ * this may happen sporadically. In this case, available_spus
+ * becomes negative, which is harmless.
+ */
int available_spus;
node = (node < MAX_NUMNODES) ? node : 0;
@@ -321,12 +331,10 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
available_spus = 0;
mutex_lock(&cbe_spu_info[node].list_mutex);
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
- if (spu->ctx && spu->ctx->gang
- && spu->ctx->aff_offset == 0)
- available_spus -=
- (spu->ctx->gang->contexts - 1);
- else
- available_spus++;
+ if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
+ && spu->ctx->gang->aff_ref_spu)
+ available_spus -= spu->ctx->gang->contexts;
+ available_spus++;
}
if (available_spus < ctx->gang->contexts) {
mutex_unlock(&cbe_spu_info[node].list_mutex);
@@ -437,6 +445,11 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
if (ctx->gang)
+ /*
+ * If ctx->gang->aff_sched_count is positive, SPU affinity is
+ * being considered in this gang. Using atomic_dec_if_positive
+ * allow us to skip an explicit check for affinity in this gang
+ */
atomic_dec_if_positive(&ctx->gang->aff_sched_count);
spu_switch_notify(spu, NULL);
@@ -496,7 +509,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
set_bit(ctx->prio, spu_prio->bitmap);
if (!spu_prio->nr_waiting++)
- __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
}
}
@@ -641,9 +654,12 @@ static struct spu *find_victim(struct spu_context *ctx)
if (tmp && tmp->prio > ctx->prio &&
!(tmp->flags & SPU_CREATE_NOSCHED) &&
- (!victim || tmp->prio > victim->prio))
+ (!victim || tmp->prio > victim->prio)) {
victim = spu->ctx;
+ }
}
+ if (victim)
+ get_spu_context(victim);
mutex_unlock(&cbe_spu_info[node].list_mutex);
if (victim) {
@@ -658,6 +674,7 @@ static struct spu *find_victim(struct spu_context *ctx)
* look at another context or give up after X retries.
*/
if (!mutex_trylock(&victim->state_mutex)) {
+ put_spu_context(victim);
victim = NULL;
goto restart;
}
@@ -670,6 +687,7 @@ static struct spu *find_victim(struct spu_context *ctx)
* restart the search.
*/
mutex_unlock(&victim->state_mutex);
+ put_spu_context(victim);
victim = NULL;
goto restart;
}
@@ -687,6 +705,7 @@ static struct spu *find_victim(struct spu_context *ctx)
spu_add_to_rq(victim);
mutex_unlock(&victim->state_mutex);
+ put_spu_context(victim);
return spu;
}
@@ -722,17 +741,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
/* not a candidate for interruptible because it's called either
from the scheduler thread or from spu_deactivate */
mutex_lock(&ctx->state_mutex);
- __spu_schedule(spu, ctx);
+ if (ctx->state == SPU_STATE_SAVED)
+ __spu_schedule(spu, ctx);
spu_release(ctx);
}
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu: The SPU to unschedule from
+ * @ctx: The context currently scheduled on the SPU
+ * @free_spu Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+ int free_spu)
{
int node = spu->node;
mutex_lock(&cbe_spu_info[node].list_mutex);
cbe_spu_info[node].nr_active--;
- spu->alloc_state = SPU_FREE;
+ if (free_spu)
+ spu->alloc_state = SPU_FREE;
spu_unbind_context(spu, ctx);
ctx->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
@@ -810,7 +845,7 @@ static struct spu_context *grab_runnable_context(int prio, int node)
struct list_head *rq = &spu_prio->runq[best];
list_for_each_entry(ctx, rq, rq) {
- /* XXX(hch): check for affinity here aswell */
+ /* XXX(hch): check for affinity here as well */
if (__node_allowed(ctx, node)) {
__spu_del_from_rq(ctx);
goto found;
@@ -832,7 +867,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
if (spu) {
new = grab_runnable_context(max_prio, spu->node);
if (new || force) {
- spu_unschedule(spu, ctx);
+ spu_unschedule(spu, ctx, new == NULL);
if (new) {
if (new->flags & SPU_CREATE_NOSCHED)
wake_up(&new->stop_wq);
@@ -905,7 +940,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
- spu_unschedule(spu, ctx);
+ spu_unschedule(spu, ctx, 0);
if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
spu_add_to_rq(ctx);
} else {
@@ -985,9 +1020,11 @@ static int spusched_thread(void *unused)
struct spu_context *ctx = spu->ctx;
if (ctx) {
+ get_spu_context(ctx);
mutex_unlock(mtx);
spusched_tick(ctx);
mutex_lock(mtx);
+ put_spu_context(ctx);
}
}
mutex_unlock(mtx);
@@ -1030,7 +1067,7 @@ void spuctx_switch_state(struct spu_context *ctx,
node = spu->node;
if (old_state == SPU_UTIL_USER)
atomic_dec(&cbe_spu_info[node].busy_spus);
- if (new_state == SPU_UTIL_USER);
+ if (new_state == SPU_UTIL_USER)
atomic_inc(&cbe_spu_info[node].busy_spus);
}
}
@@ -1057,7 +1094,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
LOAD_INT(c), LOAD_FRAC(c),
count_active_contexts(),
atomic_read(&nr_spu_contexts),
- current->nsproxy->pid_ns->last_pid);
+ task_active_pid_ns(current)->last_pid);
return 0;
}
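
The loadavg line printed here splits a fixed-point average into integer and fractional parts with LOAD_INT()/LOAD_FRAC(). A self-contained sketch of that formatting follows; the 11-bit fractional shift is assumed to match the kernel's definitions of the era and is reproduced here only for illustration.

    #include <stdio.h>

    /* Assumed to mirror the kernel's fixed-point load-average helpers:
     * values carry 11 fractional bits (FIXED_1 == 1 << 11). */
    #define FSHIFT       11
    #define FIXED_1      (1 << FSHIFT)
    #define LOAD_INT(x)  ((x) >> FSHIFT)
    #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

    int main(void)
    {
        unsigned long avg = 3 * FIXED_1 / 2;    /* 1.5 in fixed point */

        /* prints "1.50": integer part, then two fractional digits */
        printf("%lu.%02lu\n", LOAD_INT(avg), LOAD_FRAC(avg));
        return 0;
    }
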
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
index 21a9c952d88..72c905f1ee7 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -284,7 +284,7 @@ static inline void restore_complete(void)
exit_instrs[3] = BR_INSTR;
break;
default:
- /* SPU_Status[R]=1. No additonal instructions. */
+ /* SPU_Status[R]=1. No additional instructions. */
break;
}
spu_sync();
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 8ae8ef9dfc2..bcfd6f063ef 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -35,7 +35,6 @@
#define SPUFS_PS_MAP_SIZE 0x20000
#define SPUFS_MFC_MAP_SIZE 0x1000
#define SPUFS_CNTL_MAP_SIZE 0x1000
-#define SPUFS_CNTL_MAP_SIZE 0x1000
#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE
#define SPUFS_MSS_MAP_SIZE 0x1000
@@ -65,7 +64,6 @@ enum {
};
struct switch_log {
- spinlock_t lock;
wait_queue_head_t wait;
unsigned long head;
unsigned long tail;
@@ -238,22 +236,23 @@ struct spufs_inode_info {
struct spufs_tree_descr {
const char *name;
const struct file_operations *ops;
- int mode;
+ umode_t mode;
size_t size;
};
-extern struct spufs_tree_descr spufs_dir_contents[];
-extern struct spufs_tree_descr spufs_dir_nosched_contents[];
-extern struct spufs_tree_descr spufs_dir_debug_contents[];
+extern const struct spufs_tree_descr spufs_dir_contents[];
+extern const struct spufs_tree_descr spufs_dir_nosched_contents[];
+extern const struct spufs_tree_descr spufs_dir_debug_contents[];
/* system call implementation */
extern struct spufs_calls spufs_calls;
+struct coredump_params;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create(struct nameidata *nd, unsigned int flags,
- mode_t mode, struct file *filp);
+long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
+ umode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
-extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);
+extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm);
extern const struct file_operations spufs_context_fops;
@@ -315,7 +314,7 @@ extern char *isolated_loader;
* we need to call spu_release(ctx) before sleeping, and
* then spu_acquire(ctx) when awoken.
*
- * Returns with state_mutex re-acquired when successfull or
+ * Returns with state_mutex re-acquired when successful or
* with -ERESTARTSYS and the state_mutex dropped when interrupted.
*/
@@ -359,7 +358,7 @@ struct spufs_coredump_reader {
u64 (*get)(struct spu_context *ctx);
size_t size;
};
-extern struct spufs_coredump_reader spufs_coredump_read[];
+extern const struct spufs_coredump_reader spufs_coredump_read[];
extern int spufs_coredump_num_notes;
extern int spu_init_csa(struct spu_state *csa);
@@ -374,9 +373,4 @@ extern void spu_free_lscsa(struct spu_state *csa);
extern void spuctx_switch_state(struct spu_context *ctx,
enum spu_utilization_state new_state);
-#define spu_context_trace(name, ctx, spu) \
- trace_mark(name, "ctx %p spu %p", ctx, spu);
-#define spu_context_nospu_trace(name, ctx) \
- trace_mark(name, "ctx %p", ctx);
-
#endif
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
deleted file mode 100644
index 92d20e993ed..00000000000
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2007 IBM Deutschland Entwicklung GmbH
- * Released under GPL v2.
- *
- * Partially based on net/ipv4/tcp_probe.c.
- *
- * Simple tracing facility for spu contexts.
- */
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/marker.h>
-#include <linux/proc_fs.h>
-#include <linux/wait.h>
-#include <asm/atomic.h>
-#include <asm/uaccess.h>
-#include "spufs.h"
-
-struct spu_probe {
- const char *name;
- const char *format;
- marker_probe_func *probe_func;
-};
-
-struct sputrace {
- ktime_t tstamp;
- int owner_tid; /* owner */
- int curr_tid;
- const char *name;
- int number;
-};
-
-static int bufsize __read_mostly = 16384;
-MODULE_PARM_DESC(bufsize, "Log buffer size (number of records)");
-module_param(bufsize, int, 0);
-
-
-static DEFINE_SPINLOCK(sputrace_lock);
-static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
-static ktime_t sputrace_start;
-static unsigned long sputrace_head, sputrace_tail;
-static struct sputrace *sputrace_log;
-
-static int sputrace_used(void)
-{
- return (sputrace_head - sputrace_tail) % bufsize;
-}
-
-static inline int sputrace_avail(void)
-{
- return bufsize - sputrace_used();
-}
-
-static int sputrace_sprint(char *tbuf, int n)
-{
- const struct sputrace *t = sputrace_log + sputrace_tail % bufsize;
- struct timespec tv =
- ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
-
- return snprintf(tbuf, n,
- "[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
- (unsigned long) tv.tv_sec,
- (unsigned long) tv.tv_nsec,
- t->curr_tid,
- t->name,
- t->owner_tid,
- t->number);
-}
-
-static ssize_t sputrace_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- int error = 0, cnt = 0;
-
- if (!buf || len < 0)
- return -EINVAL;
-
- while (cnt < len) {
- char tbuf[128];
- int width;
-
- error = wait_event_interruptible(sputrace_wait,
- sputrace_used() > 0);
- if (error)
- break;
-
- spin_lock(&sputrace_lock);
- if (sputrace_head == sputrace_tail) {
- spin_unlock(&sputrace_lock);
- continue;
- }
-
- width = sputrace_sprint(tbuf, sizeof(tbuf));
- if (width < len)
- sputrace_tail = (sputrace_tail + 1) % bufsize;
- spin_unlock(&sputrace_lock);
-
- if (width >= len)
- break;
-
- error = copy_to_user(buf + cnt, tbuf, width);
- if (error)
- break;
- cnt += width;
- }
-
- return cnt == 0 ? error : cnt;
-}
-
-static int sputrace_open(struct inode *inode, struct file *file)
-{
- spin_lock(&sputrace_lock);
- sputrace_head = sputrace_tail = 0;
- sputrace_start = ktime_get();
- spin_unlock(&sputrace_lock);
-
- return 0;
-}
-
-static const struct file_operations sputrace_fops = {
- .owner = THIS_MODULE,
- .open = sputrace_open,
- .read = sputrace_read,
-};
-
-static void sputrace_log_item(const char *name, struct spu_context *ctx,
- struct spu *spu)
-{
- spin_lock(&sputrace_lock);
- if (sputrace_avail() > 1) {
- struct sputrace *t = sputrace_log + sputrace_head;
-
- t->tstamp = ktime_get();
- t->owner_tid = ctx->tid;
- t->name = name;
- t->curr_tid = current->pid;
- t->number = spu ? spu->number : -1;
-
- sputrace_head = (sputrace_head + 1) % bufsize;
- } else {
- printk(KERN_WARNING
- "sputrace: lost samples due to full buffer.\n");
- }
- spin_unlock(&sputrace_lock);
-
- wake_up(&sputrace_wait);
-}
-
-static void spu_context_event(void *probe_private, void *call_data,
- const char *format, va_list *args)
-{
- struct spu_probe *p = probe_private;
- struct spu_context *ctx;
- struct spu *spu;
-
- ctx = va_arg(*args, struct spu_context *);
- spu = va_arg(*args, struct spu *);
-
- sputrace_log_item(p->name, ctx, spu);
-}
-
-static void spu_context_nospu_event(void *probe_private, void *call_data,
- const char *format, va_list *args)
-{
- struct spu_probe *p = probe_private;
- struct spu_context *ctx;
-
- ctx = va_arg(*args, struct spu_context *);
-
- sputrace_log_item(p->name, ctx, NULL);
-}
-
-struct spu_probe spu_probes[] = {
- { "spu_bind_context__enter", "ctx %p spu %p", spu_context_event },
- { "spu_unbind_context__enter", "ctx %p spu %p", spu_context_event },
- { "spu_get_idle__enter", "ctx %p", spu_context_nospu_event },
- { "spu_get_idle__found", "ctx %p spu %p", spu_context_event },
- { "spu_get_idle__not_found", "ctx %p", spu_context_nospu_event },
- { "spu_find_victim__enter", "ctx %p", spu_context_nospu_event },
- { "spusched_tick__preempt", "ctx %p spu %p", spu_context_event },
- { "spusched_tick__newslice", "ctx %p", spu_context_nospu_event },
- { "spu_yield__enter", "ctx %p", spu_context_nospu_event },
- { "spu_deactivate__enter", "ctx %p", spu_context_nospu_event },
- { "__spu_deactivate__unload", "ctx %p spu %p", spu_context_event },
- { "spufs_ps_fault__enter", "ctx %p", spu_context_nospu_event },
- { "spufs_ps_fault__sleep", "ctx %p", spu_context_nospu_event },
- { "spufs_ps_fault__wake", "ctx %p spu %p", spu_context_event },
- { "spufs_ps_fault__insert", "ctx %p spu %p", spu_context_event },
- { "spu_acquire_saved__enter", "ctx %p", spu_context_nospu_event },
- { "destroy_spu_context__enter", "ctx %p", spu_context_nospu_event },
- { "spufs_stop_callback__enter", "ctx %p spu %p", spu_context_event },
-};
-
-static int __init sputrace_init(void)
-{
- struct proc_dir_entry *entry;
- int i, error = -ENOMEM;
-
- sputrace_log = kcalloc(bufsize, sizeof(struct sputrace), GFP_KERNEL);
- if (!sputrace_log)
- goto out;
-
- entry = proc_create("sputrace", S_IRUSR, NULL, &sputrace_fops);
- if (!entry)
- goto out_free_log;
-
- for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
- struct spu_probe *p = &spu_probes[i];
-
- error = marker_probe_register(p->name, p->format,
- p->probe_func, p);
- if (error)
- printk(KERN_INFO "Unable to register probe %s\n",
- p->name);
- }
-
- return 0;
-
-out_free_log:
- kfree(sputrace_log);
-out:
- return -ENOMEM;
-}
-
-static void __exit sputrace_exit(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spu_probes); i++)
- marker_probe_unregister(spu_probes[i].name,
- spu_probes[i].probe_func, &spu_probes[i]);
-
- remove_proc_entry("sputrace", NULL);
- kfree(sputrace_log);
-}
-
-module_init(sputrace_init);
-module_exit(sputrace_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.h b/arch/powerpc/platforms/cell/spufs/sputrace.h
new file mode 100644
index 00000000000..db2656aa410
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.h
@@ -0,0 +1,39 @@
+#if !defined(_TRACE_SPUFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SPUFS_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM spufs
+
+TRACE_EVENT(spufs_context,
+ TP_PROTO(struct spu_context *ctx, struct spu *spu, const char *name),
+ TP_ARGS(ctx, spu, name),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, owner_tid)
+ __field(int, number)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->owner_tid = ctx->tid;
+ __entry->number = spu ? spu->number : -1;
+ ),
+
+ TP_printk("%s (ctxthread = %d, spu = %d)",
+ __entry->name, __entry->owner_tid, __entry->number)
+);
+
+#define spu_context_trace(name, ctx, spu) \
+ trace_spufs_context(ctx, spu, __stringify(name))
+#define spu_context_nospu_trace(name, ctx) \
+ trace_spufs_context(ctx, NULL, __stringify(name))
+
+#endif /* _TRACE_SPUFS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE sputrace
+#include <trace/define_trace.h>
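
The new header uses the standard self-defining tracepoint layout: the TRACE_HEADER_MULTI_READ guard lets define_trace.h re-include it, and TRACE_INCLUDE_PATH of "." means the consuming .c file must be compiled with this directory on its include path. A sketch of how such a header is typically consumed is shown below; the assumption that sched.c is the file instantiating the events, and the example_call_sites wrapper, are illustrative.

    /* In exactly one compilation unit (assumed here to be sched.c), the
     * tracepoints are instantiated by defining CREATE_TRACE_POINTS before
     * the include; every other user just includes "sputrace.h". */
    #define CREATE_TRACE_POINTS
    #include "sputrace.h"

    static void example_call_sites(struct spu_context *ctx, struct spu *spu)
    {
        /* Wrappers from sputrace.h; they expand to trace_spufs_context(). */
        spu_context_trace(spu_bind_context__enter, ctx, spu);
        spu_context_nospu_trace(spu_find_victim__enter, ctx);
    }
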
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 3df9a36eb2f..dde35551e74 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -32,7 +32,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 49c87769b1f..a87200a535f 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -1,8 +1,9 @@
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -46,7 +47,7 @@ static long do_spu_run(struct file *filp,
if (filp->f_op != &spufs_context_fops)
goto out;
- i = SPUFS_I(filp->f_path.dentry->d_inode);
+ i = SPUFS_I(file_inode(filp));
ret = spufs_run_spu(i->i_ctx, &npc, &status);
if (put_user(npc, unpc))
@@ -59,23 +60,17 @@ out:
}
static long do_spu_create(const char __user *pathname, unsigned int flags,
- mode_t mode, struct file *neighbor)
+ umode_t mode, struct file *neighbor)
{
- char *tmp;
+ struct path path;
+ struct dentry *dentry;
int ret;
- tmp = getname(pathname);
- ret = PTR_ERR(tmp);
- if (!IS_ERR(tmp)) {
- struct nameidata nd;
-
- ret = path_lookup(tmp, LOOKUP_PARENT|
- LOOKUP_OPEN|LOOKUP_CREATE, &nd);
- if (!ret) {
- ret = spufs_create(&nd, flags, mode, neighbor);
- path_put(&nd.path);
- }
- putname(tmp);
+ dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
+ ret = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+ ret = spufs_create(&path, dentry, flags, mode, neighbor);
+ done_path_create(&path, dentry);
}
return ret;
@@ -84,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
struct spufs_calls spufs_calls = {
.create_thread = do_spu_create,
.spu_run = do_spu_run,
- .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
- .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
.notify_spus_active = do_notify_spus_active,
.owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+ .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+ .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
};
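
Guarding the two coredump callbacks with #ifdef CONFIG_COREDUMP relies on a property of C designated initializers: members that are never named are zero-initialized, so the function pointers are simply NULL when coredump support is compiled out and callers can test them before use. The following standalone illustration uses made-up stand-ins (struct calls, EXAMPLE_COREDUMP), not the kernel's types or config symbols.

    #include <stdio.h>

    /* Illustrative stand-ins, not the kernel's types. */
    struct calls {
        long (*spu_run)(int fd);
        int  (*coredump_notes_size)(void);
    };

    static long run_stub(int fd) { (void)fd; return 0; }

    /* #define EXAMPLE_COREDUMP 1 */   /* uncomment to "enable" the hook */

    #ifdef EXAMPLE_COREDUMP
    static int notes_size_stub(void) { return 0; }
    #endif

    static const struct calls calls = {
        .spu_run = run_stub,
    #ifdef EXAMPLE_COREDUMP
        .coredump_notes_size = notes_size_stub,
    #endif
    };

    int main(void)
    {
        /* With EXAMPLE_COREDUMP undefined, the member is implicitly NULL. */
        printf("coredump hook present: %s\n",
               calls.coredump_notes_size ? "yes" : "no");
        return 0;
    }
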