spu_acquire(ctx);
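+	/*
+	 * A saved context has its local store in vmalloc'ed kernel
+	 * pages, which can be mapped cacheable; a running context
+	 * maps the SPE's real local store, which needs an uncached,
+	 * guarded mapping like any other MMIO area.
+	 */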
- if (ctx->state == SPU_STATE_SAVED)
+ if (ctx->state == SPU_STATE_SAVED) {
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ & ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
- else
+ } else {
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ | _PAGE_NO_CACHE | _PAGE_GUARDED);
page = pfn_to_page((ctx->spu->local_store_phys + offset)
>> PAGE_SHIFT);
-
+ }
spu_release(ctx);
if (type)
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
-static int spufs_cntl_open(struct inode *inode, struct file *file)
+static u64 spufs_cntl_get(void *data)
{
- struct spufs_inode_info *i = SPUFS_I(inode);
- struct spu_context *ctx = i->i_ctx;
+ struct spu_context *ctx = data;
+ u64 val;
- file->private_data = ctx;
- file->f_mapping = inode->i_mapping;
- ctx->cntl = inode->i_mapping;
- return 0;
+ spu_acquire(ctx);
+ val = ctx->ops->status_read(ctx);
+ spu_release(ctx);
+
+ return val;
}
-static ssize_t
-spufs_cntl_read(struct file *file, char __user *buffer,
- size_t size, loff_t *pos)
+static void spufs_cntl_set(void *data, u64 val)
{
- /* FIXME: read from spu status */
- return -EINVAL;
+ struct spu_context *ctx = data;
+
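+	/* values written here go straight to the SPU run control register */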
+ spu_acquire(ctx);
+ ctx->ops->runcntl_write(ctx, val);
+ spu_release(ctx);
}
-static ssize_t
-spufs_cntl_write(struct file *file, const char __user *buffer,
- size_t size, loff_t *pos)
+static int spufs_cntl_open(struct inode *inode, struct file *file)
{
- /* FIXME: write to runctl bit */
- return -EINVAL;
+ struct spufs_inode_info *i = SPUFS_I(inode);
+ struct spu_context *ctx = i->i_ctx;
+
+ file->private_data = ctx;
+ file->f_mapping = inode->i_mapping;
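+	/* remember the mapping so it can be torn down on a context switch */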
+ ctx->cntl = inode->i_mapping;
+ return simple_attr_open(inode, file, spufs_cntl_get,
+ spufs_cntl_set, "0x%08lx");
}
static struct file_operations spufs_cntl_fops = {
.open = spufs_cntl_open,
- .read = spufs_cntl_read,
- .write = spufs_cntl_write,
+ .release = simple_attr_close,
+ .read = simple_attr_read,
+ .write = simple_attr_write,
.mmap = spufs_cntl_mmap,
};
return nonseekable_open(inode, file);
}
+/*
+ * Read as many bytes from the mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 mbox_data;
- int ret;
+ u32 mbox_data, __user *udata;
+ ssize_t count;
if (len < 4)
return -EINVAL;
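+	/* check the whole buffer once so __put_user can be used in the loop */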
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ udata = (void __user *)buf;
+
spu_acquire(ctx);
- ret = ctx->ops->mbox_read(ctx, &mbox_data);
+ for (count = 0; (count + 4) <= len; count += 4, udata++) {
+ int ret;
+ ret = ctx->ops->mbox_read(ctx, &mbox_data);
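+		/* mailbox is empty: return whatever we have read so far */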
+ if (ret == 0)
+ break;
+
+ /*
+ * at the end of the mapped area, we can fault
+ * but still need to return the data we have
+ * read successfully so far.
+ */
+ ret = __put_user(mbox_data, udata);
+ if (ret) {
+ if (!count)
+ count = -EFAULT;
+ break;
+ }
+ }
spu_release(ctx);
- if (!ret)
- return -EAGAIN;
-
- if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
- return -EFAULT;
+ if (!count)
+ count = -EAGAIN;
- return 4;
+ return count;
}
static struct file_operations spufs_mbox_fops = {
kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
+/*
+ * Read as many bytes from the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * any data is available, but return when we have been able to
+ * read something.
+ */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 ibox_data;
- ssize_t ret;
+ u32 ibox_data, __user *udata;
+ ssize_t count;
if (len < 4)
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ udata = (void __user *)buf;
+
spu_acquire(ctx);
- ret = 0;
+ /* wait only for the first element */
+ count = 0;
if (file->f_flags & O_NONBLOCK) {
if (!spu_ibox_read(ctx, &ibox_data))
- ret = -EAGAIN;
+ count = -EAGAIN;
} else {
- ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+ count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
}
+ if (count)
+ goto out;
- spu_release(ctx);
+ /* if we can't write at all, return -EFAULT */
+ count = __put_user(ibox_data, udata);
+ if (count)
+ goto out;
- if (ret)
- return ret;
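+	/* read any further elements without blocking */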
+ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+ int ret;
+ ret = ctx->ops->ibox_read(ctx, &ibox_data);
+ if (ret == 0)
+ break;
+ /*
+ * at the end of the mapped area, we can fault
+ * but still need to return the data we have
+ * read successfully so far.
+ */
+ ret = __put_user(ibox_data, udata);
+ if (ret)
+ break;
+ }
- ret = 4;
- if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
- ret = -EFAULT;
+out:
+ spu_release(ctx);
- return ret;
+ return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
+/*
+ * Write as many bytes to the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - the mailbox is full
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * space is available, but return when we have been able to
+ * write something.
+ */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 wbox_data;
- int ret;
+ u32 wbox_data, __user *udata;
+ ssize_t count;
if (len < 4)
return -EINVAL;
- if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+ udata = (void __user *)buf;
+ if (!access_ok(VERIFY_READ, buf, len))
+ return -EFAULT;
+
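+	/* fetch the first element before taking the context lock */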
+ if (__get_user(wbox_data, udata))
return -EFAULT;
spu_acquire(ctx);
- ret = 0;
+ /*
+ * make sure we can at least write one element, by waiting
+ * in case of !O_NONBLOCK
+ */
+ count = 0;
if (file->f_flags & O_NONBLOCK) {
if (!spu_wbox_write(ctx, wbox_data))
- ret = -EAGAIN;
+ count = -EAGAIN;
} else {
- ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+ count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
}
- spu_release(ctx);
+ if (count)
+ goto out;
+
+	/* write as much as possible */
+ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+ int ret;
+ ret = __get_user(wbox_data, udata);
+ if (ret)
+ break;
+
+ ret = spu_wbox_write(ctx, wbox_data);
+ if (ret == 0)
+ break;
+ }
- return ret ? ret : sizeof wbox_data;
+out:
+ spu_release(ctx);
+ return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
.mmap = spufs_mfc_mmap,
};
+
+static int spufs_recycle_open(struct inode *inode, struct file *file)
+{
+ file->private_data = SPUFS_I(inode)->i_ctx;
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t spufs_recycle_write(struct file *file,
+ const char __user *buffer, size_t size, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ if (!(ctx->flags & SPU_CREATE_ISOLATE))
+ return -EINVAL;
+
+ if (size < 1)
+ return -EINVAL;
+
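+	/* any non-empty write triggers a reload of the isolated SPE app */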
+ ret = spu_recycle_isolated(ctx);
+
+ if (ret)
+ return ret;
+ return size;
+}
+
+static struct file_operations spufs_recycle_fops = {
+ .open = spufs_recycle_open,
+ .write = spufs_recycle_write,
+};
+
static void spufs_npc_set(void *data, u64 val)
{
struct spu_context *ctx = data;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
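+/*
+ * object-id holds an opaque tag set by user space, typically a
+ * pointer that lets a debugger locate the SPE executable image.
+ */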
+static u64 spufs_object_id_get(void *data)
+{
+ struct spu_context *ctx = data;
+ return ctx->object_id;
+}
+
+static void spufs_object_id_set(void *data, u64 id)
+{
+ struct spu_context *ctx = data;
+ ctx->object_id = id;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
+ spufs_object_id_set, "0x%llx\n");
+
struct tree_descr spufs_dir_contents[] = {
{ "mem", &spufs_mem_fops, 0666, },
{ "regs", &spufs_regs_fops, 0666, },
{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
{ "event_mask", &spufs_event_mask_ops, 0666, },
{ "srr0", &spufs_srr0_ops, 0666, },
+ { "psmap", &spufs_psmap_fops, 0666, },
{ "phys-id", &spufs_id_ops, 0666, },
+ { "object-id", &spufs_object_id_ops, 0666, },
+ {},
+};
+
+struct tree_descr spufs_dir_nosched_contents[] = {
+ { "mem", &spufs_mem_fops, 0666, },
+ { "mbox", &spufs_mbox_fops, 0444, },
+ { "ibox", &spufs_ibox_fops, 0444, },
+ { "wbox", &spufs_wbox_fops, 0222, },
+ { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+ { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+ { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+ { "signal1", &spufs_signal1_fops, 0666, },
+ { "signal2", &spufs_signal2_fops, 0666, },
+ { "signal1_type", &spufs_signal1_type, 0666, },
+ { "signal2_type", &spufs_signal2_type, 0666, },
+ { "mss", &spufs_mss_fops, 0666, },
+ { "mfc", &spufs_mfc_fops, 0666, },
+ { "cntl", &spufs_cntl_fops, 0666, },
+ { "npc", &spufs_npc_ops, 0666, },
{ "psmap", &spufs_psmap_fops, 0666, },
+ { "phys-id", &spufs_id_ops, 0666, },
+ { "object-id", &spufs_object_id_ops, 0666, },
+ { "recycle", &spufs_recycle_fops, 0222, },
{},
};