[Devel] [PATCH rh7] ve: Add a handle to C/R aio ctx
Kirill Tkhai
ktkhai at virtuozzo.com
Mon Feb 15 07:05:40 PST 2016
This adds an ioctl that allows setting the ring buffer tail
and waiting until outstanding aio requests are finished.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
fs/aio.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++
fs/proc/base.c | 27 ++++++++++++++++++
include/linux/aio.h | 15 ++++++++++
3 files changed, 117 insertions(+)
diff --git a/fs/aio.c b/fs/aio.c
index 8ec32e2..6252339 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1847,3 +1847,78 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
}
return ret;
}
+
+#ifdef CONFIG_VE
+/*
+ * ve_aio_set_tail - move the aio ring buffer tail to @tail.
+ *
+ * Used during checkpoint/restore to put the ring of a restored ioctx
+ * back into its dumped state.  Both the cached ctx->tail and the tail
+ * word in the user-visible ring header page are updated, under
+ * ring_lock + completion_lock so the update cannot race with event
+ * completion writing to the ring.
+ *
+ * Returns 0 on success, -EINVAL if @tail is outside the ring.
+ */
+static int ve_aio_set_tail(struct kioctx *ctx, unsigned tail)
+{
+ struct aio_ring *ring;
+ int ret = -EINVAL;
+
+ mutex_lock(&ctx->ring_lock);
+ spin_lock_irq(&ctx->completion_lock);
+
+ /* Valid slots are [0, nr_events); reject anything past the ring. */
+ if (tail >= ctx->nr_events)
+ goto out;
+
+ ctx->tail = tail;
+
+ /* Mirror the new tail into the mapped ring header (page 0). */
+ ring = kmap_atomic(ctx->ring_pages[0]);
+ ring->tail = tail;
+ kunmap_atomic(ring);
+ ret = 0;
+out:
+ spin_unlock_irq(&ctx->completion_lock);
+ mutex_unlock(&ctx->ring_lock);
+ return ret;
+}
+
+/*
+ * has_reqs_active - true while the ioctx still has requests in flight.
+ *
+ * NOTE(review): assumes reqs_active - completed_events equals the
+ * number of not-yet-completed requests (completed_events being
+ * completions not yet folded back into reqs_active) -- confirm against
+ * the aio.c accounting.  Both counters are sampled under
+ * completion_lock so the pair is read consistently.
+ */
+static bool has_reqs_active(struct kioctx *ctx)
+{
+ unsigned long flags;
+ unsigned nr;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ nr = atomic_read(&ctx->reqs_active);
+ nr -= ctx->completed_events;
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+ return !!nr;
+}
+
+/*
+ * Sleep interruptibly on the ioctx waitqueue until no requests remain
+ * in flight.  Returns 0, or -ERESTARTSYS if interrupted by a signal.
+ */
+static int ve_aio_wait_inflight_reqs(struct kioctx *ioctx)
+{
+ return wait_event_interruptible(ioctx->wait, !has_reqs_active(ioctx));
+}
+
+/*
+ * ve_aio_ioctl - C/R ioctl backend for /proc/<pid>/aio.
+ *
+ * @task: task the proc file refers to (must be current, see below)
+ * @cmd:  VE_AIO_IOC_SET_TAIL or VE_AIO_IOC_WAIT_ACTIVE
+ * @arg:  userspace pointer to struct ve_ioc_arg
+ *
+ * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL on a
+ * foreign task, unknown ctx_id, bad tail value or unknown command.
+ */
+int ve_aio_ioctl(struct task_struct *task, unsigned int cmd, unsigned long arg)
+{
+ struct ve_ioc_arg karg;
+ struct kioctx *ioctx;
+ int ret;
+
+ /* lookup_ioctx() searches current->mm, so only self-operation works. */
+ if (task != current)
+ return -EINVAL;
+
+ if (copy_from_user(&karg, (void __user *)arg, sizeof(karg)))
+ return -EFAULT;
+
+ /* Takes a reference on the ioctx; dropped via put_ioctx() below. */
+ ioctx = lookup_ioctx(karg.ctx_id);
+ if (!ioctx)
+ return -EINVAL;
+
+ switch (cmd) {
+ case VE_AIO_IOC_SET_TAIL:
+ ret = ve_aio_set_tail(ioctx, karg.val);
+ break;
+ case VE_AIO_IOC_WAIT_ACTIVE:
+ ret = ve_aio_wait_inflight_reqs(ioctx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ put_ioctx(ioctx);
+
+ return ret;
+}
+#endif
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6d2faa0..ae546aa 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -87,6 +87,7 @@
#include <linux/slab.h>
#include <linux/flex_array.h>
#include <linux/posix-timers.h>
+#include <linux/aio.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
@@ -2351,8 +2352,33 @@ static const struct file_operations proc_timers_operations = {
.llseek = seq_lseek,
.release = seq_release_private,
};
+
+
#endif /* CONFIG_CHECKPOINT_RESTORE */
+#ifdef CONFIG_VE
+/*
+ * ioctl handler for /proc/<pid>/aio: resolve the task the proc inode
+ * refers to and forward cmd/arg to ve_aio_ioctl() (fs/aio.c), which
+ * additionally insists that the task is current.
+ */
+static long proc_aio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ struct task_struct *task;
+ int ret;
+
+ task = get_proc_task(inode);
+ if (!task)
+ return -ESRCH;
+
+ ret = ve_aio_ioctl(task, cmd, arg);
+
+ put_task_struct(task);
+
+ return ret;
+}
+
+/* No read/write/mmap: the file is driven purely through ioctl. */
+static const struct file_operations proc_aio_operations = {
+ .unlocked_ioctl = proc_aio_ioctl,
+};
+#endif /* CONFIG_VE */
+
static struct dentry *proc_pident_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
@@ -2975,6 +3001,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
#ifdef CONFIG_CHECKPOINT_RESTORE
REG("timers", S_IRUGO, proc_timers_operations),
+ REG("aio", S_IRUGO|S_IWUSR, proc_aio_operations),
#endif
};
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0aa7dd3..f40a32b 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -16,6 +16,15 @@ struct kiocb;
#define AIO_MAX_NR_DEFAULT 0x10000
+/*
+ * ioctl commands for the /proc/<pid>/aio C/R interface (fs/proc/base.c).
+ * NOTE(review): raw numbers rather than _IOW() encoding -- deliberate?
+ * See Documentation/ioctl/ioctl-number.txt.
+ */
+#define VE_AIO_IOC_SET_TAIL 1
+#define VE_AIO_IOC_WAIT_ACTIVE 2
+
+/* Argument block copied from userspace by ve_aio_ioctl(). */
+struct ve_ioc_arg
+{
+ aio_context_t ctx_id; /* which aio context to operate on */
+ unsigned val; /* new tail for SET_TAIL; unused for WAIT_ACTIVE */
+};
+
/*
* We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
* cancelled or completed (this makes a certain amount of sense because
@@ -108,6 +117,10 @@ void aio_kernel_init_callback(struct kiocb *iocb,
void (*complete)(u64 user_data, long res),
u64 user_data);
int aio_kernel_submit(struct kiocb *iocb);
+#ifdef CONFIG_VE
+int ve_aio_ioctl(struct task_struct *, unsigned int, unsigned long);
+#endif
+
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_put_req(struct kiocb *iocb) { }
@@ -119,6 +132,8 @@ static inline long do_io_submit(aio_context_t ctx_id, long nr,
bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
+/*
+ * !CONFIG_AIO stub.  Must be 'static inline': a plain static definition
+ * in a header emits a "defined but not used" warning (and a dead copy)
+ * in every translation unit that includes linux/aio.h.
+ */
+static inline int ve_aio_ioctl(struct task_struct *task, unsigned int cmd,
+ unsigned long arg) { return 0; }
#endif /* CONFIG_AIO */
static inline struct kiocb *list_kiocb(struct list_head *h)
More information about the Devel
mailing list