misc: fastrpc: Add support for dmabuf exporter
authorSrinivas Kandagatla <srinivas.kandagatla@linaro.org>
Fri, 8 Feb 2019 17:11:27 +0000 (17:11 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Feb 2019 09:40:30 +0000 (10:40 +0100)
User processes may deal with large buffers, and may also need to pass
buffers from one compute context bank to another for complex DSP
algorithms.

This patch adds support to fastrpc to make it a proper dmabuf exporter
to avoid making copies of buffers.

Co-developed-by: Thierry Escande <thierry.escande@linaro.org>
Signed-off-by: Thierry Escande <thierry.escande@linaro.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/misc/fastrpc.c
include/uapi/misc/fastrpc.h

index ceb498487569951a05887f50ae7d64b6268172fc..4b0db33896dfc7a882e38ec5d0927d18dd7e3da4 100644 (file)
@@ -106,10 +106,20 @@ struct fastrpc_invoke_rsp {
 
 struct fastrpc_buf {
        struct fastrpc_user *fl;
+       struct dma_buf *dmabuf;
        struct device *dev;
        void *virt;
        u64 phys;
        u64 size;
+       /* Lock for dma buf attachments */
+       struct mutex lock;
+       struct list_head attachments;
+};
+
+/* Per-attachment state: a device-local scatterlist into the exported buffer */
+struct fastrpc_dma_buf_attachment {
+       struct device *dev;     /* importing device (attachment->dev) */
+       struct sg_table sgt;    /* built once at attach time via dma_get_sgtable() */
+       struct list_head node;  /* link in fastrpc_buf->attachments, under buf->lock */
+};
 
 struct fastrpc_map {
@@ -246,6 +256,9 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
        if (!buf)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&buf->attachments);
+       mutex_init(&buf->lock);
+
        buf->fl = fl;
        buf->virt = NULL;
        buf->phys = 0;
@@ -360,6 +373,111 @@ err_idr:
        return ERR_PTR(ret);
 }
 
+/*
+ * dma_buf_ops.map_dma_buf: map the attachment's preallocated sg_table for
+ * DMA by the importing device. The table itself was built at attach time,
+ * so this only performs the device mapping.
+ */
+static struct sg_table *
+fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+                   enum dma_data_direction dir)
+{
+       struct fastrpc_dma_buf_attachment *a = attachment->priv;
+       struct sg_table *table;
+
+       table = &a->sgt;
+
+       /* dma_map_sg() returns 0 (number of mapped entries) on failure */
+       if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
+               return ERR_PTR(-ENOMEM);
+
+       return table;
+}
+
+/* dma_buf_ops.unmap_dma_buf: undo fastrpc_map_dma_buf() for this importer */
+static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
+                                 struct sg_table *table,
+                                 enum dma_data_direction dir)
+{
+       dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
+}
+
+/*
+ * dma_buf_ops.release: called when the final dmabuf reference is dropped;
+ * frees the backing coherent allocation.
+ */
+static void fastrpc_release(struct dma_buf *dmabuf)
+{
+       struct fastrpc_buf *buffer = dmabuf->priv;
+
+       fastrpc_buf_free(buffer);
+}
+
+/*
+ * dma_buf_ops.attach: build a per-attachment sg_table describing the
+ * coherent buffer and register the attachment on the buffer's list.
+ * Returns 0 on success or a negative errno.
+ */
+static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
+                                 struct dma_buf_attachment *attachment)
+{
+       struct fastrpc_dma_buf_attachment *a;
+       struct fastrpc_buf *buffer = dmabuf->priv;
+       int ret;
+
+       a = kzalloc(sizeof(*a), GFP_KERNEL);
+       if (!a)
+               return -ENOMEM;
+
+       ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
+                             FASTRPC_PHYS(buffer->phys), buffer->size);
+       if (ret < 0) {
+               dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+               kfree(a);       /* fix: don't leak the attachment on error */
+               return -EINVAL;
+       }
+
+       a->dev = attachment->dev;
+       INIT_LIST_HEAD(&a->node);
+       attachment->priv = a;
+
+       mutex_lock(&buffer->lock);
+       list_add(&a->node, &buffer->attachments);
+       mutex_unlock(&buffer->lock);
+
+       return 0;
+}
+
+/*
+ * dma_buf_ops.detach: unlink and free the per-attachment state.
+ * (Name keeps the existing "detatch" spelling used by the ops table.)
+ */
+static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
+                                   struct dma_buf_attachment *attachment)
+{
+       struct fastrpc_dma_buf_attachment *a = attachment->priv;
+       struct fastrpc_buf *buffer = dmabuf->priv;
+
+       mutex_lock(&buffer->lock);
+       list_del(&a->node);
+       mutex_unlock(&buffer->lock);
+       sg_free_table(&a->sgt); /* fix: release the table built at attach time */
+       kfree(a);
+}
+
+/*
+ * dma_buf_ops.map: kernel mapping of page @pgnum. The coherent allocation
+ * is contiguously CPU-mapped at buf->virt, so this is simple arithmetic.
+ */
+static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+       struct fastrpc_buf *buf = dmabuf->priv;
+
+       return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
+}
+
+/* dma_buf_ops.vmap: whole-buffer kernel mapping — the coherent CPU address */
+static void *fastrpc_vmap(struct dma_buf *dmabuf)
+{
+       struct fastrpc_buf *buf = dmabuf->priv;
+
+       return buf->virt;
+}
+
+/*
+ * dma_buf_ops.mmap: map the buffer into userspace through the coherent
+ * DMA helper; dma_mmap_coherent() rejects VMAs larger than the allocation.
+ */
+static int fastrpc_mmap(struct dma_buf *dmabuf,
+                       struct vm_area_struct *vma)
+{
+       struct fastrpc_buf *buf = dmabuf->priv;
+       size_t size = vma->vm_end - vma->vm_start;
+
+       return dma_mmap_coherent(buf->dev, vma, buf->virt,
+                                FASTRPC_PHYS(buf->phys), size);
+}
+
+/* Exporter callbacks for buffers created via FASTRPC_IOCTL_ALLOC_DMA_BUFF */
+static const struct dma_buf_ops fastrpc_dma_buf_ops = {
+       .attach = fastrpc_dma_buf_attach,
+       .detach = fastrpc_dma_buf_detatch,
+       .map_dma_buf = fastrpc_map_dma_buf,
+       .unmap_dma_buf = fastrpc_unmap_dma_buf,
+       .mmap = fastrpc_mmap,
+       .map = fastrpc_kmap,
+       .vmap = fastrpc_vmap,
+       .release = fastrpc_release,
+};
+
 static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                              u64 len, struct fastrpc_map **ppmap)
 {
@@ -906,6 +1024,66 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
        return 0;
 }
 
+/*
+ * FASTRPC_IOCTL_FREE_DMA_BUFF handler: drop the extra reference taken by
+ * the ALLOC_DMA_BUFF ioctl. Reads a dmabuf fd (int) from @argp.
+ */
+static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp)
+{
+       struct dma_buf *buf;
+       int info;
+
+       if (copy_from_user(&info, argp, sizeof(info)))
+               return -EFAULT;
+
+       buf = dma_buf_get(info);
+       if (IS_ERR_OR_NULL(buf))
+               return -EINVAL;
+
+       /*
+        * fix: only act on buffers this driver exported. Issuing the double
+        * dma_buf_put() below on an arbitrary user-supplied fd would let
+        * userspace underflow the refcount of any dmabuf in the system.
+        */
+       if (buf->ops != &fastrpc_dma_buf_ops) {
+               dma_buf_put(buf);
+               return -EINVAL;
+       }
+
+       /*
+        * one for the last get and other for the ALLOC_DMA_BUFF ioctl
+        */
+       dma_buf_put(buf);
+       dma_buf_put(buf);
+
+       return 0;
+}
+
+/*
+ * FASTRPC_IOCTL_ALLOC_DMA_BUFF handler: allocate a coherent buffer on the
+ * session context device, export it as a dmabuf, and return an fd plus the
+ * updated struct fastrpc_alloc_dma_buf to userspace. bp.flags is never read.
+ */
+static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
+{
+       struct fastrpc_alloc_dma_buf bp;
+       DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+       struct fastrpc_buf *buf = NULL;
+       int err;
+
+       if (copy_from_user(&bp, argp, sizeof(bp)))
+               return -EFAULT;
+
+       err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
+       if (err)
+               return err;
+       exp_info.ops = &fastrpc_dma_buf_ops;
+       exp_info.size = bp.size;
+       exp_info.flags = O_RDWR;
+       exp_info.priv = buf;
+       /* from here on, dropping the last dmabuf ref frees buf via .release */
+       buf->dmabuf = dma_buf_export(&exp_info);
+       if (IS_ERR(buf->dmabuf)) {
+               err = PTR_ERR(buf->dmabuf);
+               fastrpc_buf_free(buf);
+               return err;
+       }
+
+       /* NOTE(review): O_ACCMODE as fd flags looks wrong — O_CLOEXEC intended? */
+       bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
+       if (bp.fd < 0) {
+               dma_buf_put(buf->dmabuf);
+               return -EINVAL;
+       }
+
+       /*
+        * NOTE(review): if this copy fails, the fd installed above still
+        * references the dmabuf while we drop our only ref here — verify
+        * the refcount balance on this error path.
+        */
+       if (copy_to_user(argp, &bp, sizeof(bp))) {
+               dma_buf_put(buf->dmabuf);
+               return -EFAULT;
+       }
+
+       /* extra ref paired with the double put in fastrpc_dmabuf_free() */
+       get_dma_buf(buf->dmabuf);
+
+       return 0;
+}
+
 static int fastrpc_init_attach(struct fastrpc_user *fl)
 {
        struct fastrpc_invoke_args args[1];
@@ -970,6 +1148,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
        case FASTRPC_IOCTL_INIT_CREATE:
                err = fastrpc_init_create_process(fl, argp);
                break;
+       case FASTRPC_IOCTL_FREE_DMA_BUFF:
+               err = fastrpc_dmabuf_free(fl, argp);
+               break;
+       case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
+               err = fastrpc_dmabuf_alloc(fl, argp);
+               break;
        default:
                err = -ENOTTY;
                break;
index 32d191c3b7bc8fca4f0b7db201090327b4de09e5..6d701af9fc426689dbf605fc356677d2192d005c 100644 (file)
@@ -5,6 +5,8 @@
 
 #include <linux/types.h>
 
+#define FASTRPC_IOCTL_ALLOC_DMA_BUFF   _IOWR('R', 1, struct fastrpc_alloc_dma_buf)
+#define FASTRPC_IOCTL_FREE_DMA_BUFF    _IOWR('R', 2, __u32)
 #define FASTRPC_IOCTL_INVOKE           _IOWR('R', 3, struct fastrpc_invoke)
 #define FASTRPC_IOCTL_INIT_ATTACH      _IO('R', 4)
 #define FASTRPC_IOCTL_INIT_CREATE      _IOWR('R', 5, struct fastrpc_init_create)
@@ -30,4 +32,10 @@ struct fastrpc_init_create {
        __u64 file;     /* pointer to elf file */
 };
 
+/* ioctl argument for FASTRPC_IOCTL_ALLOC_DMA_BUFF */
+struct fastrpc_alloc_dma_buf {
+       __s32 fd;       /* out: dmabuf fd installed for the caller */
+       __u32 flags;    /* in: flags to map with (currently unused by the driver) */
+       __u64 size;     /* in: buffer size in bytes */
+};
+
 #endif /* __QCOM_FASTRPC_H__ */