struct list_head card_list; /* list of usr_maps for card */
struct list_head pin_list; /* list of pinned memory for dev */
+ int write; /* writable map? useful in unmapping */
};
static inline void genwqe_mapping_init(struct dma_mapping *m,
				       enum dma_mapping_type type)
{
memset(m, 0, sizeof(*m));
m->type = type;
+ m->write = 1; /* Assume the maps we create are R/W */
}
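
/*
 * Usage sketch (illustrative only, not part of the patch): a mapping
 * that the device only reads can clear 'write' right after init, so
 * the unmap path will not dirty the pages on release. Assumes the
 * GENWQE_MAPPING_RAW mapping type from card_base.h.
 */
static inline void example_init_readonly_map(struct dma_mapping *m)
{
	genwqe_mapping_init(m, GENWQE_MAPPING_RAW); /* sets m->write = 1 */
	m->write = 0;	/* device reads only; skip set_page_dirty later */
}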
/**
* @user_size: size of user-space memory area
* @page: buffer for partial pages if needed
* @page_dma_addr: dma address of the partial pages
+ * @write: should we write it back to userspace?
*/
struct genwqe_sgl {
dma_addr_t sgl_dma_addr;
void __user *user_addr; /* user-space base-address */
size_t user_size; /* size of memory area */
+ int write;
+
unsigned long nr_pages;
unsigned long fpage_offs;
size_t fpage_size;
};
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size);
+ void __user *user_addr, size_t user_size, int write);
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list);
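
/*
 * Call-sequence sketch (illustrative only, not part of the patch):
 * with the new 'write' parameter, a buffer the device only reads can
 * be mapped with write=0, so genwqe_free_sync_sgl() below skips the
 * copy_to_user() write-back. The dma_list is assumed to be prepared
 * as elsewhere in this driver.
 */
static int example_sync_sgl_readonly(struct genwqe_dev *cd,
				     struct genwqe_sgl *sgl,
				     void __user *buf, size_t len,
				     dma_addr_t *dma_list)
{
	int rc;

	rc = genwqe_alloc_sync_sgl(cd, sgl, buf, len, 0 /* read-only */);
	if (rc != 0)
		return rc;

	rc = genwqe_setup_sgl(cd, sgl, dma_list);
	if (rc == 0) {
		/* ... hand sgl to the device, wait for completion ... */
	}
	genwqe_free_sync_sgl(cd, sgl);	/* write=0: no copy_to_user() */
	return rc;
}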
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size)
+ void __user *user_addr, size_t user_size, int write)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
sgl->user_addr = user_addr;
sgl->user_size = user_size;
+ sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
+ size_t offset;
+ unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
- if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
- sgl->fpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ res = copy_to_user(sgl->user_addr,
+ sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying fpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
sgl->fpage = NULL;
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
- if (copy_to_user(sgl->user_addr + sgl->user_size -
- sgl->lpage_size, sgl->lpage,
- sgl->lpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ offset = sgl->user_size - sgl->lpage_size;
+ res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
+ sgl->lpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying lpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- 1, /* write by caller */
+ m->write, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- free_user_pages(m->page_list, rc, 0);
+ free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
return 0;
fail_free_user_pages:
- free_user_pages(m->page_list, m->nr_pages, 0);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
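
/*
 * For reference, a minimal sketch of what free_user_pages() is assumed
 * to do with its 'dirty' argument (the real helper lives earlier in
 * card_utils.c): only pages from writable mappings are marked dirty
 * before being released.
 */
static int example_free_user_pages(struct page **page_list,
				   unsigned int nr_pages, int dirty)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (page_list[i] == NULL)
			continue;
		if (dirty)
			set_page_dirty_lock(page_list[i]);
		put_page(page_list[i]);
	}
	return 0;
}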
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- free_user_pages(m->page_list, m->nr_pages, 1);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;