* @svq: tx virtqueue
* @rbufs: kernel address of rx buffers
* @sbufs: kernel address of tx buffers
+ * @num_bufs: total number of buffers for rx and tx
* @last_sbuf: index of last tx buffer used
* @bufs_dma: dma base addr of the buffers
* @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
struct virtio_device *vdev;
struct virtqueue *rvq, *svq;
void *rbufs, *sbufs;
+ unsigned int num_bufs;
int last_sbuf;
dma_addr_t bufs_dma;
struct mutex tx_lock;
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)
/*
- * We're allocating 512 buffers of 512 bytes for communications, and then
- * using the first 256 buffers for RX, and the last 256 buffers for TX.
+ * We're allocating buffers of 512 bytes each for communications. The
+ * number of buffers will be computed from the vring size, up to a
+ * maximum of 512 buffers (256 in each direction).
*
* Each buffer will have 16 bytes for the msg header and 496 bytes for
* the payload.
*
- * This will require a total space of 256KB for the buffers.
+ * This will utilize a maximum total space of 256KB for the buffers.
*
* We might also want to add support for user-provided buffers in time.
* This will allow bigger buffer size flexibility, and can also be used
* to achieve zero-copy messaging.
*
* Note that these numbers are purely a decision of this driver - we
* can change this without changing anything in the firmware of the remote
* processor.
*/
-#define RPMSG_NUM_BUFS (512)
+#define MAX_RPMSG_NUM_BUFS (512)
#define RPMSG_BUF_SIZE (512)
-#define RPMSG_TOTAL_BUF_SPACE (RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
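The 16-byte header / 496-byte payload split mentioned in the comment above can be sanity-checked with a small standalone program. This is only an illustration: the field names below follow the general rpmsg wire format and are not taken from this excerpt.

/* Standalone sketch (not driver code): verifies the 16-byte header and
 * 496-byte payload split described in the comment above.  Field names
 * are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define RPMSG_BUF_SIZE	(512)

struct rpmsg_hdr_example {
	uint32_t src;		/* source address */
	uint32_t dst;		/* destination address */
	uint32_t reserved;
	uint16_t len;		/* payload length */
	uint16_t flags;
	uint8_t data[];		/* payload starts here */
} __attribute__((packed));

int main(void)
{
	printf("header: %zu bytes, payload: %zu bytes\n",
	       sizeof(struct rpmsg_hdr_example),
	       RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr_example));
	return 0;
}

Compiled and run, this prints "header: 16 bytes, payload: 496 bytes", matching the layout the driver assumes for each 512-byte buffer.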
/*
* Local addresses are dynamically allocated on-demand.
* either pick the next unused tx buffer
* (half of our buffers are used for sending messages)
*/
- if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
+ if (vrp->last_sbuf < vrp->num_bufs / 2)
ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
/* or recycle a used one */
else
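For context, here is a hedged sketch of the helper this hunk sits in. The function name and the exact placement of tx_lock are assumptions pieced together from the fields documented above; virtqueue_get_buf() is the standard virtio call for reclaiming a buffer the remote processor has finished with.

/* Hedged sketch (not part of the patch): roughly how the tx-buffer
 * helper around the snippet above can look.
 */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
	unsigned int len;
	void *ret;

	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* either pick the next unused tx buffer... */
	if (vrp->last_sbuf < vrp->num_bufs / 2)
		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
	/* ...or recycle a buffer the remote side has already consumed */
	else
		ret = virtqueue_get_buf(vrp->svq, &len);

	mutex_unlock(&vrp->tx_lock);

	return ret;
}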
struct virtproc_info *vrp;
void *bufs_va;
int err = 0, i;
+ size_t total_buf_space;
vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
if (!vrp)
vrp->rvq = vqs[0];
vrp->svq = vqs[1];
+ /* we expect symmetric tx/rx vrings */
+ WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
+ virtqueue_get_vring_size(vrp->svq));
+
+ /* we need fewer buffers if vrings are small; see the sizing example below */
+ if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
+ vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
+ else
+ vrp->num_bufs = MAX_RPMSG_NUM_BUFS;
+
+ total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
+
/* allocate coherent memory for the buffers */
bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
- RPMSG_TOTAL_BUF_SPACE,
- &vrp->bufs_dma, GFP_KERNEL);
+ total_buf_space, &vrp->bufs_dma,
+ GFP_KERNEL);
if (!bufs_va) {
err = -ENOMEM;
goto vqs_del;
vrp->rbufs = bufs_va;
/* and half is dedicated for TX */
- vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;
+ vrp->sbufs = bufs_va + total_buf_space / 2;
/* set up the receive buffers */
- for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
+ for (i = 0; i < vrp->num_bufs / 2; i++) {
struct scatterlist sg;
void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
return 0;
free_coherent:
- dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
- bufs_va, vrp->bufs_dma);
+ dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ bufs_va, vrp->bufs_dma);
vqs_del:
vdev->config->del_vqs(vrp->vdev);
free_vrp:
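The "fewer buffers if vrings are small" branch in the probe path above is easy to work through numerically. The sketch below is a plain userspace program (not driver code) that reproduces the sizing rule for a few example vring sizes; the real driver reads the vring size via virtqueue_get_vring_size().

/* Standalone sketch of the sizing rule used in the probe path above:
 * two buffers (one rx, one tx) per vring entry, capped at
 * MAX_RPMSG_NUM_BUFS.  The vring sizes are example values only.
 */
#include <stdio.h>

#define MAX_RPMSG_NUM_BUFS	(512)
#define RPMSG_BUF_SIZE		(512)

static unsigned int num_bufs_for(unsigned int vring_size)
{
	if (vring_size < MAX_RPMSG_NUM_BUFS / 2)
		return vring_size * 2;
	return MAX_RPMSG_NUM_BUFS;
}

int main(void)
{
	unsigned int sizes[] = { 16, 64, 256, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int n = num_bufs_for(sizes[i]);

		printf("vring %4u entries -> %3u buffers -> %6u bytes total\n",
		       sizes[i], n, n * RPMSG_BUF_SIZE);
	}
	return 0;
}

For a 256-entry vring this gives the full 512 buffers (256KB); a 16-entry vring drops to 32 buffers (16KB), which is exactly the saving the patch is after.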
static void rpmsg_remove(struct virtio_device *vdev)
{
struct virtproc_info *vrp = vdev->priv;
+ size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
int ret;
vdev->config->reset(vdev);
vdev->config->del_vqs(vrp->vdev);
- dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE,
- vrp->rbufs, vrp->bufs_dma);
+ dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
+ vrp->rbufs, vrp->bufs_dma);
kfree(vrp);
}