* for the generic case.
*/
int dma_req_len;
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
- struct tegra_dma_req rx_dma_req;
- struct tegra_dma_channel *rx_dma;
-#else
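	/* dmaengine-based RX state: channel, slave config, in-flight descriptor and cookie */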
struct dma_chan *rx_dma;
struct dma_slave_config sconfig;
struct dma_async_tx_descriptor *rx_dma_desc;
dma_cookie_t rx_cookie;
-#endif
u32 *rx_bb;
dma_addr_t rx_bb_phys;
};
-#if !defined(CONFIG_TEGRA_SYSTEM_DMA)
static void tegra_spi_rx_dma_complete(void *args);
-#endif
-
static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
unsigned long reg)
{
val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN;
val |= SLINK_DMA_BLOCK_SIZE(tspi->dma_req_len / 4 - 1);
spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
- tspi->rx_dma_req.size = tspi->dma_req_len;
- tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
-#else
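	/*
	 * Map the bounce buffer as a one-shot device-to-memory slave
	 * transfer, then submit the descriptor and start the engine.
	 */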
tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma,
tspi->rx_bb_phys, tspi->dma_req_len,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	tspi->rx_dma_desc->callback = tegra_spi_rx_dma_complete;
	tspi->rx_dma_desc->callback_param = tspi;
tspi->rx_cookie = dmaengine_submit(tspi->rx_dma_desc);
dma_async_issue_pending(tspi->rx_dma);
-#endif
val |= SLINK_DMA_EN;
spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
spin_unlock_irqrestore(&tspi->lock, flags);
}
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
-static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
-{
- struct spi_tegra_data *tspi = req->dev;
- handle_spi_rx_dma_complete(tspi);
-}
-#else
+
static void tegra_spi_rx_dma_complete(void *args)
{
struct spi_tegra_data *tspi = args;
handle_spi_rx_dma_complete(tspi);
}
-#endif
static int spi_tegra_setup(struct spi_device *spi)
{
struct spi_tegra_data *tspi;
struct resource *r;
int ret;
-#if !defined(CONFIG_TEGRA_SYSTEM_DMA)
dma_cap_mask_t mask;
-#endif
master = spi_alloc_master(&pdev->dev, sizeof *tspi);
if (master == NULL) {
INIT_LIST_HEAD(&tspi->queue);
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
- tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
- if (!tspi->rx_dma) {
- dev_err(&pdev->dev, "can not allocate rx dma channel\n");
- ret = -ENODEV;
- goto err3;
- }
-#else
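	/* Request any channel with slave capability from the dmaengine core */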
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
	tspi->rx_dma = dma_request_channel(mask, NULL, NULL);
	if (!tspi->rx_dma) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto err3;
	}
-#endif
-
tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
&tspi->rx_bb_phys, GFP_KERNEL);
if (!tspi->rx_bb) {
goto err4;
}
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
- tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
- tspi->rx_dma_req.to_memory = 1;
- tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys;
- tspi->rx_dma_req.dest_bus_width = 32;
- tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
- tspi->rx_dma_req.source_bus_width = 32;
- tspi->rx_dma_req.source_wrap = 4;
- tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
- tspi->rx_dma_req.dev = tspi;
-#else
	/* dmaengine DMA slave configuration */
tspi->sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
tspi->sconfig.dst_addr = tspi->phys + SLINK_RX_FIFO;
	tspi->sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	tspi->sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	tspi->sconfig.slave_id = spi_tegra_req_sels[pdev->id];
	ret = dmaengine_slave_config(tspi->rx_dma, &tspi->sconfig);
	if (ret < 0) {
		dev_err(&pdev->dev, "can not do slave configure %d\n",
			ret);
		goto err4;
	}
-#endif
master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
tspi->rx_bb, tspi->rx_bb_phys);
err4:
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
- tegra_dma_free_channel(tspi->rx_dma);
-#else
dma_release_channel(tspi->rx_dma);
-#endif
err3:
clk_put(tspi->clk);
err2:
tspi = spi_master_get_devdata(master);
spi_unregister_master(master);
-#if defined(CONFIG_TEGRA_SYSTEM_DMA)
- tegra_dma_free_channel(tspi->rx_dma);
-#else
dma_release_channel(tspi->rx_dma);
-#endif
-
dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
tspi->rx_bb, tspi->rx_bb_phys);