/**
* ali15x3_dma_setup - begin a DMA phase
* @drive: target device
+ * @cmd: command
*
* Returns 1 if the DMA cannot be performed, zero on success.
*/
-static int ali15x3_dma_setup(ide_drive_t *drive)
+static int ali15x3_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
if (m5229_revision < 0xC2 && drive->media != ide_disk) {
- if (rq_data_dir(drive->hwif->rq))
+ if (cmd->tf_flags & IDE_TFLAG_WRITE)
return 1; /* try PIO instead of DMA */
}
- return ide_dma_setup(drive);
+ return ide_dma_setup(drive, cmd);
}
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-static int auide_build_dmatable(ide_drive_t *drive)
+static int auide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
_auide_hwif *ahwif = &auide_hwif;
struct scatterlist *sg;
- int i = hwif->cmd.sg_nents, iswrite, count = 0;
+ int i = cmd->sg_nents, count = 0;
+ int iswrite = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
- iswrite = (rq_data_dir(rq) == WRITE);
/* Save for interrupt context */
ahwif->drive = drive;
(2*WAIT_CMD), NULL);
}
-static int auide_dma_setup(ide_drive_t *drive)
+static int auide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
- struct request *rq = drive->hwif->rq;
-
- if (!auide_build_dmatable(drive)) {
- ide_map_sg(drive, rq);
+ if (auide_build_dmatable(drive, cmd) == 0) {
+ ide_map_sg(drive, cmd);
return 1;
}
enable_dma(ec->dma);
}
-static int icside_dma_setup(ide_drive_t *drive)
+static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
struct expansion_card *ec = ECARD_DEV(hwif->dev);
struct icside_state *state = ecard_get_drvdata(ec);
- struct request *rq = hwif->rq;
unsigned int dma_mode;
- if (rq_data_dir(rq))
+ if (cmd->tf_flags & IDE_TFLAG_WRITE)
dma_mode = DMA_MODE_WRITE;
else
dma_mode = DMA_MODE_READ;
* Tell the DMA engine about the SG table and
* data direction.
*/
- set_dma_sg(ec->dma, hwif->sg_table, hwif->cmd.sg_nents);
+ set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
set_dma_mode(ec->dma, dma_mode);
drive->waiting_for_dma = 1;
{
struct ide_atapi_pc *pc;
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_dma_ops *dma_ops = hwif->dma_ops;
+ struct ide_cmd *cmd = &hwif->cmd;
ide_expiry_t *expiry = NULL;
struct request *rq = hwif->rq;
unsigned int timeout;
u32 tf_flags;
u16 bcount;
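+ /* ide-floppy sets up the command on its own, so skip it here */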
+ if (drive->media != ide_floppy) {
+ if (rq_data_dir(rq))
+ cmd->tf_flags |= IDE_TFLAG_WRITE;
+ cmd->rq = rq;
+ }
+
if (dev_is_idecd(drive)) {
tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL;
bcount = ide_cd_get_xferlen(rq);
timeout = ATAPI_WAIT_PC;
if (drive->dma) {
- if (ide_build_sglist(drive, rq))
- drive->dma = !hwif->dma_ops->dma_setup(drive);
+ if (ide_build_sglist(drive, cmd))
+ drive->dma = !dma_ops->dma_setup(drive, cmd);
else
drive->dma = 0;
}
if ((pc->flags & PC_FLAG_DMA_OK) &&
(drive->dev_flags & IDE_DFLAG_USING_DMA)) {
- if (ide_build_sglist(drive, rq))
- drive->dma = !hwif->dma_ops->dma_setup(drive);
+ if (ide_build_sglist(drive, cmd))
+ drive->dma = !dma_ops->dma_setup(drive, cmd);
else
drive->dma = 0;
}
memset(&cmd, 0, sizeof(cmd));
cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
- if (dma == 0) {
- ide_init_sg_cmd(&cmd, nsectors);
- ide_map_sg(drive, rq);
- }
-
if (drive->dev_flags & IDE_DFLAG_LBA) {
if (lba48) {
pr_debug("%s: LBA=0x%012llx\n", drive->name,
ide_tf_set_cmd(drive, &cmd, dma);
cmd.rq = rq;
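+ /* ide_map_sg() reads cmd->rq, so map only after the request is stored */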
+ if (dma == 0) {
+ ide_init_sg_cmd(&cmd, nsectors);
+ ide_map_sg(drive, &cmd);
+ }
+
rc = do_rw_taskfile(drive, &cmd);
if (rc == ide_stopped && dma) {
* May also be invoked from trm290.c
*/
-int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
+int ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
__le32 *table = (__le32 *)hwif->dmatable_cpu;
struct scatterlist *sg;
u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
- for_each_sg(hwif->sg_table, sg, hwif->cmd.sg_nents, i) {
+ for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
u32 cur_addr, cur_len, xcount, bcount;
cur_addr = sg_dma_address(sg);
/**
* ide_dma_setup - begin a DMA phase
* @drive: target device
+ * @cmd: command
*
* Build an IDE DMA PRD (IDE speak for scatter gather table)
* and then set up the DMA transfer registers for a device
* is returned.
*/
-int ide_dma_setup(ide_drive_t *drive)
+int ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- unsigned int reading = rq_data_dir(rq) ? 0 : ATA_DMA_WR;
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
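+ /* ATA_DMA_WR makes the bus master write to memory, i.e. a device read */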
+ u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
u8 dma_stat;
/* fall back to pio! */
- if (!ide_build_dmatable(drive, rq)) {
- ide_map_sg(drive, rq);
+ if (ide_build_dmatable(drive, cmd) == 0) {
+ ide_map_sg(drive, cmd);
return 1;
}
/* specify r/w */
if (mmio)
- writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+ writeb(rw, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
else
- outb(reading, hwif->dma_base + ATA_DMA_CMD);
+ outb(rw, hwif->dma_base + ATA_DMA_CMD);
/* read DMA status for INTR & ERROR flags */
dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
/**
* ide_build_sglist - map IDE scatter gather for DMA I/O
* @drive: the drive to build the DMA table for
- * @rq: the request holding the sg list
+ * @cmd: command
*
* Perform the DMA mapping magic necessary to access the source or
* target buffers of a request via DMA. The lower layers of the
* operate in a portable fashion.
*/
-int ide_build_sglist(ide_drive_t *drive, struct request *rq)
+int ide_build_sglist(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
- struct ide_cmd *cmd = &hwif->cmd;
int i;
- ide_map_sg(drive, rq);
+ ide_map_sg(drive, cmd);
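+ /* the mapping direction follows the taskfile write flag */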
- if (rq_data_dir(rq) == READ)
- cmd->sg_dma_direction = DMA_FROM_DEVICE;
- else
+ if (cmd->tf_flags & IDE_TFLAG_WRITE)
cmd->sg_dma_direction = DMA_TO_DEVICE;
+ else
+ cmd->sg_dma_direction = DMA_FROM_DEVICE;
i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
if (i == 0)
- ide_map_sg(drive, rq);
+ ide_map_sg(drive, cmd);
else {
cmd->orig_sg_nents = cmd->sg_nents;
cmd->sg_nents = i;
goto out_end;
}
+ if (rq_data_dir(rq))
+ cmd->tf_flags |= IDE_TFLAG_WRITE;
+ cmd->rq = rq;
+
if (blk_fs_request(rq) || pc->req_xfer) {
ide_init_sg_cmd(cmd, rq->nr_sectors);
- ide_map_sg(drive, rq);
+ ide_map_sg(drive, cmd);
}
pc->sg = hwif->sg_table;
return ide_stopped;
}
-void ide_map_sg(ide_drive_t *drive, struct request *rq)
+void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct ide_cmd *cmd = &hwif->cmd;
struct scatterlist *sg = hwif->sg_table;
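+ /* the caller must have stored the request in cmd->rq */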
+ struct request *rq = cmd->rq;
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
if (cmd) {
if (cmd->protocol == ATA_PROT_PIO) {
ide_init_sg_cmd(cmd, rq->nr_sectors);
- ide_map_sg(drive, rq);
+ ide_map_sg(drive, cmd);
}
return do_rw_taskfile(drive, cmd);
return ide_started;
default:
if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
- ide_build_sglist(drive, hwif->rq) == 0 ||
- dma_ops->dma_setup(drive))
+ ide_build_sglist(drive, cmd) == 0 ||
+ dma_ops->dma_setup(drive, cmd))
return ide_stopped;
dma_ops->dma_exec_cmd(drive, tf->command);
dma_ops->dma_start(drive);
return (dma_stat & 7) != 4;
}
-static int ns87415_dma_setup(ide_drive_t *drive)
+static int ns87415_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
/* select DMA xfer */
ns87415_prepare_drive(drive, 1);
- if (!ide_dma_setup(drive))
+ if (ide_dma_setup(drive, cmd) == 0)
return 0;
/* DMA failed: select PIO xfer */
ns87415_prepare_drive(drive, 0);
#define IDE_WAKEUP_DELAY (1*HZ)
static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
-static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
static void pmac_ide_selectproc(ide_drive_t *drive);
static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
* pmac_ide_build_dmatable builds the DBDMA command list
* for a transfer and sets the DBDMA channel to point to it.
*/
-static int
-pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
+static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
pmac_ide_hwif_t *pmif =
struct dbdma_cmd *table;
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
struct scatterlist *sg;
- int wr = (rq_data_dir(rq) == WRITE);
- int i = hwif->cmd.sg_nents, count = 0;
+ int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
+ int i = cmd->sg_nents, count = 0;
/* DMA table is already aligned */
table = (struct dbdma_cmd *) pmif->dma_table_cpu;
* Prepare a DMA transfer. We build the DMA table, adjust the timings for
* a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
*/
-static int
-pmac_ide_dma_setup(ide_drive_t *drive)
+static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
- struct request *rq = hwif->rq;
u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
+ u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
- if (!pmac_ide_build_dmatable(drive, rq)) {
- ide_map_sg(drive, rq);
+ if (pmac_ide_build_dmatable(drive, cmd) == 0) {
+ ide_map_sg(drive, cmd);
return 1;
}
/* Apple adds 60ns to wrDataSetup on reads */
if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
- writel(pmif->timings[unit] + (!rq_data_dir(rq) ? 0x00800000UL : 0),
+ writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL),
PMAC_IDE_REG(IDE_TIMING_CONFIG));
(void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
}
}
/**
- * scc_ide_dma_setup - begin a DMA phase
+ * scc_dma_setup - begin a DMA phase
* @drive: target device
+ * @cmd: command
*
* Build an IDE DMA PRD (IDE speak for scatter gather table)
* and then set up the DMA transfer registers.
* is returned.
*/
-static int scc_dma_setup(ide_drive_t *drive)
+static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
- unsigned int reading;
+ u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
u8 dma_stat;
- if (rq_data_dir(rq))
- reading = 0;
- else
- reading = 1 << 3;
-
/* fall back to pio! */
- if (!ide_build_dmatable(drive, rq)) {
- ide_map_sg(drive, rq);
+ if (ide_build_dmatable(drive, cmd) == 0) {
+ ide_map_sg(drive, cmd);
return 1;
}
out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
/* specify r/w */
- out_be32((void __iomem *)hwif->dma_base, reading);
+ out_be32((void __iomem *)hwif->dma_base, rw);
/* read DMA status for INTR & ERROR flags */
dma_stat = scc_dma_sff_read_status(hwif);
/* | Upper 32 bits - Zero |EOL| 15 unused | 16 Bit Length| */
/* --------------------------------------------------------------------- */
/* Creates the scatter-gather list (DMA table) */
-static unsigned int
-sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
+static int sgiioc4_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
unsigned int *table = hwif->dmatable_cpu;
- unsigned int count = 0, i = hwif->cmd.sg_nents;
+ unsigned int count = 0, i = cmd->sg_nents;
struct scatterlist *sg = hwif->sg_table;
while (i && sg_dma_len(sg)) {
return 0; /* revert to PIO for this request */
}
-static int sgiioc4_dma_setup(ide_drive_t *drive)
+static int sgiioc4_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
- struct request *rq = drive->hwif->rq;
- unsigned int count = 0;
int ddir;
+ u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
- if (rq_data_dir(rq))
- ddir = PCI_DMA_TODEVICE;
- else
- ddir = PCI_DMA_FROMDEVICE;
-
- if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
+ if (sgiioc4_build_dmatable(drive, cmd) == 0) {
/* try PIO instead of DMA */
- ide_map_sg(drive, rq);
+ ide_map_sg(drive, cmd);
return 1;
}
- if (rq_data_dir(rq))
+ if (write)
/* Writes TO the IOC4 FROM Main Memory */
ddir = IOC4_DMA_READ;
else
ide_execute_command(drive, command, &ide_dma_intr, WAIT_CMD, NULL);
}
-static int trm290_dma_setup(ide_drive_t *drive)
+static int trm290_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
- struct request *rq = hwif->rq;
unsigned int count, rw;
- if (rq_data_dir(rq)) {
+ if (cmd->tf_flags & IDE_TFLAG_WRITE) {
#ifdef TRM290_NO_DMA_WRITES
/* always use PIO for writes */
trm290_prepare_drive(drive, 0); /* select PIO xfer */
} else
rw = 2;
- if (!(count = ide_build_dmatable(drive, rq))) {
- ide_map_sg(drive, rq);
+ count = ide_build_dmatable(drive, cmd);
+ if (count == 0) {
+ ide_map_sg(drive, cmd);
/* try PIO instead of DMA */
trm290_prepare_drive(drive, 0); /* select PIO xfer */
return 1;
#ifdef __BIG_ENDIAN
/* custom ide_build_dmatable to handle swapped layout */
-static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
+static int tx4939ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
u32 *table = (u32 *)hwif->dmatable_cpu;
int i;
struct scatterlist *sg;
- for_each_sg(hwif->sg_table, sg, hwif->cmd.sg_nents, i) {
+ for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) {
u32 cur_addr, cur_len, bcount;
cur_addr = sg_dma_address(sg);
#define tx4939ide_build_dmatable ide_build_dmatable
#endif
-static int tx4939ide_dma_setup(ide_drive_t *drive)
+static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
void __iomem *base = TX4939IDE_BASE(hwif);
- struct request *rq = hwif->rq;
- u8 reading;
- int nent;
-
- if (rq_data_dir(rq))
- reading = 0;
- else
- reading = ATA_DMA_WR;
+ u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
/* fall back to PIO! */
- nent = tx4939ide_build_dmatable(drive, rq);
- if (!nent) {
- ide_map_sg(drive, rq);
+ if (tx4939ide_build_dmatable(drive, cmd) == 0) {
+ ide_map_sg(drive, cmd);
return 1;
}
tx4939ide_writel(hwif->dmatable_dma, base, TX4939IDE_PRD_Ptr);
/* specify r/w */
- tx4939ide_writeb(reading, base, TX4939IDE_DMA_Cmd);
+ tx4939ide_writeb(rw, base, TX4939IDE_DMA_Cmd);
/* clear INTR & ERROR flags */
tx4939ide_clear_dma_status(base);
tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
- tx4939ide_writew(rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
+
+ tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
+
return 0;
}
struct ide_dma_ops {
void (*dma_host_set)(struct ide_drive_s *, int);
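+ /* returns 1 if DMA cannot be set up for the command, zero on success */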
- int (*dma_setup)(struct ide_drive_s *);
+ int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
void (*dma_exec_cmd)(struct ide_drive_s *, u8);
void (*dma_start)(struct ide_drive_s *);
int (*dma_end)(struct ide_drive_s *);
#define ide_pci_resume NULL
#endif
-void ide_map_sg(ide_drive_t *, struct request *);
+void ide_map_sg(ide_drive_t *, struct ide_cmd *);
void ide_init_sg_cmd(struct ide_cmd *, int);
#define BAD_DMA_DRIVE 0
int ide_allocate_dma_engine(ide_hwif_t *);
void ide_release_dma_engine(ide_hwif_t *);
-int ide_build_sglist(ide_drive_t *, struct request *);
+int ide_build_sglist(ide_drive_t *, struct ide_cmd *);
void ide_destroy_dmatable(ide_drive_t *);
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
int config_drive_for_dma(ide_drive_t *);
-extern int ide_build_dmatable(ide_drive_t *, struct request *);
+int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
void ide_dma_host_set(ide_drive_t *, int);
-extern int ide_dma_setup(ide_drive_t *);
+int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
void ide_dma_exec_cmd(ide_drive_t *, u8);
extern void ide_dma_start(ide_drive_t *);
int ide_dma_end(ide_drive_t *);
static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
static inline int ide_build_sglist(ide_drive_t *drive,
- struct request *rq) { return 0; }
+ struct ide_cmd *cmd) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
#ifdef CONFIG_BLK_DEV_IDEACPI