* Move command-related fields from ide_hwif_t to struct ide_cmd.
* Make ide_init_sg_cmd() take a command and a sector count as arguments (see the sketch below).
There should be no functional changes caused by this patch.
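A minimal before/after sketch of the helper, as the hunks below define
it (callers now pass the sector count, e.g. rq->nr_sectors, explicitly):

	/* before: per-command state lived in ide_hwif_t */
	void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq);

	/* after: the state lives in struct ide_cmd itself */
	void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
	{
		cmd->nsect = cmd->nleft = nsect;
		cmd->cursg_ofs = 0;
		cmd->cursg = NULL;
	}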
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
struct request *rq = hwif->rq;
_auide_hwif *ahwif = &auide_hwif;
struct scatterlist *sg;
- int i = hwif->sg_nents, iswrite, count = 0;
+ int i = hwif->cmd.sg_nents, iswrite, count = 0;
iswrite = (rq_data_dir(rq) == WRITE);
/* Save for interrupt context */
* Tell the DMA engine about the SG table and
* data direction.
*/
- set_dma_sg(ec->dma, hwif->sg_table, hwif->sg_nents);
+ set_dma_sg(ec->dma, hwif->sg_table, hwif->cmd.sg_nents);
set_dma_mode(ec->dma, dma_mode);
drive->waiting_for_dma = 1;
lba48 = 0;
}
- if (!dma) {
- ide_init_sg_cmd(drive, rq);
- ide_map_sg(drive, rq);
- }
-
memset(&cmd, 0, sizeof(cmd));
cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ if (dma == 0) {
+ ide_init_sg_cmd(&cmd, nsectors);
+ ide_map_sg(drive, rq);
+ }
+
if (drive->dev_flags & IDE_DFLAG_LBA) {
if (lba48) {
pr_debug("%s: LBA=0x%012llx\n", drive->name,
/* fallback to PIO */
cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
ide_tf_set_cmd(drive, &cmd, 0);
- ide_init_sg_cmd(drive, rq);
+ ide_init_sg_cmd(&cmd, nsectors);
rc = do_rw_taskfile(drive, &cmd);
}
struct scatterlist *sg;
u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
- for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
+ for_each_sg(hwif->sg_table, sg, hwif->cmd.sg_nents, i) {
u32 cur_addr, cur_len, xcount, bcount;
cur_addr = sg_dma_address(sg);
{
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
+ struct ide_cmd *cmd = &hwif->cmd;
int i;
ide_map_sg(drive, rq);
if (rq_data_dir(rq) == READ)
- hwif->sg_dma_direction = DMA_FROM_DEVICE;
+ cmd->sg_dma_direction = DMA_FROM_DEVICE;
else
- hwif->sg_dma_direction = DMA_TO_DEVICE;
+ cmd->sg_dma_direction = DMA_TO_DEVICE;
- i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+ i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
if (i == 0)
ide_map_sg(drive, rq);
else {
- hwif->orig_sg_nents = hwif->sg_nents;
- hwif->sg_nents = i;
+ cmd->orig_sg_nents = cmd->sg_nents;
+ cmd->sg_nents = i;
}
return i;
void ide_destroy_dmatable(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_cmd *cmd = &hwif->cmd;
- dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->orig_sg_nents,
- hwif->sg_dma_direction);
+ dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
+ cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
{
struct ide_disk_obj *floppy = drive->driver_data;
ide_hwif_t *hwif = drive->hwif;
+ struct ide_cmd *cmd = &hwif->cmd;
struct ide_atapi_pc *pc;
if (drive->debug_mask & IDE_DBG_RQ)
}
if (blk_fs_request(rq) || pc->req_xfer) {
- ide_init_sg_cmd(drive, rq);
+ ide_init_sg_cmd(cmd, rq->nr_sectors);
ide_map_sg(drive, rq);
}
pc->sg = hwif->sg_table;
- pc->sg_cnt = hwif->sg_nents;
+ pc->sg_cnt = cmd->sg_nents;
pc->rq = rq;
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
ide_hwif_t *hwif = drive->hwif;
+ struct ide_cmd *cmd = &hwif->cmd;
struct scatterlist *sg = hwif->sg_table;
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
- hwif->sg_nents = 1;
+ cmd->sg_nents = 1;
} else if (!rq->bio) {
sg_init_one(sg, rq->data, rq->data_len);
- hwif->sg_nents = 1;
- } else {
- hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
- }
+ cmd->sg_nents = 1;
+ } else
+ cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
-
EXPORT_SYMBOL_GPL(ide_map_sg);
-void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
{
- ide_hwif_t *hwif = drive->hwif;
-
- hwif->nsect = hwif->nleft = rq->nr_sectors;
- hwif->cursg_ofs = 0;
- hwif->cursg = NULL;
+ cmd->nsect = cmd->nleft = nsect;
+ cmd->cursg_ofs = 0;
+ cmd->cursg = NULL;
}
-
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
/**
case TASKFILE_OUT:
case TASKFILE_MULTI_IN:
case TASKFILE_IN:
- ide_init_sg_cmd(drive, rq);
+ ide_init_sg_cmd(cmd, rq->nr_sectors);
ide_map_sg(drive, rq);
default:
break;
{
ide_hwif_t *hwif = drive->hwif;
struct scatterlist *sg = hwif->sg_table;
- struct scatterlist *cursg = hwif->cursg;
+ struct scatterlist *cursg = cmd->cursg;
struct page *page;
#ifdef CONFIG_HIGHMEM
unsigned long flags;
unsigned int offset;
u8 *buf;
- cursg = hwif->cursg;
+ cursg = cmd->cursg;
if (!cursg) {
cursg = sg;
- hwif->cursg = sg;
+ cmd->cursg = sg;
}
page = sg_page(cursg);
- offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
+ offset = cursg->offset + cmd->cursg_ofs * SECTOR_SIZE;
/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
#endif
buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
- hwif->nleft--;
- hwif->cursg_ofs++;
+ cmd->nleft--;
+ cmd->cursg_ofs++;
- if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
- hwif->cursg = sg_next(hwif->cursg);
- hwif->cursg_ofs = 0;
+ if ((cmd->cursg_ofs * SECTOR_SIZE) == cursg->length) {
+ cmd->cursg = sg_next(cmd->cursg);
+ cmd->cursg_ofs = 0;
}
/* do the actual data transfer */
{
unsigned int nsect;
- nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
+ nsect = min_t(unsigned int, cmd->nleft, drive->mult_count);
while (nsect--)
ide_pio_sector(drive, cmd, write);
}
const char *s, u8 stat)
{
if (cmd->tf_flags & IDE_TFLAG_FS) {
- ide_hwif_t *hwif = drive->hwif;
- int sectors = hwif->nsect - hwif->nleft;
+ int sectors = cmd->nsect - cmd->nleft;
switch (cmd->data_phase) {
case TASKFILE_IN:
- if (hwif->nleft)
+ if (cmd->nleft)
break;
/* fall through */
case TASKFILE_OUT:
sectors--;
break;
case TASKFILE_MULTI_IN:
- if (hwif->nleft)
+ if (cmd->nleft)
break;
/* fall through */
case TASKFILE_MULTI_OUT:
ide_pio_datablock(drive, cmd, 0);
/* Are we done? Check status and finish transfer. */
- if (!hwif->nleft) {
+ if (cmd->nleft == 0) {
stat = wait_drive_not_busy(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
return task_error(drive, cmd, __func__, stat);
return task_error(drive, cmd, __func__, stat);
/* Deal with unexpected ATA data phase. */
- if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
+ if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
return task_error(drive, cmd, __func__, stat);
- if (!hwif->nleft) {
+ if (cmd->nleft == 0) {
ide_finish_cmd(drive, cmd, stat);
return ide_stopped;
}
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
struct scatterlist *sg;
int wr = (rq_data_dir(rq) == WRITE);
- int i = hwif->sg_nents, count = 0;
+ int i = hwif->cmd.sg_nents, count = 0;
/* DMA table is already aligned */
table = (struct dbdma_cmd *) pmif->dma_table_cpu;
{
ide_hwif_t *hwif = drive->hwif;
unsigned int *table = hwif->dmatable_cpu;
- unsigned int count = 0, i = hwif->sg_nents;
+ unsigned int count = 0, i = hwif->cmd.sg_nents;
struct scatterlist *sg = hwif->sg_table;
while (i && sg_dma_len(sg)) {
int i;
struct scatterlist *sg;
- for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
+ for_each_sg(hwif->sg_table, sg, hwif->cmd.sg_nents, i) {
u32 cur_addr, cur_len, bcount;
cur_addr = sg_dma_address(sg);
u8 ftf_flags; /* for TASKFILE ioctl */
u32 tf_flags;
int data_phase;
+
+ int sg_nents; /* number of sg entries */
+ int orig_sg_nents; /* sg entry count before DMA mapping */
+ int sg_dma_direction; /* DMA transfer direction */
+
+ unsigned int nsect; /* total sectors for the command */
+ unsigned int nleft; /* sectors remaining to transfer */
+ struct scatterlist *cursg; /* current sg entry */
+ unsigned int cursg_ofs; /* sector offset into cursg */
+
struct request *rq; /* copy of request */
void *special; /* valid_t generally */
};
/* Scatter-gather list used to build the above */
struct scatterlist *sg_table;
int sg_max_nents; /* Maximum number of entries in it */
- int sg_nents; /* Current number of entries in it */
- int orig_sg_nents;
- int sg_dma_direction; /* dma transfer direction */
struct ide_cmd cmd; /* current command */
- unsigned int nsect;
- unsigned int nleft;
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
int rqsize; /* max sectors per request */
int irq; /* our irq number */
#endif
void ide_map_sg(ide_drive_t *, struct request *);
-void ide_init_sg_cmd(ide_drive_t *, struct request *);
+void ide_init_sg_cmd(struct ide_cmd *, int);
#define BAD_DMA_DRIVE 0
#define GOOD_DMA_DRIVE 1