if (!bio)
goto out_bmd;
- if (iter->type & WRITE)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
ret = 0;
if (map_data) {
kfree(pages);
- /*
- * set data direction, and check if mapped pages need bouncing
- */
- if (iter->type & WRITE)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
bio_set_flag(bio, BIO_USER_MAPPED);
bio->bi_private = data;
} else {
bio->bi_end_io = bio_copy_kern_endio;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}
return bio;
void blk_dump_rq_flags(struct request *rq, char *msg)
{
- printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
- rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+ printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
+ rq->rq_disk ? rq->rq_disk->disk_name : "?",
(unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
void init_request_from_bio(struct request *req, struct bio *bio)
{
- req->cmd_type = REQ_TYPE_FS;
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
- dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
dst->nr_phys_segments = src->nr_phys_segments;
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}
- flush_rq->cmd_type = REQ_TYPE_FS;
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
- rq->cmd_flags &= REQ_OP_MASK;
- rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
if (IS_ERR(bio))
return PTR_ERR(bio);
+ bio->bi_opf &= ~REQ_OP_MASK;
+ bio->bi_opf |= req_op(rq);
+
if (map_data && map_data->null_mapped)
bio_set_flag(bio, BIO_NULL_MAPPED);
}
/**
- * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for passthrough requests
* @q: request queue where request should be inserted
* @rq: request to map data to
* @map_data: pointer to the rq_map_data holding pages (if necessary)
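A sketch of the updated calling convention, not part of the patch (the queue "q", user buffer "ubuf", and length "len" are assumed): a passthrough submitter now encodes the data direction in the op it allocates the request with, and blk_rq_map_user() propagates that op to the mapped bio via blk_rq_append_bio() above.

	static int submit_scsi_out(struct request_queue *q,
				   void __user *ubuf, unsigned long len)
	{
		struct request *rq;
		int ret;

		/* direction now lives in the op, not in rq->cmd_type */
		rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		scsi_req_init(rq);

		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
		if (ret)
			blk_put_request(rq);
		return ret;
	}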
EXPORT_SYMBOL(blk_rq_unmap_user);
/**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for passthrough requests
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
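The kernel-buffer variant follows the same pattern; a minimal sketch with made-up names, assuming the caller executes and frees the request itself:

	static int submit_drv_in(struct request_queue *q, void *kbuf,
				 unsigned int len)
	{
		struct request *rq;
		int ret;

		rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
		if (!ret)
			ret = blk_execute_rq(q, NULL, rq, 0);
		blk_put_request(rq);
		return ret;
	}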
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (!reading)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf &= ~REQ_OP_MASK;
+ bio->bi_opf |= req_op(rq);
if (do_copy)
rq->rq_flags |= RQF_COPY_USER;
{
struct request *rq = list_entry_rq(v);
- seq_printf(m, "%p {.cmd_type=%u, .cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
- rq, rq->cmd_type, rq->cmd_flags, (unsigned int)rq->rq_flags,
+ seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
+ rq, rq->cmd_flags, (unsigned int)rq->rq_flags,
rq->tag, rq->internal_tag);
return 0;
}
* Check if sg_io_v4 from user is allowed and valid
*/
static int
-bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
{
int ret = 0;
ret = -EINVAL;
}
- *rw = hdr->dout_xfer_len ? WRITE : READ;
+ *op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
return ret;
}
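For reference, the userspace side being classified here (a hypothetical sg_io_v4 fill; "cdb", "buf", and "len" are assumed): a non-zero dout_xfer_len now selects REQ_OP_SCSI_OUT, everything else REQ_OP_SCSI_IN.

	struct sg_io_v4 hdr = { 0 };

	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (__u64)(unsigned long)cdb;
	hdr.dout_xferp = (__u64)(unsigned long)buf;	/* data to the device */
	hdr.dout_xfer_len = len;	/* non-zero: REQ_OP_SCSI_OUT */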
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
- int ret, rw;
- unsigned int dxfer_len;
+ int ret;
+ unsigned int op, dxfer_len;
void __user *dxferp = NULL;
struct bsg_class_device *bcd = &q->bsg_dev;
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
- ret = bsg_validate_sgv4_hdr(hdr, &rw);
+ ret = bsg_validate_sgv4_hdr(hdr, &op);
if (ret)
return ERR_PTR(ret);
/*
* map scatter-gather elements separately and string them to request
*/
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, op, GFP_KERNEL);
if (IS_ERR(rq))
return rq;
scsi_req_init(rq);
if (ret)
goto out;
- if (rw == WRITE && hdr->din_xfer_len) {
+ if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP;
goto out;
}
- next_rq = blk_get_request(q, READ, GFP_KERNEL);
+ next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(next_rq)) {
ret = PTR_ERR(next_rq);
next_rq = NULL;
goto out;
}
rq->next_rq = next_rq;
- next_rq->cmd_type = rq->cmd_type;
dxferp = (void __user *)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
at_head = 1;
ret = -ENOMEM;
- rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+ rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ GFP_KERNEL);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = scsi_req(rq);
}
- rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
+ rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ __GFP_RECLAIM);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto error_free_buffer;
struct request *rq;
int err;
- rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
+ rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
scsi_req_init(rq);
{
struct scsi_request *req = scsi_req(rq);
- rq->cmd_type = REQ_TYPE_BLOCK_PC;
memset(req->__cmd, 0, sizeof(req->__cmd));
req->cmd = req->__cmd;
req->cmd_len = BLK_MAX_CDB;
c->Header.SGList = h->max_cmd_sgentries;
set_performant_mode(h, c);
- if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+ switch (req_op(creq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
if(h->cciss_read == CCISS_READ_10) {
c->Request.CDB[1] = 0;
c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
c->Request.CDB[14] = c->Request.CDB[15] = 0;
}
- } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ break;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
c->Request.CDBLen = scsi_req(creq)->cmd_len;
memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
scsi_req(creq)->sense = c->err_info->SenseInfo;
- } else {
+ break;
+ default:
dev_warn(&h->pdev->dev, "bad request type %d\n",
- creq->cmd_type);
+ creq->cmd_flags);
BUG();
}
return;
if (WARN(atomic_read(&usage_count) == 0,
- "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
- current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
+ "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
+ current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
return;
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, bio_data(req->bio));
#endif
- if (req->cmd_type == REQ_TYPE_FS) {
- switch (rq_data_dir(req)) {
- case READ:
- hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
- &read_intr);
- if (reset)
- goto repeat;
- break;
- case WRITE:
- hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
- &write_intr);
- if (reset)
- goto repeat;
- if (wait_DRQ()) {
- bad_rw_intr();
- goto repeat;
- }
- outsw(HD_DATA, bio_data(req->bio), 256);
- break;
- default:
- printk("unknown hd-command\n");
- hd_end_request_cur(-EIO);
- break;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+ &read_intr);
+ if (reset)
+ goto repeat;
+ break;
+ case REQ_OP_WRITE:
+ hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
+ &write_intr);
+ if (reset)
+ goto repeat;
+ if (wait_DRQ()) {
+ bad_rw_intr();
+ goto repeat;
}
+ outsw(HD_DATA, bio_data(req->bio), 256);
+ break;
+ default:
+ printk("unknown hd-command\n");
+ hd_end_request_cur(-EIO);
+ break;
}
}
break;
}
- if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
- mg_end_request_cur(host, -EIO);
- continue;
- }
-
- if (rq_data_dir(host->req) == READ)
+ switch (req_op(host->req)) {
+ case REQ_OP_READ:
mg_read(host->req);
- else
+ break;
+ case REQ_OP_WRITE:
mg_write(host->req);
+ break;
+ default:
+ mg_end_request_cur(host, -EIO);
+ break;
+ }
}
}
unsigned int sect_num,
unsigned int sect_cnt)
{
- if (rq_data_dir(req) == READ) {
+ switch (req_op(host->req)) {
+ case REQ_OP_READ:
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
- } else {
+ break;
+ case REQ_OP_WRITE:
/* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
+ break;
+ default:
+ mg_end_request_cur(host, -EIO);
+ break;
}
return MG_ERR_NONE;
}
continue;
}
- if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
- mg_end_request_cur(host, -EIO);
- continue;
- }
-
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
}
u32 type;
u32 tag = blk_mq_unique_tag(req);
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
type = NBD_CMD_TRIM;
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
type = NBD_CMD_FLUSH;
- else if (rq_data_dir(req) == WRITE)
+ break;
+ case REQ_OP_WRITE:
type = NBD_CMD_WRITE;
- else
+ break;
+ case REQ_OP_READ:
type = NBD_CMD_READ;
+ break;
+ default:
+ return -EIO;
+ }
if (rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
struct request *rq;
struct bio *bio = rqd->bio;
- rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
+ rq = blk_mq_alloc_request(q,
+ op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return -ENOMEM;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio);
if (!rq)
break;
- /* filter out block requests we don't understand */
- if (rq->cmd_type != REQ_TYPE_FS) {
- blk_end_request_all(rq, 0);
- continue;
- }
-
/* deduce our operation (read, write, flush) */
/* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
* into a clearly defined set of RPC commands:
static int pd_block; /* address of next requested block */
static int pd_count; /* number of blocks still to do */
static int pd_run; /* sectors in current cluster */
-static int pd_cmd; /* current command READ/WRITE */
static char *pd_buf; /* buffer for request in progress */
static enum action do_pd_io_start(void)
{
- if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) {
+ switch (req_op(pd_req)) {
+ case REQ_OP_DRV_IN:
phase = pd_special;
return pd_special();
- }
-
- pd_cmd = rq_data_dir(pd_req);
- if (pd_cmd == READ || pd_cmd == WRITE) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
pd_retries = 0;
- if (pd_cmd == READ)
+ if (req_op(pd_req) == REQ_OP_READ)
return do_pd_read_start();
else
return do_pd_write_start();
struct request *rq;
int err = 0;
- rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->special = func;
err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
int ret = 0;
rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- WRITE : READ, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
scsi_req_init(rq);
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (req_op(req) == REQ_OP_FLUSH) {
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
if (ps3disk_submit_flush_request(dev, req))
- break;
- } else if (req->cmd_type == REQ_TYPE_FS) {
+ return;
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
if (ps3disk_submit_request_sg(dev, req))
- break;
- } else {
+ return;
+ break;
+ default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO);
- continue;
}
}
}
bool must_be_locked;
int result;
- if (rq->cmd_type != REQ_TYPE_FS) {
- dout("%s: non-fs request type %d\n", __func__,
- (int) rq->cmd_type);
- result = -EIO;
- goto err;
- }
-
- if (req_op(rq) == REQ_OP_DISCARD)
+ switch (req_op(rq)) {
+ case REQ_OP_DISCARD:
op_type = OBJ_OP_DISCARD;
- else if (req_op(rq) == REQ_OP_WRITE)
+ break;
+ case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
- else
+ break;
+ case REQ_OP_READ:
op_type = OBJ_OP_READ;
+ break;
+ default:
+ dout("%s: non-fs request type %d\n", __func__, req_op(rq));
+ result = -EIO;
+ goto err;
+ }
/* Ignore/skip any zero-length requests */
if (!crq)
return NULL;
- rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
+ rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
if (IS_ERR(rq)) {
spin_lock_irqsave(&host->lock, flags);
carm_put_request(host, crq);
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
int error = virtblk_result(vbr);
- switch (req->cmd_type) {
- case REQ_TYPE_BLOCK_PC:
+ switch (req_op(req)) {
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
virtblk_scsi_reques_done(req);
break;
- case REQ_TYPE_DRV_PRIV:
+ case REQ_OP_DRV_IN:
req->errors = (error != 0);
break;
}
int qid = hctx->queue_num;
int err;
bool notify = false;
+ u32 type;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
- if (req_op(req) == REQ_OP_FLUSH) {
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
- } else {
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
- vbr->out_hdr.type = 0;
- vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
- break;
- case REQ_TYPE_BLOCK_PC:
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
- break;
- case REQ_TYPE_DRV_PRIV:
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
- vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
- break;
- default:
- /* We don't put anything else in the queue. */
- BUG();
- }
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ type = 0;
+ break;
+ case REQ_OP_FLUSH:
+ type = VIRTIO_BLK_T_FLUSH;
+ break;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ type = VIRTIO_BLK_T_SCSI_CMD;
+ break;
+ case REQ_OP_DRV_IN:
+ type = VIRTIO_BLK_T_GET_ID;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
}
+ vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
+ vbr->out_hdr.sector = type ?
+ 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
+
blk_mq_start_request(req);
num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+ if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
struct request *req;
int err;
- req = blk_get_request(q, READ, GFP_KERNEL);
+ req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
if (IS_ERR(req))
return PTR_ERR(req);
- req->cmd_type = REQ_TYPE_DRV_PRIV;
err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
- return ((req->cmd_type != REQ_TYPE_FS) ||
+ return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
struct request *req;
while ((req = blk_peek_request(q)) != NULL) {
- if (req->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(req))
break;
blk_start_request(req);
__blk_end_request_all(req, -EIO);
len = nr * CD_FRAMESIZE_RAW;
- rq = blk_get_request(q, READ, GFP_KERNEL);
+ rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
break;
struct request *req;
while ((req = blk_fetch_request(rq)) != NULL) {
- if (req->cmd_type != REQ_TYPE_FS) {
- printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
- __blk_end_request_all(req, -EIO);
- continue;
- }
- if (rq_data_dir(req) != READ) {
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ /*
+ * Add to list of deferred work and then schedule
+ * workqueue.
+ */
+ list_add_tail(&req->queuelist, &gdrom_deferred);
+ schedule_work(&work);
+ break;
+ case REQ_OP_WRITE:
pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO);
- continue;
+ break;
+ default:
+ printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+ __blk_end_request_all(req, -EIO);
+ break;
}
-
- /*
- * Add to list of deferred work and then schedule
- * workqueue.
- */
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
}
}
struct request *rq;
int error;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = (char *)pc;
}
sense_rq->rq_disk = rq->rq_disk;
- sense_rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT;
int ide_cd_get_xferlen(struct request *rq)
{
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
return 32768;
- case REQ_TYPE_BLOCK_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
return blk_rq_bytes(rq);
- case REQ_TYPE_DRV_PRIV:
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
switch (ide_req(rq)->type) {
case ATA_PRIV_PC:
case ATA_PRIV_SENSE:
return blk_rq_bytes(rq);
+ default:
+ return 0;
}
- default:
- return 0;
}
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
error = 0;
} else {
- if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
+ if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}
if (!sense->valid)
break;
if (failed_command == NULL ||
- failed_command->cmd_type != REQ_TYPE_FS)
+ blk_rq_is_passthrough(failed_command))
break;
sector = (sense->information[0] << 24) |
(sense->information[1] << 16) |
}
/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
+ if (blk_rq_is_scsi(rq) && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION;
if (blk_noretry_request(rq))
switch (sense_key) {
case NOT_READY:
- if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
+ if (req_op(rq) == REQ_OP_WRITE) {
if (ide_cd_breathe(drive, rq))
return 1;
} else {
cdrom_saw_media_change(drive);
- if (rq->cmd_type == REQ_TYPE_FS &&
+ if (!blk_rq_is_passthrough(rq) &&
!(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);
case UNIT_ATTENTION:
cdrom_saw_media_change(drive);
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
return 0;
/*
do_end_request = 1;
break;
default:
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
break;
if (err & ~ATA_ABORTED) {
/* go to the default handler for other errors */
do_end_request = 1;
}
- if (rq->cmd_type != REQ_TYPE_FS) {
+ if (blk_rq_is_passthrough(rq)) {
rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}
int error;
bool delay = false;
- rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue,
+ write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_PC;
rq->rq_flags |= rq_flags;
rq->timeout = timeout;
ide_read_bcount_and_ireason(drive, &len, &ireason);
- thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
+ thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
if (thislen > len)
thislen = len;
/* If DRQ is clear, the command has completed. */
if ((stat & ATA_DRQ) == 0) {
- if (rq->cmd_type == REQ_TYPE_FS) {
+ switch (req_op(rq)) {
+ default:
/*
* If we're not done reading/writing, complain.
* Otherwise, complete the command normally.
rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
- } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+ goto out_end;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
ide_cd_request_sense_fixup(drive, cmd);
uptodate = cmd->nleft ? 0 : 1;
if (!uptodate)
rq->rq_flags |= RQF_FAILED;
+ goto out_end;
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
+ goto out_end;
}
- goto out_end;
}
rc = ide_check_ireason(drive, rq, len, ireason, write);
/* pad, if necessary */
if (len > 0) {
- if (rq->cmd_type != REQ_TYPE_FS || write == 0)
+ if (blk_rq_is_passthrough(rq) || write == 0)
ide_pad_transfer(drive, write, len);
else {
printk(KERN_ERR PFX "%s: confused, missing data\n",
}
}
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ switch (req_op(rq)) {
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
timeout = rq->timeout;
- } else {
+ break;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ expiry = ide_cd_expiry;
+ /*FALLTHRU*/
+ default:
timeout = ATAPI_WAIT_PC;
- if (rq->cmd_type != REQ_TYPE_FS)
- expiry = ide_cd_expiry;
+ break;
}
hwif->expiry = expiry;
return ide_started;
out_end:
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
+ if (blk_rq_is_scsi(rq) && rc == 0) {
scsi_req(rq)->resid_len = 0;
blk_end_request_all(rq, 0);
hwif->rq = NULL;
if (sense && uptodate)
ide_cd_complete_failed_rq(drive, rq);
- if (rq->cmd_type == REQ_TYPE_FS) {
+ if (!blk_rq_is_passthrough(rq)) {
if (cmd->nleft == 0)
uptodate = 1;
} else {
return ide_stopped;
/* make sure it's fully ended */
- if (rq->cmd_type != REQ_TYPE_FS) {
+ if (blk_rq_is_passthrough(rq)) {
scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
scsi_req(rq)->resid_len += cmd->last_xfer_len;
- ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
- rq->cmd[0], rq->cmd_type);
+ ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x", rq->cmd[0]);
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ if (blk_rq_is_scsi(rq))
rq->rq_flags |= RQF_QUIET;
else
rq->rq_flags &= ~RQF_FAILED;
if (drive->debug_mask & IDE_DBG_RQ)
blk_dump_rq_flags(rq, "ide_cd_do_request");
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
if (cdrom_start_rw(drive, rq) == ide_stopped)
goto out_end;
break;
- case REQ_TYPE_BLOCK_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
handle_pc:
if (!rq->timeout)
rq->timeout = ATAPI_WAIT_PC;
cdrom_do_block_pc(drive, rq);
break;
- case REQ_TYPE_DRV_PRIV:
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
switch (ide_req(rq)->type) {
case ATA_PRIV_MISC:
/* right now this can only be a reset... */
case ATA_PRIV_SENSE:
case ATA_PRIV_PC:
goto handle_pc;
+ default:
+ BUG();
}
- default:
- BUG();
}
/* prepare sense request for this command */
cmd.rq = rq;
- if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+ if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_type == REQ_TYPE_FS)
+ if (!blk_rq_is_passthrough(rq))
return ide_cdrom_prep_fs(q, rq);
- else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ else if (blk_rq_is_scsi(rq))
return ide_cdrom_prep_pc(rq);
return 0;
struct request *rq;
int ret;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
rq->rq_flags = RQF_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
if (!(setting->flags & DS_SYNC))
return setting->set(drive, arg);
- rq = blk_get_request(q, READ, __GFP_RECLAIM);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 5;
scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
ide_hwif_t *hwif = drive->hwif;
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
- BUG_ON(rq->cmd_type != REQ_TYPE_FS);
+ BUG_ON(blk_rq_is_passthrough(rq));
ledtrig_disk_activity();
cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd->tf_flags = IDE_TFLAG_DYN;
cmd->protocol = ATA_PROT_NODATA;
-
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
+ rq->cmd_flags &= ~REQ_OP_MASK;
+ rq->cmd_flags |= REQ_OP_DRV_OUT;
ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq->special = cmd;
cmd->rq = rq;
if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
return -EBUSY;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_TASKFILE;
drive->mult_req = arg;
return ide_stopped;
/* retry only "normal" I/O: */
- if (rq->cmd_type != REQ_TYPE_FS) {
+ if (blk_rq_is_passthrough(rq)) {
if (ata_taskfile_request(rq)) {
struct ide_cmd *cmd = rq->special;
drive->failed_pc = NULL;
if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- rq->cmd_type == REQ_TYPE_BLOCK_PC)
+ (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
uptodate = 1; /* FIXME */
else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
goto out_end;
}
- switch (rq->cmd_type) {
- case REQ_TYPE_FS:
+ switch (req_op(rq)) {
+ default:
if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
(blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
pc = &floppy->queued_pc;
idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
break;
- case REQ_TYPE_BLOCK_PC:
+ case REQ_OP_SCSI_IN:
+ case REQ_OP_SCSI_OUT:
pc = &floppy->queued_pc;
idefloppy_blockpc_cmd(floppy, pc, rq);
break;
- case REQ_TYPE_DRV_PRIV:
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
switch (ide_req(rq)->type) {
case ATA_PRIV_MISC:
case ATA_PRIV_SENSE:
default:
BUG();
}
- break;
- default:
- BUG();
}
ide_prep_sense(drive, rq);
cmd.rq = rq;
- if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+ if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
drive->failed_pc = NULL;
- if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+ if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO;
ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
- else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+ else if (blk_rq_is_passthrough(rq) && rq->errors == 0)
rq->errors = -EIO;
}
if (NULL == (void *) arg) {
struct request *rq;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_TASKFILE;
err = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
struct request *rq;
int ret = 0;
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd_len = 1;
scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
}
spin_unlock_irq(&hwif->lock);
- rq = blk_get_request(q, READ, __GFP_RECLAIM);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
scsi_req(rq)->cmd_len = 1;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = &timeout;
rc = blk_execute_rq(q, NULL, rq, 1);
* Make sure that *some* command is sent to the drive after the
* timeout has expired, so power management will be reenabled.
*/
- rq = blk_get_request(q, READ, GFP_NOWAIT);
+ rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
scsi_req_init(rq);
if (IS_ERR(rq))
goto out;
scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
scsi_req(rq)->cmd_len = 1;
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
{
struct ide_pm_state *pm = rq->special;
- if (rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+ if (blk_rq_is_private(rq) &&
ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
pm->pm_step == IDE_PM_START_SUSPEND)
/* Mark drive blocked when starting the suspend sequence. */
drive->dev_flags |= IDE_DFLAG_BLOCKED;
- else if (rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+ else if (blk_rq_is_private(rq) &&
ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
pm->pm_step == IDE_PM_START_RESUME) {
/*
req->cmd[0], (unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq));
- BUG_ON(rq->cmd_type != REQ_TYPE_DRV_PRIV);
+ BUG_ON(!blk_rq_is_private(rq));
BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
ide_req(rq)->type != ATA_PRIV_SENSE);
BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
BUG_ON(size < 0 || size % tape->blk_size);
- rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_MISC;
scsi_req(rq)->cmd[13] = cmd;
rq->rq_disk = tape->disk;
{
struct request *rq;
int error;
- int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
- rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
+ rq = blk_get_request(drive->queue,
+ (cmd->tf_flags & IDE_TFLAG_WRITE) ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
- rq->cmd_type = REQ_TYPE_DRV_PRIV;
ide_req(rq)->type = ATA_PRIV_TASKFILE;
/*
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = bio_data(req->bio);
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev);
get_capacity(req->rq_disk))
return -EIO;
- if (req_op(req) == REQ_OP_DISCARD)
+ switch (req_op(req)) {
+ case REQ_OP_DISCARD:
return tr->discard(dev, block, nsect);
-
- if (rq_data_dir(req) == READ) {
+ case REQ_OP_READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return -EIO;
rq_flush_dcache_pages(req);
return 0;
- } else {
+ case REQ_OP_WRITE:
if (!tr->writesect)
return -EIO;
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
+ default:
+ return -EIO;
}
}
struct ubiblock *dev = hctx->queue->queuedata;
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
- if (req->cmd_type != REQ_TYPE_FS)
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ ubi_sgl_init(&pdu->usgl);
+ queue_work(dev->wq, &pdu->work);
+ return BLK_MQ_RQ_QUEUE_OK;
+ default:
return BLK_MQ_RQ_QUEUE_ERROR;
+ }
- if (rq_data_dir(req) != READ)
- return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
-
- ubi_sgl_init(&pdu->usgl);
- queue_work(dev->wq, &pdu->work);
-
- return BLK_MQ_RQ_QUEUE_OK;
}
static int ubiblock_init_request(void *data, struct request *req,
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags, int qid)
{
+ unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
struct request *req;
if (qid == NVME_QID_ANY) {
- req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+ req = blk_mq_alloc_request(q, op, flags);
} else {
- req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, op, flags,
qid ? qid - 1 : 0);
}
if (IS_ERR(req))
return req;
- req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
nvme_req(req)->cmd = cmd;
{
int ret = BLK_MQ_RQ_QUEUE_OK;
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
- else if (req_op(req) == REQ_OP_FLUSH)
+ break;
+ case REQ_OP_FLUSH:
nvme_setup_flush(ns, cmd);
- else if (req_op(req) == REQ_OP_DISCARD)
+ break;
+ case REQ_OP_DISCARD:
ret = nvme_setup_discard(ns, req, cmd);
- else
+ break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
nvme_setup_rw(ns, req, cmd);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
cmd->common.command_id = req->tag;
-
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
ib_dma_sync_single_for_device(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
- if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+ if (req_op(rq) == REQ_OP_FLUSH)
flush = true;
ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
struct bio *bio = oii->bio;
int ret;
- req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ flags);
if (IS_ERR(req))
return req;
scsi_req_init(req);
int err = 0;
int write = (data_direction == DMA_TO_DEVICE);
- req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
* blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
* request becomes available
*/
- req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+ req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return;
rq = scsi_req(req);
req_flags_t rq_flags, int *resid)
{
struct request *req;
- int write = (data_direction == DMA_TO_DEVICE);
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
+ req = blk_get_request(sdev->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
/*
- * Certain non BLOCK_PC requests are commands that don't
- * actually transfer anything (FLUSH), so cannot use
+ * Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
blk_rq_sectors(req), good_bytes));
/*
- * Recovered errors need reporting, but they're always treated
- * as success, so fiddle the result code here. For BLOCK_PC
+ * Recovered errors need reporting, but they're always treated as
+ * success, so fiddle the result code here. For passthrough requests
* we already took a copy of the original into rq->errors which
* is what gets returned to the user
*/
else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
- /* BLOCK_PC may have set error */
+ /* for passthrough requests the error may already be set */
error = 0;
}
spin_unlock_irqrestore(&dev->list_lock, flags);
}
-static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
/*
- * BLOCK_PC requests may transfer data, in which case they must
+ * Passthrough requests may transfer data, in which case they must
* have a bio attached to them. Or they might contain a SCSI command
* that does not transfer data, in which case they may optionally
* submit a request without an attached bio.
}
/*
- * Setup a REQ_TYPE_FS command. These are simple request from filesystems
+ * Setup a normal block command. These are simple requests from filesystems
* that still need to be translated to SCSI CDBs from the ULD.
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
- switch (req->cmd_type) {
- case REQ_TYPE_FS:
+ if (blk_rq_is_scsi(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+ else
return scsi_setup_fs_cmnd(sdev, req);
- case REQ_TYPE_BLOCK_PC:
- return scsi_setup_blk_pc_cmnd(sdev, req);
- default:
- return BLKPREP_KILL;
- }
}
static int
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
- rq = blk_get_request(q, rw, GFP_KERNEL);
+ rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
goto out;
}
- if (rq_data_dir(rq) == WRITE) {
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE:
if (!cd->writeable)
goto out;
SCpnt->cmnd[0] = WRITE_10;
cd->cdi.media_written = 1;
- } else if (rq_data_dir(rq) == READ) {
+ break;
+ case REQ_OP_READ:
SCpnt->cmnd[0] = READ_10;
- } else {
+ break;
+ default:
blk_dump_rq_flags(rq, "Unknown sr command");
goto out;
}
struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
- int write = (data_direction == DMA_TO_DEVICE);
struct scsi_tape *STp = SRpnt->stp;
- req = blk_get_request(SRpnt->stp->device->request_queue, write,
- GFP_KERNEL);
+ req = blk_get_request(SRpnt->stp->device->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(req))
return DRIVER_ERROR << 24;
rq = scsi_req(req);
scsi_command_size(cmd->t_task_cdb));
req = blk_get_request(pdv->pdv_sd->request_queue,
- (cmd->data_direction == DMA_TO_DEVICE),
+ cmd->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed\n");
if (!buf)
return -ENOMEM;
- rq = blk_get_request(q, READ, GFP_KERNEL);
+ rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
error = -ENOMEM;
goto out_free_buf;
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 8,
+ /* SCSI passthrough using struct scsi_request */
+ REQ_OP_SCSI_IN = 32,
+ REQ_OP_SCSI_OUT = 33,
+ /* Driver private requests */
+ REQ_OP_DRV_IN = 34,
+ REQ_OP_DRV_OUT = 35,
+
REQ_OP_LAST,
};
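With the passthrough ops folded into the op space, a driver's dispatch reduces to one switch on req_op(); a sketch with placeholder handlers (handle_fs_io() and friends are made up):

	static int example_dispatch(struct request *req)
	{
		switch (req_op(req)) {
		case REQ_OP_READ:
		case REQ_OP_WRITE:
			return handle_fs_io(req);	/* normal block I/O */
		case REQ_OP_SCSI_IN:
		case REQ_OP_SCSI_OUT:
			return handle_scsi_cmd(req);	/* CDB in scsi_req(req)->cmd */
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			return handle_private_cmd(req);	/* driver-defined payload */
		default:
			return -EIO;
		}
	}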
unsigned int flags;
};
-/*
- * request command types
- */
-enum rq_cmd_type_bits {
- REQ_TYPE_FS = 1, /* fs request */
- REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_DRV_PRIV, /* driver defined types from here */
-};
-
/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;
struct blk_mq_ctx *mq_ctx;
int cpu;
- unsigned cmd_type;
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
unsigned long atomic_flags;
struct request *next_rq;
};
+static inline bool blk_rq_is_scsi(struct request *rq)
+{
+ return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_rq_is_private(struct request *rq)
+{
+ return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+}
+
static inline bool blk_rq_is_passthrough(struct request *rq)
{
- return rq->cmd_type != REQ_TYPE_FS;
+ return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}
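The old cmd_type tests map one-to-one onto the new helpers, which is how the conversions throughout this patch were chosen:

	rq->cmd_type == REQ_TYPE_BLOCK_PC  ->  blk_rq_is_scsi(rq)
	rq->cmd_type == REQ_TYPE_DRV_PRIV  ->  blk_rq_is_private(rq)
	rq->cmd_type != REQ_TYPE_FS        ->  blk_rq_is_passthrough(rq)
	rq->cmd_type == REQ_TYPE_FS        ->  !blk_rq_is_passthrough(rq)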
static inline unsigned short req_get_ioprio(struct request *req)
static inline bool ata_misc_request(struct request *rq)
{
- return rq->cmd_type == REQ_TYPE_DRV_PRIV &&
- ide_req(rq)->type == ATA_PRIV_MISC;
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
}
static inline bool ata_taskfile_request(struct request *rq)
{
- return rq->cmd_type == REQ_TYPE_DRV_PRIV &&
- ide_req(rq)->type == ATA_PRIV_TASKFILE;
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
}
static inline bool ata_pc_request(struct request *rq)
{
- return rq->cmd_type == REQ_TYPE_DRV_PRIV &&
- ide_req(rq)->type == ATA_PRIV_PC;
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
}
static inline bool ata_sense_request(struct request *rq)
{
- return rq->cmd_type == REQ_TYPE_DRV_PRIV &&
- ide_req(rq)->type == ATA_PRIV_SENSE;
+ return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
}
static inline bool ata_pm_request(struct request *rq)
{
- return rq->cmd_type == REQ_TYPE_DRV_PRIV &&
+ return blk_rq_is_private(rq) &&
(ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
ide_req(rq)->type == ATA_PRIV_PM_RESUME);
}