{
int result;
u32 aqa;
- u64 cap = readq(&dev->bar->cap);
+ u64 cap = lo_hi_readq(&dev->bar->cap);
struct nvme_queue *nvmeq;
unsigned page_shift = PAGE_SHIFT;
unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
writel(aqa, &dev->bar->aqa);
- writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
- writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+ lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+ lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
result = nvme_enable_ctrl(dev, cap);
if (result)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int res;
struct nvme_id_ctrl *ctrl;
- int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+ int shift = NVME_CAP_MPSMIN(lo_hi_readq(&dev->bar->cap)) + 12;
res = nvme_identify_ctrl(dev, &ctrl);
if (res) {
goto unmap;
}
- cap = readq(&dev->bar->cap);
+ cap = lo_hi_readq(&dev->bar->cap);
dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
* queues than admin tags.
*/
set_current_state(TASK_RUNNING);
- nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+ nvme_disable_ctrl(dev, lo_hi_readq(&dev->bar->cap));
nvme_clear_queue(dev->queues[0]);
flush_kthread_worker(dq->worker);
nvme_disable_queue(dev, 0);
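
For reference, a rough sketch of what the lo_hi_* accessors used above do, assuming the generic fallback provided by the io-64-nonatomic-lo-hi.h header (which the file must include for these calls): the 64-bit MMIO access is split into two 32-bit accesses, low dword first, so the driver still builds on architectures that do not define a native readq/writeq. This is a sketch for illustration, not the driver's own code:

/*
 * Sketch of the split 64-bit MMIO helpers: each 64-bit register
 * access is performed as two 32-bit accesses, low dword first.
 */
static inline u64 lo_hi_readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);			/* read the low dword first */
	high = readl(p + 1);		/* then the high dword */

	return low + ((u64)high << 32);
}

static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);		/* write the low dword first */
	writel(val >> 32, addr + 4);	/* then the high dword */
}

Calling lo_hi_readq()/lo_hi_writeq() directly, as the hunks above do, makes the split-access behaviour explicit regardless of whether the architecture provides native 64-bit accessors.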