#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
+#include <linux/freezer.h>
#include <xen/balloon.h>
-#include <asm/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
#include "common.h"
/*
static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
- blkif_request_t *req,
+ struct blkif_request *req,
pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
unsigned short op, int st);
static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
if (q == blkif->plug)
return;
}
}
-static int end_block_io_op(struct bio *bio, unsigned int done, int error)
+/*
+ * Bio completion callback for requests issued to the backing device.
+ * The block layer's bi_end_io contract changed (2.6.24-era): the callback
+ * is now invoked exactly once per bio with no partial "done" byte count,
+ * so the bi_size != 0 early-return is gone and the return type is void.
+ */
+static void end_block_io_op(struct bio *bio, int error)
{
-if (bio->bi_size != 0)
-return 1;
+/* bi_private carries the pending_req_t set when the bio was submitted. */
__end_block_io_op(bio->bi_private, error);
bio_put(bio);
-return error;
}
wake_up(&blkif->wq);
}
-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t blkif_be_int(int irq, void *dev_id)
{
blkif_notify_work(dev_id);
return IRQ_HANDLED;
static int do_block_io_op(blkif_t *blkif)
{
- blkif_back_rings_t *blk_rings = &blkif->blk_rings;
- blkif_request_t req;
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ struct blkif_request req;
pending_req_t *pending_req;
RING_IDX rc, rp;
int more_to_do = 0;
}
static void dispatch_rw_block_io(blkif_t *blkif,
- blkif_request_t *req,
+ struct blkif_request *req,
pending_req_t *pending_req)
{
extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
static void make_response(blkif_t *blkif, u64 id,
unsigned short op, int st)
{
- blkif_response_t resp;
+ struct blkif_response resp;
unsigned long flags;
- blkif_back_rings_t *blk_rings = &blkif->blk_rings;
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
int more_to_do = 0;
int notify;
{
int i, mmap_pages;
- if (!is_running_on_xen())
+ printk(KERN_CRIT "***blkif_init\n");
+ if (!xen_pv_domain())
return -ENODEV;
mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
#include <xen/evtchn.h>
#include <asm/hypervisor.h>
#include <xen/blkif.h>
-#include <xen/gnttab.h>
-#include <xen/driver_util.h>
+#include <xen/grant_table.h>
#include <xen/xenbus.h>
#define DPRINTK(_f, _a...) \
unsigned int irq;
/* Comms information. */
enum blkif_protocol blk_protocol;
- blkif_back_rings_t blk_rings;
+ union blkif_back_rings blk_rings;
struct vm_struct *blk_ring_area;
/* The VBD attached to this interface. */
struct vbd vbd;
wait_queue_head_t wq;
struct task_struct *xenblkd;
unsigned int waiting_reqs;
- request_queue_t *plug;
+ struct request_queue *plug;
/* statistics */
unsigned long st_print;
void blkif_xenbus_init(void);
-irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+irqreturn_t blkif_be_int(int irq, void *dev_id);
int blkif_schedule(void *arg);
int blkback_barrier(struct xenbus_transaction xbt,
*/
#include "common.h"
-#include <xen/evtchn.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
#include <linux/kthread.h>
-static kmem_cache_t *blkif_cachep;
+/* Slab cache for blkif_t allocations (see blkif_alloc/blkif_free).
+ * The kmem_cache_t typedef was removed upstream in favour of the
+ * plain struct kmem_cache tag. */
+static struct kmem_cache *blkif_cachep;
blkif_t *blkif_alloc(domid_t domid)
{
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
- blkif_sring_t *sring;
- sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+ struct blkif_sring *sring;
+ sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_32:
{
- blkif_x86_32_sring_t *sring_x86_32;
- sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+ struct blkif_x86_32_sring *sring_x86_32;
+ sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
break;
}
case BLKIF_PROTOCOL_X86_64:
{
- blkif_x86_64_sring_t *sring_x86_64;
- sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+ struct blkif_x86_64_sring *sring_x86_64;
+ sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
break;
}
+/*
+ * Create the slab cache used for blkif_t instances.
+ * kmem_cache_create() dropped its trailing destructor argument
+ * (removed upstream in 2.6.23), hence the five-argument form.
+ * NOTE(review): the return value is not checked — a failed cache
+ * creation will only surface later in blkif_alloc(); confirm that
+ * is acceptable or propagate an error from here.
+ */
void __init blkif_interface_init(void)
{
blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
-0, 0, NULL, NULL);
+0, 0, NULL);
}
#include "common.h"
+/* Size of the VBD in 512-byte sectors: partition size when attached to a
+ * partition, otherwise whole-disk capacity.  struct gendisk no longer
+ * exposes ->capacity directly; use the get_capacity() accessor. */
#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
-(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
+(_v)->bdev->bd_part->nr_sects : get_capacity((_v)->bdev->bd_disk))
unsigned long long vbd_size(struct vbd *vbd)
{
+/* Release the block device backing this VBD, if any. */
void vbd_free(struct vbd *vbd)
{
if (vbd->bdev)
-blkdev_put(vbd->bdev);
+/* blkdev_put() now takes the open mode; it must mirror the mode used
+ * when the device was opened in vbd_create() —
+ * NOTE(review): confirm the open path applies the same readonly test. */
+blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
+/* Clear the pointer so a second vbd_free() is a harmless no-op. */
vbd->bdev = NULL;
}
/* setup back pointer */
be->blkif->be = be;
- err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
- &be->backend_watch, backend_changed);
+ err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
+ "%s/%s", dev->nodename, "physical-device");
if (err)
goto fail;
+/*
+ * Register the blkback driver with the xenbus backend framework.
+ * xenbus_register_backend() is now declared __must_check; the result is
+ * deliberately discarded here (cast to void) to silence the warning.
+ * NOTE(review): propagate the error to the module init path once the
+ * caller can handle registration failure.
+ */
void blkif_xenbus_init(void)
{
-xenbus_register_backend(&blkback);
+/* XXX must_check */
+(void)xenbus_register_backend(&blkback);
}
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
+/*
+ * One back-ring view per supported guest ABI, overlaid on the same
+ * storage; the protocol negotiated at connect time (blk_protocol)
+ * selects which member is valid.  The *_t ring typedefs are replaced
+ * by struct tags, and the union typedef below is dropped, per upstream
+ * kernel style (no typedefs for structs/unions).
+ */
union blkif_back_rings {
-blkif_back_ring_t native;
-blkif_common_back_ring_t common;
-blkif_x86_32_back_ring_t x86_32;
-blkif_x86_64_back_ring_t x86_64;
+struct blkif_back_ring native;
+struct blkif_common_back_ring common;
+struct blkif_x86_32_back_ring x86_32;
+struct blkif_x86_64_back_ring x86_64;
};
-typedef union blkif_back_rings blkif_back_rings_t;
+/* Guest ring ABI negotiated via the "protocol" xenstore key.
+ * NOTE(review): BLKIF_PROTOCOL_X86_32 (presumably = 2) appears to be in
+ * the elided context between these lines — confirm against the full
+ * header; values must match the Xen blkif protocol ABI. */
enum blkif_protocol {
BLKIF_PROTOCOL_NATIVE = 1,
BLKIF_PROTOCOL_X86_64 = 3,
};
-static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
+static void inline blkif_get_x86_32_req(struct blkif_request *dst, struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
dst->seg[i] = src->seg[i];
}
-static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
+static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;