/* service tree for active throtl groups */
struct throtl_rb_root tg_service_tree;
- struct throtl_grp *root_tg;
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
return pdata_to_blkg(tg);
}
+/* root-cgroup throtl_grp, derived from the queue's root blkg instead of a cached pointer */
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+ return blkg_to_tg(td->queue->root_blkg);
+}
+
enum tg_state_flags {
THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
};
* Avoid lookup in this case
*/
if (blkcg == &blkio_root_cgroup)
- return td->root_tg;
+ return td_root_tg(td);
return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
* Avoid lookup in this case
*/
if (blkcg == &blkio_root_cgroup) {
- tg = td->root_tg;
+ tg = td_root_tg(td);
} else {
struct blkio_group *blkg;
if (!IS_ERR(blkg))
tg = blkg_to_tg(blkg);
else if (!blk_queue_dead(q))
- tg = td->root_tg;
+ tg = td_root_tg(td);
}
return tg;
blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
if (!IS_ERR(blkg))
- td->root_tg = blkg_to_tg(blkg);
+ q->root_blkg = blkg;
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
- if (!td->root_tg) {
+ if (!q->root_blkg) {
kfree(td);
return -ENOMEM;
}
spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
- if (!IS_ERR(blkg))
+ if (!IS_ERR(blkg)) {
+ q->root_blkg = blkg;
cfqd->root_group = blkg_to_cfqg(blkg);
+ }
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
struct request;
struct sg_io_hdr;
struct bsg_job;
+struct blkio_group;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
+ struct blkio_group *root_blkg;
struct list_head blkg_list;
#endif
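
Note: the new request_queue->root_blkg field makes the root blkio_group reachable from the queue itself, so a policy's root group can be derived on demand the way td_root_tg() does above. A minimal sketch of the same pattern for cfq follows; the cfqd_root_cfqg() helper is only an illustration, not part of this change, and it assumes struct cfq_data keeps its queue pointer in ->queue.

static inline struct cfq_group *cfqd_root_cfqg(struct cfq_data *cfqd)
{
	/* reach the root group through the queue rather than a per-policy cache */
	return blkg_to_cfqg(cfqd->queue->root_blkg);
}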