/* bits in struct cgroup_subsys_state flags field */
enum {
+ CSS_NO_REF = (1 << 0), /* no reference counting for this css */
CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
};
/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
-	percpu_ref_get(&css->refcnt);
+	if (!(css->flags & CSS_NO_REF))
+		percpu_ref_get(&css->refcnt);
}
/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller must ensure that
 * @css is accessible but doesn't have to hold a reference on it - RCU
 * protected access is good enough.  Returns %true if a reference was
 * obtained, %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
-	return percpu_ref_tryget_live(&css->refcnt);
+	if (!(css->flags & CSS_NO_REF))
+		return percpu_ref_tryget_live(&css->refcnt);
+	return true;
}
/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
-	percpu_ref_put(&css->refcnt);
+	if (!(css->flags & CSS_NO_REF))
+		percpu_ref_put(&css->refcnt);
}
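/*
 * Illustrative sketch, not part of the patch: the caller pattern the
 * helpers above are built for.  RCU protection is enough to look at a
 * css; a real reference is held only once css_tryget_online() succeeds.
 * The function name below is hypothetical.
 */
static void example_css_user(struct cgroup_subsys_state *css)
{
	rcu_read_lock();
	if (!css_tryget_online(css)) {
		rcu_read_unlock();
		return;			/* @css has already gone offline */
	}
	rcu_read_unlock();

	/* ... work with @css while the reference is held ... */

	css_put(css);			/* no-op when CSS_NO_REF is set */
}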
/* bits in struct cgroup flags field */
/* We don't handle early failures gracefully */
BUG_ON(IS_ERR(css));
init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
+
+ /*
+ * Root csses are never destroyed and we can't initialize
+ * percpu_ref during early init. Disable refcnting.
+ */
+ css->flags |= CSS_NO_REF;
+
if (early) {
/* allocation can't be done safely during early init */
css->id = 1;
} else {
- BUG_ON(percpu_ref_init(&css->refcnt, css_release));
css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
BUG_ON(css->id < 0);
}
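/*
 * For context (an assumption, not part of this hunk): removing
 * percpu_ref_init() above only affects the root csses created at boot.
 * Csses created later for child cgroups are still expected to set up
 * their refcount at creation time, roughly (error label hypothetical):
 *
 *	err = percpu_ref_init(&css->refcnt, css_release);
 *	if (err)
 *		goto err_free_css;
 *
 * so CSS_NO_REF stays limited to the never-destroyed boot-time csses.
 */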
int i;
init_cgroup_root(&cgrp_dfl_root, &opts);
+ cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
+
RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
for_each_subsys(ss, i) {
struct cgroup_subsys_state *css =
init_css_set.subsys[ss->id];
- BUG_ON(percpu_ref_init(&css->refcnt, css_release));
css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
GFP_KERNEL);
BUG_ON(css->id < 0);
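/*
 * Net effect, shown for illustration (not part of the patch; function
 * name is hypothetical): with CSS_NO_REF set on the boot-time csses and
 * on cgrp_dfl_root's self css, the get/put helpers reduce to a flag test
 * for these objects, so no percpu_ref ever has to be initialized for them.
 */
static inline void example_root_css_refs(void)
{
	struct cgroup_subsys_state *self = &cgrp_dfl_root.cgrp.self;

	css_get(self);		/* returns immediately: CSS_NO_REF is set */
	css_put(self);		/* returns immediately: CSS_NO_REF is set */
}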