return rnp->completed + 2;
}
-/*
- * Trace-event helper function for rcu_start_future_gp() and
- * rcu_nocb_wait_gp().
- */
-static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c, const char *s)
+/* Trace-event wrapper function for trace_rcu_future_grace_period. */
+static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+ unsigned long c, const char *s)
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}
/*
- * Start some future grace period, as needed to handle newly arrived
+ * Start the specified grace period, as needed to handle newly arrived
* callbacks. The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp field. Returns true if there
+ * rcu_node structure's ->need_future_gp[] field. Returns true if there
* is reason to awaken the grace-period kthread.
*
* The caller must hold the specified rcu_node structure's ->lock, which
* is why the caller is responsible for waking the grace-period kthread.
*/
-static bool __maybe_unused
-rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long *c_out)
+static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+ unsigned long c)
{
- unsigned long c;
bool ret = false;
struct rcu_state *rsp = rdp->rsp;
struct rcu_node *rnp_root = rcu_get_root(rsp);
raw_lockdep_assert_held_rcu_node(rnp);
- /*
- * Pick up grace-period number for new callbacks. If this
- * grace period is already marked as needed, return to the caller.
- */
- c = rcu_cbs_completed(rsp, rnp);
- trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
+ /* If the specified GP is already known needed, return to caller. */
+ trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
if (need_future_gp_element(rnp, c)) {
- trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("Prestartleaf"));
goto out;
}
	/*
	 * If this rcu_node structure believes that a grace period is in
	 * progress, then we must wait for the one following, which is in
	 * the future.  This allows our callbacks to be handled by the
	 * first full grace period.
	 */
if (rnp->gpnum != rnp->completed) {
need_future_gp_element(rnp, c) = true;
- trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleaf"));
goto out;
}
	/*
	 * If the need for the required grace period is already
	 * recorded, trace and leave.
	 */
if (need_future_gp_element(rnp_root, c)) {
- trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("Prestartedroot"));
goto unlock_out;
}
/* If a grace period is not already in progress, start one. */
if (rnp_root->gpnum != rnp_root->completed) {
- trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleafroot"));
} else {
- trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("Startedroot"));
if (!rsp->gp_kthread)
goto unlock_out; /* No grace-period kthread yet! */
		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
		ret = true;  /* Caller must wake GP kthread. */
	}
unlock_out:
if (rnp != rnp_root)
raw_spin_unlock_rcu_node(rnp_root);
out:
- if (c_out != NULL)
- *c_out = c;
return ret;
}
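/*
 * Usage sketch: per the header comment above, a caller samples the
 * target grace-period number under its rcu_node ->lock, passes that
 * same number in, and does any kthread wakeup only after dropping
 * the lock, for example:
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	c = rcu_cbs_completed(rsp, rnp);
 *	needwake = rcu_start_this_gp(rnp, rdp, c);
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *	if (needwake)
 *		rcu_gp_kthread_wake(rsp);
 *
 * This is the same pattern the rcu_nocb_wait_gp() hunk below adopts.
 */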
need_future_gp_element(rnp, c) = false;
needmore = need_any_future_gp(rnp);
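	/*
	 * This GP's slot is now clear, so any slot that need_any_future_gp()
	 * still finds set means some later grace period remains requested.
	 */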
- trace_rcu_future_gp(rnp, rdp, c,
- needmore ? TPS("CleanupMore") : TPS("Cleanup"));
+ trace_rcu_this_gp(rnp, rdp, c,
+ needmore ? TPS("CleanupMore") : TPS("Cleanup"));
return needmore;
}
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp)
{
+ unsigned long c;
bool ret = false;
raw_lockdep_assert_held_rcu_node(rnp);
	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
- if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
- ret = rcu_start_future_gp(rnp, rdp, NULL);
+ c = rcu_cbs_completed(rsp, rnp);
+ if (rcu_segcblist_accelerate(&rdp->cblist, c))
+ ret = rcu_start_this_gp(rnp, rdp, c);
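	/*
	 * Note that "c" is now sampled exactly once, so the number given to
	 * rcu_segcblist_accelerate() and the number requested from
	 * rcu_start_this_gp() can no longer disagree, which is the point
	 * of having the caller select the grace period.
	 */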
/* Trace depending on how much we were able to accelerate. */
if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(rsp->rda);
if (need_any_future_gp(rnp)) {
- trace_rcu_future_gp(rnp, rdp, rsp->completed - 1,
- TPS("CleanupMore"));
+ trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+ TPS("CleanupMore"));
needgp = true;
}
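	/*
	 * needgp is presumably consumed later in this function (not shown
	 * in this excerpt) to start the follow-on grace period right away.
	 */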
/* Advance CBs to reduce false positives below. */
struct rcu_node *rnp = rdp->mynode;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- needwake = rcu_start_future_gp(rnp, rdp, &c);
+ c = rcu_cbs_completed(rdp->rsp, rnp);
+ needwake = rcu_start_this_gp(rnp, rdp, c);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (needwake)
rcu_gp_kthread_wake(rdp->rsp);
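	/*
	 * Per rcu_start_this_gp()'s contract, the grace-period kthread is
	 * awakened only here, after the rcu_node ->lock has been released.
	 */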
	/*
	 * Wait for the grace period.  Do so interruptibly to avoid messing
* up the load average.
*/
- trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
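	/*
	 * There are only two nocb_gp_wq[] wait queues, indexed by the
	 * bottom bit of the grace-period number, so waiters on consecutive
	 * grace periods land on different queues.
	 */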
for (;;) {
swait_event_interruptible(
			rnp->nocb_gp_wq[c & 0x1],
			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
if (likely(d))
break;
WARN_ON(signal_pending(current));
- trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait"));
}
- trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
+ trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait"));
smp_mb(); /* Ensure that CB invocation happens after GP end. */
}