comm2_time = sample.time;
}
next_event:
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
}
while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
if (err) {
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
pr_err("Failed to parse sample\n");
return -1;
}
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);
} else
++session->evlist->stats.nr_unknown_events;
next_event:
- perf_mmap__consume(md, opts->overwrite);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
trace__handle_event(trace, event, &sample);
next_event:
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
if (interrupted)
goto out_disable;
while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
ret = process_event(machine, evlist, event, state);
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
if (ret < 0)
return ret;
}
(pid_t)event->comm.tid == getpid() &&
strcmp(event->comm.comm, comm) == 0)
found += 1;
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
}
goto out_delete_evlist;
}
nr_events[evsel->idx]++;
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
++nr_events;
if (type != PERF_RECORD_SAMPLE) {
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
continue;
}
++errs;
}
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
}
total_periods += sample.period;
nr_samples++;
next_event:
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
cnt += 1;
ret = add_event(evlist, &events, event);
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
if (ret < 0)
goto out_free_nodes;
}
if (event->header.type == PERF_RECORD_EXIT)
nr_exit++;
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
perf_mmap__munmap(map);
}
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite __maybe_unused)
+void perf_mmap__consume(struct perf_mmap *map)
{
if (!map->overwrite) {
u64 old = map->prev;
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->prev = head;
- perf_mmap__consume(md, overwrite);
+ perf_mmap__consume(md);
return -EAGAIN;
}
}
md->prev = head;
- perf_mmap__consume(md, md->overwrite);
+ perf_mmap__consume(md);
out:
return rc;
}
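For reference, a minimal sketch (not part of this patch) of what a consumer loop looks like after the change: the overwrite mode now lives in struct perf_mmap itself, so every call site collapses to perf_mmap__consume(md). drain_one_mmap and handle_event are hypothetical names used only for illustration; start and end are assumed to have been produced by whatever ring-buffer setup the loops above already rely on.

#include "mmap.h"	/* struct perf_mmap, perf_mmap__* helpers */
#include "event.h"	/* union perf_event */

/* Hypothetical helper, only to illustrate the simplified API. */
static int drain_one_mmap(struct perf_mmap *md, u64 start, u64 end,
			  int (*handle_event)(union perf_event *event))
{
	union perf_event *event;
	int err = 0;

	while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
		err = handle_event(event);

		/* The map knows whether it is in overwrite mode. */
		perf_mmap__consume(md);

		if (err)
			break;
	}

	perf_mmap__read_done(md);
	return err;
}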
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
+void perf_mmap__consume(struct perf_mmap *map);
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
/* Consume the event only after we parsed it out. */
- perf_mmap__consume(md, false);
+ perf_mmap__consume(md);
if (err)
return PyErr_Format(PyExc_OSError,