if (hits == rec->samples) {
if (done || draining)
break;
- err = perf_evlist__poll(rec->evlist, -1);
+ err = evlist__poll(rec->evlist, -1);
/*
* Propagate error, only if there's any. Ignore positive
 * number of returned events and interrupt error.
 */
}
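For context: the comment above documents the check that immediately follows in builtin-record.c. A minimal sketch of that propagation logic, assuming err holds the evlist__poll() return value (paraphrased, not quoted from this hunk):

/* Sketch: a positive poll() result just means fds are ready, and
 * EINTR means a signal handler (e.g. the one that sets `done`)
 * interrupted the wait; neither should be reported as an error. */
if (err > 0 || (err < 0 && errno == EINTR))
	err = 0;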
/* Wait for a minimal set of events before starting the snapshot */
- perf_evlist__poll(top->evlist, 100);
+ evlist__poll(top->evlist, 100);
perf_top__mmap_read(top);
if (opts->overwrite || (hits == top->samples))
- ret = perf_evlist__poll(top->evlist, 100);
+ ret = evlist__poll(top->evlist, 100);
if (resize) {
perf_top__resize(top);
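The top loop paces itself rather than spinning: it re-reads the mmap buffers every pass and only sleeps in poll() when the previous pass produced no new samples (or in overwrite mode, where the kernel stops writing once a buffer fills). A hedged sketch of that pattern, with hits captured before the read as in the hunk above:

/* Illustrative pacing loop (condensed from the call sites above). */
u64 hits = top->samples;		/* samples seen before this pass */

perf_top__mmap_read(top);		/* drain whatever is buffered */

if (opts->overwrite || (hits == top->samples))
	evlist__poll(top->evlist, 100);	/* no new samples: wait up to 100 ms */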
if (trace->nr_events == before) {
int timeout = done ? 100 : -1;
- if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
+ if (!draining && evlist__poll(evlist, timeout) > 0) {
if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
draining = true;
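builtin-trace.c uses the poll result to notice dying tracees: evlist__filter_pollfd() drops every descriptor whose revents matches the given mask and returns how many pollable descriptors remain, so a return of 0 means all counters are gone and only buffered events are left to drain. A sketch of that shutdown pattern; drain_remaining_events() is a hypothetical stand-in for the event-consuming code:

/* Illustrative shutdown pattern (names partly invented). */
if (!draining && evlist__poll(evlist, timeout) > 0) {
	/* hung-up/invalid fds are removed from the poll set */
	if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
		draining = true;	/* nothing left to poll */
}
if (draining)
	drain_remaining_events(evlist);	/* hypothetical helper */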
return pos;
}
+
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
+{
+ return fdarray__poll(&evlist->pollfd, timeout);
+}
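fdarray__poll() (from tools/lib/api) is a thin wrapper around poll(2) on the descriptors gathered in evlist->pollfd, so the new entry point inherits poll()'s contract; summarized here as a comment, not as part of the patch:

/* Return values of perf_evlist__poll(), inherited from poll(2):
 *   > 0  number of entries with non-zero revents
 *     0  timeout expired with no ready descriptors
 *   < 0  error, errno set (EINTR on signal, etc.)
 */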
LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
+LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
#endif /* __LIBPERF_EVLIST_H */
perf_evlist__remove;
perf_evlist__next;
perf_evlist__set_maps;
+ perf_evlist__poll;
local:
*;
};
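With the declaration in the public header and the symbol listed in the version script, an out-of-tree libperf user can wait on an event list directly. A minimal, hedged sketch of such a caller; creating and opening the evlist with the rest of the libperf API (perf_evlist__new(), perf_evlist__open(), ...) is elided, and the timeout is arbitrary:

#include <errno.h>
#include <perf/evlist.h>

/* Sketch: block until the already-opened evlist has events, for at
 * most half a second. Returns the ready-fd count or a negative errno. */
static int wait_for_events(struct perf_evlist *evlist)
{
	int n = perf_evlist__poll(evlist, 500);

	return n < 0 ? -errno : n;
}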
}
if (nr_events == before)
- perf_evlist__poll(evlist, 10);
+ evlist__poll(evlist, 10);
if (++nr_polls > 5) {
pr_debug("%s: no events!\n", __func__);
* perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
*/
if (total_events == before && false)
- perf_evlist__poll(evlist, -1);
+ evlist__poll(evlist, -1);
sleep(1);
if (++wakeups > 5) {
out_init:
if (!exited || !nr_exit) {
- perf_evlist__poll(evlist, -1);
+ evlist__poll(evlist, -1);
goto retry;
}
perf_evlist__munmap_filtered, NULL);
}
-int perf_evlist__poll(struct evlist *evlist, int timeout)
+int evlist__poll(struct evlist *evlist, int timeout)
{
- return fdarray__poll(&evlist->core.pollfd, timeout);
+ return perf_evlist__poll(&evlist->core, timeout);
}
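The tools/perf wrapper keeps its signature but now delegates to libperf: struct evlist embeds a struct perf_evlist named core, so &evlist->core.pollfd (the old direct access) and the pollfd reached through perf_evlist__poll(&evlist->core, ...) are the same fdarray. The resulting layering, sketched as a comment:

/* Call chain after this patch (sketch):
 *
 *   evlist__poll(evlist, t)                      tools/perf
 *     -> perf_evlist__poll(&evlist->core, t)     libperf
 *          -> fdarray__poll(&pollfd, t)          tools/lib/api
 *               -> poll(2)
 */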
static void perf_evlist__set_sid_idx(struct evlist *evlist,
draining = true;
if (!draining)
- perf_evlist__poll(evlist, 1000);
+ evlist__poll(evlist, 1000);
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &evlist->mmap[i];
int evlist__add_pollfd(struct evlist *evlist, int fd);
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
-int perf_evlist__poll(struct evlist *evlist, int timeout);
+int evlist__poll(struct evlist *evlist, int timeout);
struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id);
struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
return NULL;
- n = perf_evlist__poll(evlist, timeout);
+ n = evlist__poll(evlist, timeout);
if (n < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
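The binding follows the usual CPython convention: a negative poll result raises OSError from errno. The hunk is truncated here; on success it presumably hands the count back as a Python int, sketched below rather than quoted:

	}

	/* assumed continuation: return the ready-fd count to Python */
	return Py_BuildValue("i", n);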