 		return -EPERM;
 	vma->vm_flags &= ~VM_MAYWRITE;
 
-	if (!dev->mdev->clock_info_page)
+	if (!dev->mdev->clock_info)
 		return -EOPNOTSUPP;
 
 	return rdma_user_mmap_page(&context->ibucontext, vma,
-				   dev->mdev->clock_info_page, PAGE_SIZE);
+				   virt_to_page(dev->mdev->clock_info),
+				   PAGE_SIZE);
 }
 
 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
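(Context: the hunk above is in mlx5_ib_mmap_clock_info_page() in drivers/infiniband/hw/mlx5/main.c; the remaining hunks are in drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c.) The mmap path stops caching a struct page pointer and recovers the page with virt_to_page() instead, which is safe only because clock_info now comes from get_zeroed_page() and therefore lives in the kernel's direct mapping. A minimal sketch of the pattern being relied on, hypothetical code rather than part of the patch:

	unsigned long addr = get_zeroed_page(GFP_KERNEL);	/* zeroed lowmem page */
	struct page *pg;

	if (!addr)
		return -ENOMEM;
	pg = virt_to_page(addr);	/* valid: addr is in the direct map */
	/* pg can be handed to rdma_user_mmap_page(), while the kernel keeps
	 * writing through (void *)addr with no kmap()/kunmap() needed;
	 * a highmem page from alloc_page() would offer no such guarantee. */
	free_page(addr);		/* teardown takes the address, not pg */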
 	do_div(ns, NSEC_PER_SEC / HZ);
 	clock->overflow_period = ns;
 
-	mdev->clock_info_page = alloc_page(GFP_KERNEL);
-	if (mdev->clock_info_page) {
-		mdev->clock_info = kmap(mdev->clock_info_page);
-		if (!mdev->clock_info) {
-			__free_page(mdev->clock_info_page);
-			mlx5_core_warn(mdev, "failed to map clock page\n");
-		} else {
-			mdev->clock_info->sign = 0;
-			mdev->clock_info->nsec = clock->tc.nsec;
-			mdev->clock_info->cycles = clock->tc.cycle_last;
-			mdev->clock_info->mask = clock->cycles.mask;
-			mdev->clock_info->mult = clock->nominal_c_mult;
-			mdev->clock_info->shift = clock->cycles.shift;
-			mdev->clock_info->frac = clock->tc.frac;
-			mdev->clock_info->overflow_period =
-							clock->overflow_period;
-		}
+	mdev->clock_info =
+		(struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
+	if (mdev->clock_info) {
+		mdev->clock_info->nsec = clock->tc.nsec;
+		mdev->clock_info->cycles = clock->tc.cycle_last;
+		mdev->clock_info->mask = clock->cycles.mask;
+		mdev->clock_info->mult = clock->nominal_c_mult;
+		mdev->clock_info->shift = clock->cycles.shift;
+		mdev->clock_info->frac = clock->tc.frac;
+		mdev->clock_info->overflow_period = clock->overflow_period;
 	}
 
 	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
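This hunk is in mlx5_init_clock(): a single zeroed, direct-mapped page replaces the alloc_page()/kmap() pair, so the kmap()-failure branch disappears and the old explicit sign = 0 store becomes redundant, since the page arrives pre-zeroed. Userspace treats sign as a seqlock word: the kernel updater sets the low bit while rewriting the fields and advances the counter by two when done. Below is a hedged sketch of the reader side, assuming the field layout published in include/uapi/rdma/mlx5-abi.h; real consumers go through rdma-core's mlx5dv helpers, and production code needs acquire barriers around the sign loads rather than plain volatile reads:

#include <stdint.h>

/* Field layout as published in include/uapi/rdma/mlx5-abi.h (v1). */
struct mlx5_ib_clock_info {
	uint32_t sign;		/* seqlock: odd while the kernel is updating */
	uint32_t resv;
	uint64_t nsec;
	uint64_t cycles;
	uint64_t frac;
	uint32_t mult;
	uint32_t shift;
	uint64_t mask;
	uint64_t overflow_period;
};

/* Convert a raw HCA timestamp to nanoseconds, timecounter-style. */
static uint64_t hca_ts_to_ns(const volatile struct mlx5_ib_clock_info *ci,
			     uint64_t hca_ts)
{
	uint64_t nsec, cycles, frac, mask, delta;
	uint32_t mult, shift, start;

	do {
		start  = ci->sign;	/* retry while odd or changed */
		nsec   = ci->nsec;
		cycles = ci->cycles;
		frac   = ci->frac;
		mult   = ci->mult;
		shift  = ci->shift;
		mask   = ci->mask;
	} while ((start & 1) || start != ci->sign);

	delta = (hca_ts - cycles) & mask;
	return nsec + ((delta * mult + frac) >> shift);
}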
 	cancel_delayed_work_sync(&clock->overflow_work);
 
 	if (mdev->clock_info) {
-		kunmap(mdev->clock_info_page);
-		__free_page(mdev->clock_info_page);
+		free_page((unsigned long)mdev->clock_info);
 		mdev->clock_info = NULL;
 	}
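The hunk above is the matching teardown in mlx5_cleanup_clock(): since nothing was kmap()ed, the kunmap()/__free_page() pair collapses into a single free_page() on the kernel virtual address. For completeness, here is the consumer side that the first hunk serves; the page must be mapped read-only, which is why the handler rejects VM_WRITE with -EPERM and clears VM_MAYWRITE. The offset-encoding constants below are my reading of the driver's mmap-command scheme and should be treated as assumptions rather than ABI documentation; rdma-core is the supported interface:

#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

/* Assumed constants from the mlx5 mmap-offset encoding. */
#define MLX5_IB_MMAP_CMD_SHIFT	8
#define MLX5_IB_MMAP_CLOCK_INFO	7

static void *map_clock_info(int uverbs_cmd_fd)
{
	long psz = sysconf(_SC_PAGESIZE);
	off_t pgoff = (off_t)MLX5_IB_MMAP_CLOCK_INFO << MLX5_IB_MMAP_CMD_SHIFT;

	/* PROT_READ only: a writable mapping gets -EPERM from the driver. */
	return mmap(NULL, psz, PROT_READ, MAP_SHARED, uverbs_cmd_fd,
		    pgoff * psz);
}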