        struct mutex lock;
};
- static void tegra_atomic_schedule(struct tegra_drm *tegra,
-                                   struct drm_atomic_state *state)
+ static int tegra_atomic_check(struct drm_device *drm,
+                               struct drm_atomic_state *state)
{
-         tegra->commit.state = state;
-         schedule_work(&tegra->commit.work);
- }
+         int err;
- static void tegra_atomic_complete(struct tegra_drm *tegra,
-                                   struct drm_atomic_state *state)
- {
-         struct drm_device *drm = tegra->drm;
+         err = drm_atomic_helper_check_modeset(drm, state);
+         if (err < 0)
+                 return err;
-         /*
-          * Everything below can be run asynchronously without the need to grab
-          * any modeset locks at all under one condition: It must be guaranteed
-          * that the asynchronous work has either been cancelled (if the driver
-          * supports it, which at least requires that the framebuffers get
-          * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-          * before the new state gets committed on the software side with
-          * drm_atomic_helper_swap_state().
-          *
-          * This scheme allows new atomic state updates to be prepared and
-          * checked in parallel to the asynchronous completion of the previous
-          * update. Which is important since compositors need to figure out the
-          * composition of the next frame right after having submitted the
-          * current layout.
-          */
+         err = drm_atomic_normalize_zpos(drm, state);
+         if (err < 0)
+                 return err;
-         drm_atomic_helper_commit_modeset_disables(drm, state);
-         drm_atomic_helper_commit_modeset_enables(drm, state);
-         drm_atomic_helper_commit_planes(drm, state,
-                                         DRM_PLANE_COMMIT_ACTIVE_ONLY);
+         err = drm_atomic_helper_check_planes(drm, state);
+         if (err < 0)
+                 return err;
-         drm_atomic_helper_wait_for_vblanks(drm, state);
+         if (state->legacy_cursor_update)
+                 state->async_update = !drm_atomic_helper_async_check(drm, state);
-         drm_atomic_helper_cleanup_planes(drm, state);
-         drm_atomic_state_put(state);
+         return 0;
}
- static void tegra_atomic_work(struct work_struct *work)
+ static struct drm_atomic_state *
+ tegra_atomic_state_alloc(struct drm_device *drm)
{
-         struct tegra_drm *tegra = container_of(work, struct tegra_drm,
-                                                commit.work);
+         struct tegra_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+         if (!state || drm_atomic_state_init(drm, &state->base) < 0) {
+                 kfree(state);
+                 return NULL;
+         }
-         tegra_atomic_complete(tegra, tegra->commit.state);
+         return &state->base;
}
- static int tegra_atomic_commit(struct drm_device *drm,
-                                struct drm_atomic_state *state, bool nonblock)
+ static void tegra_atomic_state_clear(struct drm_atomic_state *state)
{
-         struct tegra_drm *tegra = drm->dev_private;
-         int err;
-
-         err = drm_atomic_helper_prepare_planes(drm, state);
-         if (err)
-                 return err;
-
-         /* serialize outstanding nonblocking commits */
-         mutex_lock(&tegra->commit.lock);
-         flush_work(&tegra->commit.work);
-
-         /*
-          * This is the point of no return - everything below never fails except
-          * when the hw goes bonghits. Which means we can commit the new state on
-          * the software side now.
-          */
-
-         err = drm_atomic_helper_swap_state(state, true);
-         if (err) {
-                 mutex_unlock(&tegra->commit.lock);
-                 drm_atomic_helper_cleanup_planes(drm, state);
-                 return err;
-         }
+         struct tegra_atomic_state *tegra = to_tegra_atomic_state(state);
-         drm_atomic_state_get(state);
-         if (nonblock)
-                 tegra_atomic_schedule(tegra, state);
-         else
-                 tegra_atomic_complete(tegra, state);
+         drm_atomic_state_default_clear(state);
+         tegra->clk_disp = NULL;
+         tegra->dc = NULL;
+         tegra->rate = 0;
+ }
-         mutex_unlock(&tegra->commit.lock);
-         return 0;
+ static void tegra_atomic_state_free(struct drm_atomic_state *state)
+ {
+         drm_atomic_state_default_release(state);
+         kfree(state);
}
- static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+ static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
-         .output_poll_changed = tegra_fb_output_poll_changed,
+         .output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
-         .atomic_check = drm_atomic_helper_check,
-         .atomic_commit = tegra_atomic_commit,
+         .atomic_check = tegra_atomic_check,
+         .atomic_commit = drm_atomic_helper_commit,
+         .atomic_state_alloc = tegra_atomic_state_alloc,
+         .atomic_state_clear = tegra_atomic_state_clear,
+         .atomic_state_free = tegra_atomic_state_free,
+ };
+
+ static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
+ {
+         struct drm_device *drm = old_state->dev;
+         struct tegra_drm *tegra = drm->dev_private;
+
+         if (tegra->hub) {
+                 drm_atomic_helper_commit_modeset_disables(drm, old_state);
+                 tegra_display_hub_atomic_commit(drm, old_state);
+                 drm_atomic_helper_commit_planes(drm, old_state, 0);
+                 drm_atomic_helper_commit_modeset_enables(drm, old_state);
+                 drm_atomic_helper_commit_hw_done(old_state);
+                 drm_atomic_helper_wait_for_vblanks(drm, old_state);
+                 drm_atomic_helper_cleanup_planes(drm, old_state);
+         } else {
+                 drm_atomic_helper_commit_tail_rpm(old_state);
+         }
+ }
+
+ static const struct drm_mode_config_helper_funcs
+ tegra_drm_mode_config_helpers = {
+         .atomic_commit_tail = tegra_atomic_commit_tail,
};
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)