From: Daniel Vetter <daniel.vetter@ffwll.ch>
Date: Fri, 14 Jun 2019 09:31:13 +0000 (+0200)
Subject: Merge tag 'drm-misc-next-2019-06-14' of git://anongit.freedesktop.org/drm/drm-misc...
X-Git-Url: http://git.cdn.openwrt.org/?a=commitdiff_plain;h=2454fcea338ad821a39d471bc7db5a58ba41b742;p=openwrt%2Fstaging%2Fblogic.git

Merge tag 'drm-misc-next-2019-06-14' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v5.3:

UAPI Changes:

Cross-subsystem Changes:
- Add code to signal all dma-fences when freed with pending signals.
- Annotate reservation object access in CONFIG_DEBUG_MUTEXES

Core Changes:
- Assorted documentation fixes.
- Use irqsave/restore spinlock to add crc entry (a generic sketch of the
  pattern follows this message).
- Move code around to drm_client, for internal modeset clients.
- Make drm_crtc.h and drm_debugfs.h self-contained.
- Remove drm_fb_helper_connector.
- Add bootsplash to todo.
- Fix lock ordering in pan_display_legacy.
- Support pinning buffers to current location in gem-vram.
- Remove the now unused locking functions from gem-vram.
- Remove the now unused kmap-object argument from vram helpers.
- Stop checking return value of debugfs_create.
- Add atomic encoder enable/disable helpers.
- Pass drm_atomic_state to atomic connector check.
- Add atomic support for bridge enable/disable.
- Add self refresh helpers to core.

Driver Changes:
- Add extra delay to make MTP SDM845 work.
- Small fixes to virtio, vkms, sii902x, sii9234, ast, mcde, analogix, rockchip.
- Add zpos and ?BGR8888 support to meson.
- More removals of drm_os_linux and drmP headers for amd, radeon, sti, r128, savage, sis.
- Allow Synopsys dw-hdmi to unwedge the i2c hdmi bus.
- Add orientation quirks for GPD panels.
- EDID cleanups and fixes to handling of EDID < 1.2.
- Add runtime pm to stm.
- Handle s/r in dw-hdmi.
- Add hooks for power on/off to dsi for stm.
- Remove virtio dirty tracking code, done in drm core.
- Rework BO handling in ast and mgag200.

Tiny conflict in drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c, needed
#include <linux/slab.h> to make it compile.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/0e01de30-9797-853c-732f-4a5bd6e61445@linux.intel.com
---

2454fcea338ad821a39d471bc7db5a58ba41b742
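[The "irqsave/restore spinlock" item above refers to the standard
interrupt-safe locking pattern. A minimal, generic sketch of that pattern
follows; the demo_crc structure and demo_add_crc_entry helper are invented
for illustration and are not the actual drm_crtc CRC code.]

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_crc {
	spinlock_t lock;		/* may be taken from interrupt context */
	struct list_head entries;
};

static void demo_add_crc_entry(struct demo_crc *crc, struct list_head *entry)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts and remembers their
	 * previous state, so this is safe even if the caller already runs
	 * with interrupts disabled (e.g. from a vblank interrupt handler);
	 * a plain spin_lock_irq() would unconditionally re-enable them.
	 */
	spin_lock_irqsave(&crc->lock, flags);
	list_add_tail(entry, &crc->entries);
	spin_unlock_irqrestore(&crc->lock, flags);
}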
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4af3989e4a75,822049a78e9f..c8887a1c852a
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@@ -22,13 -22,11 +22,13 @@@
  #include "amdgpu_amdkfd.h"
  #include "amd_shared.h"
- #include <drm/drmP.h>
+ #include "amdgpu.h"
  #include "amdgpu_gfx.h"
 +#include "amdgpu_dma_buf.h"
  #include
  #include
 +#include "amdgpu_xgmi.h"
  
  static const unsigned int compute_vmid_bitmap = 0xFF00;
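[The first new file below, amdgpu_dma_buf.c, carries amdgpu's PRIME
(dma-buf sharing) callbacks. For orientation, a hedged userspace sketch of
the export path these callbacks serve; bo_handle and drm_fd are assumed to
be an existing GEM handle and an open DRM device fd.]

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Turn a driver-local GEM handle into a shareable dma-buf fd. */
static int export_bo(int drm_fd, unsigned int bo_handle)
{
	struct drm_prime_handle req = {
		.handle = bo_handle,		/* assumed pre-created GEM handle */
		.flags	= DRM_CLOEXEC | DRM_RDWR,
		.fd	= -1,
	};

	/*
	 * The ioctl resolves to the driver's &drm_driver.gem_prime_export
	 * hook, i.e. amdgpu_gem_prime_export() below. Importing the fd on
	 * another device lands in amdgpu_gem_prime_import().
	 */
	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req))
		return -1;

	return req.fd;
}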
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 4711cf1b5bd2,000000000000..489041df1f45
mode 100644,000000..100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@@ -1,450 -1,0 +1,448 @@@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
- #include <drm/drmP.h>
- 
 +#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_gem.h"
+#include <drm/amdgpu_drm.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
+
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object (BO)
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the BO's memory.
+ */
+struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int npages = bo->tbo.num_pages;
+
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+}
+
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM BO
+ *
+ * Sets up an in-kernel virtual mapping of the BO's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
+void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	int ret;
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return bo->dma_buf_vmap.virtual;
+}
+
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM BO
+ * @vaddr: Virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the BO's memory.
+ */
+void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	ttm_bo_kunmap(&bo->dma_buf_vmap);
+}
+
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM BO
+ * @vma: Virtual memory area
+ *
+ * Sets up a userspace mapping of the BO's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
+			  struct vm_area_struct *vma)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	unsigned asize = amdgpu_bo_size(bo);
+	int ret;
+
+	if (!vma->vm_file)
+		return -ENODEV;
+
+	if (adev == NULL)
+		return -ENODEV;
+
+	/* Check for valid size. */
+	if (asize < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+		return -EPERM;
+	}
+	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
+
+	/* prime mmap does not need to check access, so allow here */
+	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
+	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+	return ret;
+}
+ */ +static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int ret = 0; + + ret = amdgpu_bo_reserve(bo, true); + if (unlikely(ret != 0)) + goto error; + + amdgpu_bo_unpin(bo); + if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) + bo->prime_shared_count--; + amdgpu_bo_unreserve(bo); + +error: + drm_gem_map_detach(dma_buf, attach); +} + +/** + * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation + * @obj: GEM BO + * + * Returns: + * The BO's reservation object. + */ +struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + return bo->tbo.resv; +} + +/** + * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation + * @dma_buf: Shared DMA buffer + * @direction: Direction of DMA transfer + * + * This is called before CPU access to the shared DMA buffer's memory. If it's + * a read access, the buffer is moved to the GTT domain if possible, for optimal + * CPU read performance. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, + enum dma_data_direction direction) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_operation_ctx ctx = { true, false }; + u32 domain = amdgpu_display_supported_domains(adev); + int ret; + bool reads = (direction == DMA_BIDIRECTIONAL || + direction == DMA_FROM_DEVICE); + + if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT)) + return 0; + + /* move to gtt */ + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret != 0)) + return ret; + + if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) { + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + } + + amdgpu_bo_unreserve(bo); + return ret; +} + +const struct dma_buf_ops amdgpu_dmabuf_ops = { + .attach = amdgpu_dma_buf_map_attach, + .detach = amdgpu_dma_buf_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access, + .mmap = drm_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; + +/** + * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation + * @dev: DRM device + * @gobj: GEM BO + * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR. + * + * The main work is done by the &drm_gem_prime_export helper, which in turn + * uses &amdgpu_gem_prime_res_obj. + * + * Returns: + * Shared DMA buffer representing the GEM BO from the given device. 
+ */ +struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); + struct dma_buf *buf; + + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || + bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) + return ERR_PTR(-EPERM); + + buf = drm_gem_prime_export(dev, gobj, flags); + if (!IS_ERR(buf)) { + buf->file->f_mapping = dev->anon_inode->i_mapping; + buf->ops = &amdgpu_dmabuf_ops; + } + + return buf; +} + +/** + * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table + * implementation + * @dev: DRM device + * @attach: DMA-buf attachment + * @sg: Scatter/gather table + * + * Imports shared DMA buffer memory exported by another device. + * + * Returns: + * A new GEM BO of the given DRM device, representing the memory + * described by the given DMA-buf attachment and scatter/gather table. + */ +struct drm_gem_object * +amdgpu_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg) +{ + struct reservation_object *resv = attach->dmabuf->resv; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_bo *bo; + struct amdgpu_bo_param bp; + int ret; + + memset(&bp, 0, sizeof(bp)); + bp.size = attach->dmabuf->size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_CPU; + bp.flags = 0; + bp.type = ttm_bo_type_sg; + bp.resv = resv; + ww_mutex_lock(&resv->lock, NULL); + ret = amdgpu_bo_create(adev, &bp, &bo); + if (ret) + goto error; + + bo->tbo.sg = sg; + bo->tbo.ttm->sg = sg; + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; + if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) + bo->prime_shared_count = 1; + + ww_mutex_unlock(&resv->lock); + return &bo->gem_base; + +error: + ww_mutex_unlock(&resv->lock); + return ERR_PTR(ret); +} + +/** + * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation + * @dev: DRM device + * @dma_buf: Shared DMA buffer + * + * The main work is done by the &drm_gem_prime_import helper, which in turn + * uses &amdgpu_gem_prime_import_sg_table. + * + * Returns: + * GEM BO representing the shared DMA buffer for the given device. + */ +struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct drm_gem_object *obj; + + if (dma_buf->ops == &amdgpu_dmabuf_ops) { + obj = dma_buf->priv; + if (obj->dev == dev) { + /* + * Importing dmabuf exported from out own gem increases + * refcount on gem itself instead of f_count of dmabuf. 
+ */ + drm_gem_object_get(obj); + return obj; + } + } + + return drm_gem_prime_import(dev, dma_buf); +} diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 41ccee49a224,19f8909e9647..4ff4cf5988ea --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@@ -45,9 -45,9 +45,9 @@@ #include #include -#include +#include #include - #include + #include #include "amdgpu.h" diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7138dc1dd1f4,a8a1fcab299b..d81bebf76310 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@@ -29,21 -29,24 +29,26 @@@ * Thomas Hellstrom * Dave Airlie */ + + #include + #include ++#include + #include + #include + #include + #include + #include + #include + #include #include #include #include #include - #include + + #include #include - #include - #include - #include - #include - #include - #include - #include - #include ++ #include "amdgpu.h" #include "amdgpu_object.h" #include "amdgpu_trace.h" diff --cc drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index eb2204d42337,000000000000..cb3f6a74d9e3 mode 100644,000000..100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@@ -1,134 -1,0 +1,136 @@@ +/* + * Copyright 2012-16 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + ++#include ++ +#include "dal_asic_id.h" +#include "dc_types.h" +#include "dccg.h" +#include "clk_mgr_internal.h" + +#include "dce100/dce_clk_mgr.h" +#include "dce110/dce110_clk_mgr.h" +#include "dce112/dce112_clk_mgr.h" +#include "dce120/dce120_clk_mgr.h" +#include "dcn10/rv1_clk_mgr.h" +#include "dcn10/rv2_clk_mgr.h" + + +int clk_mgr_helper_get_active_display_cnt( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + /* + * Only notify active stream or virtual stream. + * Need to notify virtual stream to work around + * headless case. HPD does not fire when system is in + * S0i2. 
+ */ + if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) + display_count++; + } + + return display_count; +} + + +struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg) +{ + struct hw_asic_id asic_id = ctx->asic_id; + + struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); + + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + switch (asic_id.chip_family) { + case FAMILY_CI: + case FAMILY_KV: + dce_clk_mgr_construct(ctx, clk_mgr); + break; + case FAMILY_CZ: + dce110_clk_mgr_construct(ctx, clk_mgr); + break; + case FAMILY_VI: + if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) || + ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) { + dce_clk_mgr_construct(ctx, clk_mgr); + break; + } + if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) || + ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) || + ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) { + dce112_clk_mgr_construct(ctx, clk_mgr); + break; + } + if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) { + dce112_clk_mgr_construct(ctx, clk_mgr); + break; + } + break; + case FAMILY_AI: + if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev)) + dce121_clk_mgr_construct(ctx, clk_mgr); + else + dce120_clk_mgr_construct(ctx, clk_mgr); + break; + +#if defined(CONFIG_DRM_AMD_DC_DCN1_0) + case FAMILY_RV: + if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) { + rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); + break; + } + if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) || + ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) { + rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu); + break; + } + break; +#endif /* Family RV */ + + default: + ASSERT(0); /* Unknown Asic */ + break; + } + + return &clk_mgr->base; +} + +void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + kfree(clk_mgr); +} + diff --cc drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 31db9b55e11a,000000000000..04b12bb2243d mode 100644,000000..100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@@ -1,262 -1,0 +1,265 @@@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
diff --cc drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 31db9b55e11a,000000000000..04b12bb2243d
mode 100644,000000..100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@@ -1,262 -1,0 +1,265 @@@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
++#include <linux/slab.h>
++
++#include "reg_helper.h"
+#include "core_types.h"
+#include "clk_mgr_internal.h"
+#include "rv1_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dce112/dce112_clk_mgr.h"
+#include "rv1_clk_mgr_vbios_smu.h"
+#include "rv1_clk_mgr_clk.h"
+
+static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
+{
+	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+	bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
+	int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+	bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
+
+	/* increase clock, looking for div is 0 for current, request div is 1 */
+	if (dispclk_increase) {
+		/* already divided by 2, no need to reach target clk with 2 steps */
+		if (cur_dpp_div)
+			return new_clocks->dispclk_khz;
+
+		/* request disp clk is lower than maximum supported dpp clk,
+		 * no need to reach target clk with two steps.
+		 */
+		if (new_clocks->dispclk_khz <= disp_clk_threshold)
+			return new_clocks->dispclk_khz;
+
+		/* target dpp clk not request divided by 2, still within threshold */
+		if (!request_dpp_div)
+			return new_clocks->dispclk_khz;
+
+	} else {
+		/* decrease clock, looking for current dppclk divided by 2,
+		 * request dppclk not divided by 2.
+		 */
+
+		/* current dpp clk not divided by 2, no need to ramp */
+		if (!cur_dpp_div)
+			return new_clocks->dispclk_khz;
+
+		/* current disp clk is lower than current maximum dpp clk,
+		 * no need to ramp
+		 */
+		if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
+			return new_clocks->dispclk_khz;
+
+		/* request dpp clk need to be divided by 2 */
+		if (request_dpp_div)
+			return new_clocks->dispclk_khz;
+	}
+
+	return disp_clk_threshold;
+}
+
+static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
+{
+	int i;
+	int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
+	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+
+	/* set disp clk to dpp clk threshold */
+
+	clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
+	clk_mgr->funcs->set_dprefclk(clk_mgr);
+
+
+	/* update request dpp clk division option */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->plane_state)
+			continue;
+
+		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+				pipe_ctx->plane_res.dpp,
+				request_dpp_div,
+				true);
+	}
+
+	/* If target clk not same as dppclk threshold, set to target clock */
+	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
+		clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
+		clk_mgr->funcs->set_dprefclk(clk_mgr);
+	}
+
+
+	clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
+	clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
+	clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
+		struct dc_state *context,
+		bool safe_to_lower)
+{
+	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+	struct dc *dc = clk_mgr_base->ctx->dc;
+	struct dc_debug_options *debug = &dc->debug;
+	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+	struct pp_smu_funcs_rv *pp_smu = NULL;
+	bool send_request_to_increase = false;
+	bool send_request_to_lower = false;
+	int display_count;
+
+	bool enter_display_off = false;
+
+	ASSERT(clk_mgr->pp_smu);
+
+	pp_smu = &clk_mgr->pp_smu->rv_funcs;
+
+	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
+	if (display_count == 0)
+		enter_display_off = true;
+
+	if (enter_display_off == safe_to_lower) {
+		/*
+		 * Notify SMU active displays
+		 * if function pointer not set up, this message is
+		 * sent as part of pplib_apply_display_requirements.
+		 */
+		if (pp_smu->set_display_count)
+			pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+	}
+
+	if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
+			|| new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
+			|| new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
+			|| new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
+		send_request_to_increase = true;
+
+	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
+		clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+		send_request_to_lower = true;
+	}
+
+	// F Clock
+	if (debug->force_fclk_khz != 0)
+		new_clocks->fclk_khz = debug->force_fclk_khz;
+
+	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
+		clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
+		send_request_to_lower = true;
+	}
+
+	//DCF Clock
+	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+		send_request_to_lower = true;
+	}
+
+	if (should_set_clock(safe_to_lower,
+			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+		send_request_to_lower = true;
+	}
+
+	/* make sure dcf clk is before dpp clk to
+	 * make sure we have enough voltage to run dpp clk
+	 */
+	if (send_request_to_increase) {
+		/*use dcfclk to request voltage*/
+		if (pp_smu->set_hard_min_fclk_by_freq &&
+				pp_smu->set_hard_min_dcfclk_by_freq &&
+				pp_smu->set_min_deep_sleep_dcfclk) {
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
+		}
+	}
+
+	/* dcn1 dppclk is tied to dispclk */
+	/* program dispclk on = as a w/a for sleep resume clock ramping issues */
+	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
+			|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
+		ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
+		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+		send_request_to_lower = true;
+	}
+
+	if (!send_request_to_increase && send_request_to_lower) {
+		/*use dcfclk to request voltage*/
+		if (pp_smu->set_hard_min_fclk_by_freq &&
+				pp_smu->set_hard_min_dcfclk_by_freq &&
+				pp_smu->set_min_deep_sleep_dcfclk) {
+			pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+			pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+			pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
+		}
+	}
+}
+
+static struct clk_mgr_funcs rv1_clk_funcs = {
+	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.update_clocks = rv1_update_clocks,
+};
+
+static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
+	.set_dispclk = rv1_vbios_smu_set_dispclk,
+	.set_dprefclk = dce112_set_dprefclk
+};
+
+void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
+{
+	struct dc_debug_options *debug = &ctx->dc->debug;
+	struct dc_bios *bp = ctx->dc_bios;
+	struct dc_firmware_info fw_info = { { 0 } };
+
+	clk_mgr->base.ctx = ctx;
+	clk_mgr->pp_smu = pp_smu;
+	clk_mgr->base.funcs = &rv1_clk_funcs;
+	clk_mgr->funcs = &rv1_clk_internal_funcs;
+
+	clk_mgr->dfs_bypass_disp_clk = 0;
+
+	clk_mgr->dprefclk_ss_percentage = 0;
+	clk_mgr->dprefclk_ss_divider = 1000;
+	clk_mgr->ss_on_dprefclk = false;
+	clk_mgr->base.dprefclk_khz = 600000;
+
+	if (bp->integrated_info)
+		clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+	if (clk_mgr->dentist_vco_freq_khz == 0) {
+		bp->funcs->get_firmware_info(bp, &fw_info);
+		clk_mgr->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+		if (clk_mgr->dentist_vco_freq_khz == 0)
+			clk_mgr->dentist_vco_freq_khz = 3600000;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_mgr->dfs_bypass_enabled = true;
+
+	dce_clock_read_ss_info(clk_mgr);
+}
+
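[To make the two-step ramp in rv1_determine_dppclk_threshold() and
ramp_up_dispclk_with_dpp() above concrete, a worked example with invented
numbers follows.]

/*
 * Worked example (illustrative values, not taken from the patch):
 *
 *   current:  dispclk = 600000 kHz, dppclk = 600000 kHz (no DPP divide)
 *   request:  dispclk = 1080000 kHz, dppclk = 540000 kHz (DPP divide by 2)
 *   max_supported_dppclk_khz = 800000
 *
 * dispclk_increase = true, cur_dpp_div = false, request_dpp_div = true, and
 * the requested dispclk exceeds the 800 MHz DPP ceiling, so none of the
 * early returns fire and rv1_determine_dppclk_threshold() returns 800000.
 * ramp_up_dispclk_with_dpp() then performs:
 *
 *   1. set_dispclk(800000)  - the highest clock the DPPs can still take
 *                             undivided;
 *   2. dpp_dppclk_control(..., request_dpp_div = true, true) - program the
 *                             divide-by-2 on every active pipe;
 *   3. set_dispclk(1080000) - the DPPs now see dispclk / 2 = 540000 kHz.
 *
 * Ramping through the threshold keeps dppclk inside its supported range at
 * every intermediate step instead of briefly overclocking the DPPs.
 */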