Switch the page table entry flags used by the GART and VM code from uint32_t to uint64_t throughout the driver. Necessary for new ASICs.
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
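
For context, a minimal standalone sketch of why the wider type matters. The low bits mirror the existing layout; the high attribute bit below is hypothetical, standing in for per-ASIC PTE attribute bits that sit above bit 31 and would be silently truncated by a 32-bit flags parameter:

#include <stdint.h>
#include <stdio.h>

/* Low PTE bits as in the existing 32-bit layout (illustrative values). */
#define PTE_VALID      (1ULL << 0)
#define PTE_SYSTEM     (1ULL << 1)

/* Hypothetical attribute in the upper 32 bits, standing in for the
 * per-ASIC PTE bits that newer hardware defines above bit 31. */
#define PTE_HIGH_ATTR  (1ULL << 51)

int main(void)
{
	uint64_t flags = PTE_VALID | PTE_SYSTEM | PTE_HIGH_ATTR;

	/* With the old uint32_t parameter type the upper bits are dropped
	 * before they ever reach set_pte_pde()/gart_set_pte_pde(). */
	uint32_t truncated = (uint32_t)flags;

	printf("64-bit flags: 0x%016llx\n", (unsigned long long)flags);
	printf("32-bit flags: 0x%016llx (high attribute lost)\n",
	       (unsigned long long)truncated);
	return 0;
}

Widening every flags parameter to uint64_t, as done below, keeps such bits intact along the whole update path.
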
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ uint32_t incr, uint64_t flags);
};
/* provided by the gmc block */
void *cpu_pt_addr, /* cpu addr of page table */
uint32_t gpu_page_idx, /* pte/pde to update */
uint64_t addr, /* addr to write into pte/pde */
- uint32_t flags); /* access flags */
+ uint64_t flags); /* access flags */
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
};
int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
- dma_addr_t *dma_addr, uint32_t flags);
+ dma_addr_t *dma_addr, uint64_t flags);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
/*
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
unsigned p;
int i, j;
u64 page_base;
- uint32_t flags = AMDGPU_PTE_SYSTEM;
+ uint64_t flags = AMDGPU_PTE_SYSTEM;
if (!adev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
*/
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
- uint32_t flags)
+ uint64_t flags)
{
unsigned t;
unsigned p;
{
struct ttm_tt *ttm = bo->ttm;
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- uint32_t flags;
+ uint64_t flags;
int r;
if (!ttm || amdgpu_ttm_is_bound(ttm))
return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
-uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem)
{
- uint32_t flags = 0;
+ uint64_t flags = 0;
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
/* Function which actually does the update */
void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
- uint32_t flags);
+ uint64_t flags);
/* indicate update pt or its shadow */
bool shadow;
};
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
- uint32_t flags)
+ uint64_t flags)
{
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
- uint32_t flags)
+ uint64_t flags)
{
uint64_t src = (params->src + (addr >> 12) * 8);
static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+ uint64_t dst, uint64_t flags)
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+ uint64_t dst, uint64_t flags)
{
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
- uint32_t flags, uint64_t addr,
+ uint64_t flags, uint64_t addr,
struct dma_fence **fence)
{
struct amdgpu_ring *ring;
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint32_t gtt_flags,
+ uint64_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint32_t flags,
+ uint64_t flags,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
- uint32_t gtt_flags, flags;
+ uint64_t gtt_flags, flags;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
struct dma_fence *exclusive;
*/
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+ uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
void *cpu_pt_addr,
uint32_t gpu_page_idx,
uint64_t addr,
- uint32_t flags)
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
void *cpu_pt_addr,
uint32_t gpu_page_idx,
uint64_t addr,
- uint32_t flags)
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
void *cpu_pt_addr,
uint32_t gpu_page_idx,
uint64_t addr,
- uint32_t flags)
+ uint64_t flags)
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
*/
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+ uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
*/
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+ uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+ uint32_t incr, uint64_t flags)
{
uint64_t value;
unsigned ndw;