--- r600.c (336879b1da97fffc097f77c6d6f818660f2826f0)
+++ r600.c (57d20a43c9b30663bdbacde8294a902edef35a84)
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
  * Copyright 2009 Jerome Glisse.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation

--- 2880 unchanged lines hidden ---

  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
  * @fence: radeon fence object
  *
  * Copy GPU paging using the CP DMA engine (r6xx+).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int r600_copy_cpdma(struct radeon_device *rdev,
-                    uint64_t src_offset, uint64_t dst_offset,
-                    unsigned num_gpu_pages,
-                    struct radeon_fence **fence)
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+                                     uint64_t src_offset, uint64_t dst_offset,
+                                     unsigned num_gpu_pages,
+                                     struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.blit_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_bytes, cur_size_in_bytes, tmp;
         int i, num_loops;
         int r = 0;

         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }

         size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
         num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
         r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }

-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);

         radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
         radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
         radeon_ring_write(ring, WAIT_3D_IDLE_bit);
         for (i = 0; i < num_loops; i++) {
                 cur_size_in_bytes = size_in_bytes;
                 if (cur_size_in_bytes > 0x1fffff)

--- 10 unchanged lines hidden ---

                 radeon_ring_write(ring, cur_size_in_bytes);
                 src_offset += cur_size_in_bytes;
                 dst_offset += cur_size_in_bytes;
         }
         radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
         radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
         radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);

-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }

         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
+        radeon_semaphore_free(rdev, &sem, fence);

-        return r;
+        return fence;
 }

 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                          uint32_t tiling_flags, uint32_t pitch,
                          uint32_t offset, uint32_t obj_size)
 {
         /* FIXME: implement */
         return 0;

--- 1522 unchanged lines hidden ---
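
Note on the interface change above: r600_copy_cpdma() no longer takes a struct radeon_fence ** to fill in and a single fence to sync to. Callers now hand over the buffer's reservation object (the copy syncs to the fences tracked there via radeon_semaphore_sync_resv()) and receive the fence for the copy, or an ERR_PTR-encoded errno, as the return value. A minimal caller-side sketch under those assumptions; the helper name example_copy_and_wait() is illustrative and not part of the driver, while radeon_fence_wait() and radeon_fence_unref() follow their usual radeon signatures:

/* Illustrative sketch only -- not taken from the radeon sources. */
static int example_copy_and_wait(struct radeon_device *rdev,
                                 uint64_t src_offset, uint64_t dst_offset,
                                 unsigned num_gpu_pages,
                                 struct reservation_object *resv)
{
        struct radeon_fence *fence;
        int r;

        /* The copy itself syncs to every fence recorded in @resv. */
        fence = r600_copy_cpdma(rdev, src_offset, dst_offset,
                                num_gpu_pages, resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);  /* errno came back encoded in the pointer */

        /* Block until the CP DMA copy signals, then drop the reference. */
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
}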
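
The error paths switch from "return r;" to "return ERR_PTR(r);" because the return type is now a pointer. This is the standard <linux/err.h> idiom: a negative errno is folded into the pointer value and recovered by the caller with IS_ERR()/PTR_ERR(). A small self-contained sketch of that convention, with struct example_ctx and both helpers made up purely for illustration:

#include <linux/err.h>
#include <linux/slab.h>

struct example_ctx {
        int id;
};

/* Return a valid pointer on success or an errno encoded with ERR_PTR(). */
static struct example_ctx *example_ctx_create(void)
{
        struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return ERR_PTR(-ENOMEM);
        return ctx;
}

static int example_ctx_use(void)
{
        struct example_ctx *ctx = example_ctx_create();

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);    /* recover the negative errno */

        kfree(ctx);
        return 0;
}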