#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__

/*
 * Legacy driver interfaces for the Direct Rendering Manager
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*
 * Legacy support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */

/**
 * DMA buffer.
 */
struct drm_buf {
	int idx;		       /**< Index into master buflist */
	int total;		       /**< Buffer size */
	int order;		       /**< log-base-2(total) */
	int used;		       /**< Amount of buffer in use (for DMA) */
	unsigned long offset;	       /**< Byte offset (used internally) */
	void *address;		       /**< Address of buffer */
	unsigned long bus_address;     /**< Bus address of buffer */
	struct drm_buf *next;	       /**< Kernel-only: used for free list */
	__volatile__ int waiting;      /**< On kernel DMA queue */
	__volatile__ int pending;      /**< On hardware DMA queue */
	struct drm_file *file_priv;    /**< Private of holding file descr */
	int context;		       /**< Kernel queue for this buffer */
	int while_locked;	       /**< Dispatch this buffer while locked */
	enum {
		DRM_LIST_NONE = 0,     /**< Not on any list */
		DRM_LIST_FREE = 1,     /**< On the free list */
		DRM_LIST_WAIT = 2,     /**< On the wait list */
		DRM_LIST_PEND = 3,     /**< On the pending list */
		DRM_LIST_PRIO = 4,     /**< On the priority list */
		DRM_LIST_RECLAIM = 5   /**< On the reclaim list */
	} list;			       /**< Which list we're on */

	int dev_priv_size;	       /**< Size of buffer private storage */
	void *dev_private;	       /**< Per-buffer private storage */
};

/**
 * DMA allocation handle, used by the drm_pci.c alloc wrappers below
 * (released with __drm_legacy_pci_free()).
 */
typedef struct drm_dma_handle {
	dma_addr_t busaddr;	/**< Bus address of the allocation */
	void *vaddr;		/**< Kernel virtual address of the allocation */
	size_t size;		/**< Size of the allocation in bytes */
} drm_dma_handle_t;

/**
 * Buffer entry. There is one of these for each buffer size order.
 */
struct drm_buf_entry {
	int buf_size;			/**< size */
	int buf_count;			/**< number of buffers */
	struct drm_buf *buflist;	/**< buffer list */
	int seg_count;			/**< number of entries in seglist */
	int page_order;			/* NOTE(review): presumably log2 of pages
					 * per segment — confirm against drm_bufs.c */
	struct drm_dma_handle **seglist;	/**< DMA handles backing the buffers */

	int low_mark;			/**< Low water mark */
	int high_mark;			/**< High water mark */
};

/**
 * DMA data.
 */
struct drm_device_dma {

	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
	int buf_count;			/**< total number of buffers */
	struct drm_buf **buflist;	/**< Vector of pointers into drm_device_dma::bufs */
	int seg_count;			/**< total number of DMA segments */
	int page_count;			/**< number of pages */
	unsigned long *pagelist;	/**< page list */
	unsigned long byte_count;	/* NOTE(review): presumably total bytes
					 * allocated — confirm in drm_bufs.c */
	enum {
		_DRM_DMA_USE_AGP = 0x01,	/**< Buffers are in AGP space */
		_DRM_DMA_USE_SG = 0x02,		/**< Buffers use scatter-gather memory */
		_DRM_DMA_USE_FB = 0x04,		/**< Buffers are in frame buffer memory */
		_DRM_DMA_USE_PCI_RO = 0x08	/**< Map PCI DMA buffers read-only */
	} flags;

};

/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
	unsigned long handle;		/**< Handle identifying this allocation */
	void *virtual;			/**< Kernel virtual address */
	int pages;			/**< Number of pages */
	struct page **pagelist;		/**< One struct page per page */
	dma_addr_t *busaddr;		/**< One bus address per page */
};

/**
 * Kernel side of a mapping
 */
struct drm_local_map {
	resource_size_t offset;		/**< Requested physical address (0 for SAREA)*/
	unsigned long size;		/**< Requested physical size (bytes) */
	enum drm_map_type type;		/**< Type of memory to map */
	enum drm_map_flags flags;	/**< Flags */
	void *handle;			/**< User-space: "Handle" to pass to mmap() */
					/**< Kernel-space: kernel-virtual address */
	int mtrr;			/**< MTRR slot used */
};

typedef struct drm_local_map drm_local_map_t;

/**
 * Mappings list
 */
struct drm_map_list {
	struct list_head head;		/**< list head */
	struct drm_hash_item hash;	/**< Hash table entry keyed on user_token */
	struct drm_local_map *map;	/**< mapping */
	uint64_t user_token;		/**< Token user space passes to mmap() */
	struct drm_master *master;	/**< Master this mapping belongs to */
};

/* Map (un)registration — see drm_bufs.c.  drm_legacy_addmap() hands the new
 * mapping back through @map_p; the rmmap variants differ only in whether the
 * caller already holds the relevant lock. */
int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_p);
int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct
vm_area_struct *vma);

/* Legacy buffer allocation (AGP- and PCI-backed) — see drm_bufs.c. */
int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);

/**
 * Test that the hardware lock is held by the caller, returning otherwise.
 *
 * \param dev DRM device.
 * \param _file_priv DRM file private of the caller (must be the lock owner).
 */
#define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
do {										\
	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
	    _file_priv->master->lock.file_priv != _file_priv) {			\
		DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
			   _file_priv->master->lock.file_priv, _file_priv);	\
		return -EINVAL;							\
	}									\
} while (0)

void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);

/* drm_pci.c dma alloc wrappers */
void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);

/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);

/*
 * Walk dev->maplist and return the map whose user_token matches @token,
 * or NULL if no such mapping is registered.  Caller is responsible for
 * whatever locking protects dev->maplist.
 */
static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
							   unsigned int token)
{
	struct drm_map_list *_entry;
	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}

#endif /* __DRM_DRM_LEGACY_H__ */