1*3eab3d9eSThomas Hellstrom /************************************************************************** 2*3eab3d9eSThomas Hellstrom * 3*3eab3d9eSThomas Hellstrom * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA 4*3eab3d9eSThomas Hellstrom * All Rights Reserved. 5*3eab3d9eSThomas Hellstrom * 6*3eab3d9eSThomas Hellstrom * Permission is hereby granted, free of charge, to any person obtaining a 7*3eab3d9eSThomas Hellstrom * copy of this software and associated documentation files (the 8*3eab3d9eSThomas Hellstrom * "Software"), to deal in the Software without restriction, including 9*3eab3d9eSThomas Hellstrom * without limitation the rights to use, copy, modify, merge, publish, 10*3eab3d9eSThomas Hellstrom * distribute, sub license, and/or sell copies of the Software, and to 11*3eab3d9eSThomas Hellstrom * permit persons to whom the Software is furnished to do so, subject to 12*3eab3d9eSThomas Hellstrom * the following conditions: 13*3eab3d9eSThomas Hellstrom * 14*3eab3d9eSThomas Hellstrom * The above copyright notice and this permission notice (including the 15*3eab3d9eSThomas Hellstrom * next paragraph) shall be included in all copies or substantial portions 16*3eab3d9eSThomas Hellstrom * of the Software. 17*3eab3d9eSThomas Hellstrom * 18*3eab3d9eSThomas Hellstrom * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19*3eab3d9eSThomas Hellstrom * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20*3eab3d9eSThomas Hellstrom * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21*3eab3d9eSThomas Hellstrom * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22*3eab3d9eSThomas Hellstrom * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23*3eab3d9eSThomas Hellstrom * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24*3eab3d9eSThomas Hellstrom * USE OR OTHER DEALINGS IN THE SOFTWARE. 25*3eab3d9eSThomas Hellstrom * 26*3eab3d9eSThomas Hellstrom **************************************************************************/ 27*3eab3d9eSThomas Hellstrom 28*3eab3d9eSThomas Hellstrom #include "vmwgfx_drv.h" 29*3eab3d9eSThomas Hellstrom #include "ttm/ttm_bo_api.h" 30*3eab3d9eSThomas Hellstrom 31*3eab3d9eSThomas Hellstrom /* 32*3eab3d9eSThomas Hellstrom * Size of inline command buffers. Try to make sure that a page size is a 33*3eab3d9eSThomas Hellstrom * multiple of the DMA pool allocation size. 34*3eab3d9eSThomas Hellstrom */ 35*3eab3d9eSThomas Hellstrom #define VMW_CMDBUF_INLINE_ALIGN 64 36*3eab3d9eSThomas Hellstrom #define VMW_CMDBUF_INLINE_SIZE (1024 - VMW_CMDBUF_INLINE_ALIGN) 37*3eab3d9eSThomas Hellstrom 38*3eab3d9eSThomas Hellstrom /** 39*3eab3d9eSThomas Hellstrom * struct vmw_cmdbuf_context - Command buffer context queues 40*3eab3d9eSThomas Hellstrom * 41*3eab3d9eSThomas Hellstrom * @submitted: List of command buffers that have been submitted to the 42*3eab3d9eSThomas Hellstrom * manager but not yet submitted to hardware. 43*3eab3d9eSThomas Hellstrom * @hw_submitted: List of command buffers submitted to hardware. 44*3eab3d9eSThomas Hellstrom * @preempted: List of preempted command buffers. 45*3eab3d9eSThomas Hellstrom * @num_hw_submitted: Number of buffers currently being processed by hardware 46*3eab3d9eSThomas Hellstrom */ 47*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context { 48*3eab3d9eSThomas Hellstrom struct list_head submitted; 49*3eab3d9eSThomas Hellstrom struct list_head hw_submitted; 50*3eab3d9eSThomas Hellstrom struct list_head preempted; 51*3eab3d9eSThomas Hellstrom unsigned num_hw_submitted; 52*3eab3d9eSThomas Hellstrom }; 53*3eab3d9eSThomas Hellstrom 54*3eab3d9eSThomas Hellstrom /** 55*3eab3d9eSThomas Hellstrom * struct vmw_cmdbuf_man: - Command buffer manager 56*3eab3d9eSThomas Hellstrom * 57*3eab3d9eSThomas Hellstrom * @cur_mutex: Mutex protecting the command buffer used for incremental small 58*3eab3d9eSThomas Hellstrom * kernel command submissions, @cur. 59*3eab3d9eSThomas Hellstrom * @space_mutex: Mutex to protect against starvation when we allocate 60*3eab3d9eSThomas Hellstrom * main pool buffer space. 61*3eab3d9eSThomas Hellstrom * @work: A struct work_struct implementeing command buffer error handling. 62*3eab3d9eSThomas Hellstrom * Immutable. 63*3eab3d9eSThomas Hellstrom * @dev_priv: Pointer to the device private struct. Immutable. 64*3eab3d9eSThomas Hellstrom * @ctx: Array of command buffer context queues. The queues and the context 65*3eab3d9eSThomas Hellstrom * data is protected by @lock. 66*3eab3d9eSThomas Hellstrom * @error: List of command buffers that have caused device errors. 67*3eab3d9eSThomas Hellstrom * Protected by @lock. 68*3eab3d9eSThomas Hellstrom * @mm: Range manager for the command buffer space. Manager allocations and 69*3eab3d9eSThomas Hellstrom * frees are protected by @lock. 70*3eab3d9eSThomas Hellstrom * @cmd_space: Buffer object for the command buffer space, unless we were 71*3eab3d9eSThomas Hellstrom * able to make a contigous coherent DMA memory allocation, @handle. Immutable. 72*3eab3d9eSThomas Hellstrom * @map_obj: Mapping state for @cmd_space. Immutable. 73*3eab3d9eSThomas Hellstrom * @map: Pointer to command buffer space. May be a mapped buffer object or 74*3eab3d9eSThomas Hellstrom * a contigous coherent DMA memory allocation. Immutable. 75*3eab3d9eSThomas Hellstrom * @cur: Command buffer for small kernel command submissions. Protected by 76*3eab3d9eSThomas Hellstrom * the @cur_mutex. 77*3eab3d9eSThomas Hellstrom * @cur_pos: Space already used in @cur. Protected by @cur_mutex. 78*3eab3d9eSThomas Hellstrom * @default_size: Default size for the @cur command buffer. Immutable. 79*3eab3d9eSThomas Hellstrom * @max_hw_submitted: Max number of in-flight command buffers the device can 80*3eab3d9eSThomas Hellstrom * handle. Immutable. 81*3eab3d9eSThomas Hellstrom * @lock: Spinlock protecting command submission queues. 82*3eab3d9eSThomas Hellstrom * @header: Pool of DMA memory for device command buffer headers. 83*3eab3d9eSThomas Hellstrom * Internal protection. 84*3eab3d9eSThomas Hellstrom * @dheaders: Pool of DMA memory for device command buffer headers with trailing 85*3eab3d9eSThomas Hellstrom * space for inline data. Internal protection. 86*3eab3d9eSThomas Hellstrom * @tasklet: Tasklet struct for irq processing. Immutable. 87*3eab3d9eSThomas Hellstrom * @alloc_queue: Wait queue for processes waiting to allocate command buffer 88*3eab3d9eSThomas Hellstrom * space. 89*3eab3d9eSThomas Hellstrom * @idle_queue: Wait queue for processes waiting for command buffer idle. 90*3eab3d9eSThomas Hellstrom * @irq_on: Whether the process function has requested irq to be turned on. 91*3eab3d9eSThomas Hellstrom * Protected by @lock. 92*3eab3d9eSThomas Hellstrom * @using_mob: Whether the command buffer space is a MOB or a contigous DMA 93*3eab3d9eSThomas Hellstrom * allocation. Immutable. 94*3eab3d9eSThomas Hellstrom * @has_pool: Has a large pool of DMA memory which allows larger allocations. 95*3eab3d9eSThomas Hellstrom * Typically this is false only during bootstrap. 96*3eab3d9eSThomas Hellstrom * @handle: DMA address handle for the command buffer space if @using_mob is 97*3eab3d9eSThomas Hellstrom * false. Immutable. 98*3eab3d9eSThomas Hellstrom * @size: The size of the command buffer space. Immutable. 99*3eab3d9eSThomas Hellstrom */ 100*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man { 101*3eab3d9eSThomas Hellstrom struct mutex cur_mutex; 102*3eab3d9eSThomas Hellstrom struct mutex space_mutex; 103*3eab3d9eSThomas Hellstrom struct work_struct work; 104*3eab3d9eSThomas Hellstrom struct vmw_private *dev_priv; 105*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX]; 106*3eab3d9eSThomas Hellstrom struct list_head error; 107*3eab3d9eSThomas Hellstrom struct drm_mm mm; 108*3eab3d9eSThomas Hellstrom struct ttm_buffer_object *cmd_space; 109*3eab3d9eSThomas Hellstrom struct ttm_bo_kmap_obj map_obj; 110*3eab3d9eSThomas Hellstrom u8 *map; 111*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *cur; 112*3eab3d9eSThomas Hellstrom size_t cur_pos; 113*3eab3d9eSThomas Hellstrom size_t default_size; 114*3eab3d9eSThomas Hellstrom unsigned max_hw_submitted; 115*3eab3d9eSThomas Hellstrom spinlock_t lock; 116*3eab3d9eSThomas Hellstrom struct dma_pool *headers; 117*3eab3d9eSThomas Hellstrom struct dma_pool *dheaders; 118*3eab3d9eSThomas Hellstrom struct tasklet_struct tasklet; 119*3eab3d9eSThomas Hellstrom wait_queue_head_t alloc_queue; 120*3eab3d9eSThomas Hellstrom wait_queue_head_t idle_queue; 121*3eab3d9eSThomas Hellstrom bool irq_on; 122*3eab3d9eSThomas Hellstrom bool using_mob; 123*3eab3d9eSThomas Hellstrom bool has_pool; 124*3eab3d9eSThomas Hellstrom dma_addr_t handle; 125*3eab3d9eSThomas Hellstrom size_t size; 126*3eab3d9eSThomas Hellstrom }; 127*3eab3d9eSThomas Hellstrom 128*3eab3d9eSThomas Hellstrom /** 129*3eab3d9eSThomas Hellstrom * struct vmw_cmdbuf_header - Command buffer metadata 130*3eab3d9eSThomas Hellstrom * 131*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 132*3eab3d9eSThomas Hellstrom * @cb_header: Device command buffer header, allocated from a DMA pool. 133*3eab3d9eSThomas Hellstrom * @cb_context: The device command buffer context. 134*3eab3d9eSThomas Hellstrom * @list: List head for attaching to the manager lists. 135*3eab3d9eSThomas Hellstrom * @node: The range manager node. 136*3eab3d9eSThomas Hellstrom * @handle. The DMA address of @cb_header. Handed to the device on command 137*3eab3d9eSThomas Hellstrom * buffer submission. 138*3eab3d9eSThomas Hellstrom * @cmd: Pointer to the command buffer space of this buffer. 139*3eab3d9eSThomas Hellstrom * @size: Size of the command buffer space of this buffer. 140*3eab3d9eSThomas Hellstrom * @reserved: Reserved space of this buffer. 141*3eab3d9eSThomas Hellstrom * @inline_space: Whether inline command buffer space is used. 142*3eab3d9eSThomas Hellstrom */ 143*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header { 144*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man; 145*3eab3d9eSThomas Hellstrom SVGACBHeader *cb_header; 146*3eab3d9eSThomas Hellstrom SVGACBContext cb_context; 147*3eab3d9eSThomas Hellstrom struct list_head list; 148*3eab3d9eSThomas Hellstrom struct drm_mm_node *node; 149*3eab3d9eSThomas Hellstrom dma_addr_t handle; 150*3eab3d9eSThomas Hellstrom u8 *cmd; 151*3eab3d9eSThomas Hellstrom size_t size; 152*3eab3d9eSThomas Hellstrom size_t reserved; 153*3eab3d9eSThomas Hellstrom bool inline_space; 154*3eab3d9eSThomas Hellstrom }; 155*3eab3d9eSThomas Hellstrom 156*3eab3d9eSThomas Hellstrom /** 157*3eab3d9eSThomas Hellstrom * struct vmw_cmdbuf_dheader - Device command buffer header with inline 158*3eab3d9eSThomas Hellstrom * command buffer space. 159*3eab3d9eSThomas Hellstrom * 160*3eab3d9eSThomas Hellstrom * @cb_header: Device command buffer header. 161*3eab3d9eSThomas Hellstrom * @cmd: Inline command buffer space. 162*3eab3d9eSThomas Hellstrom */ 163*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_dheader { 164*3eab3d9eSThomas Hellstrom SVGACBHeader cb_header; 165*3eab3d9eSThomas Hellstrom u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN); 166*3eab3d9eSThomas Hellstrom }; 167*3eab3d9eSThomas Hellstrom 168*3eab3d9eSThomas Hellstrom /** 169*3eab3d9eSThomas Hellstrom * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata 170*3eab3d9eSThomas Hellstrom * 171*3eab3d9eSThomas Hellstrom * @page_size: Size of requested command buffer space in pages. 172*3eab3d9eSThomas Hellstrom * @node: The range manager node if allocation succeeded. 173*3eab3d9eSThomas Hellstrom * @ret: Error code if failure. Otherwise 0. 174*3eab3d9eSThomas Hellstrom */ 175*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_alloc_info { 176*3eab3d9eSThomas Hellstrom size_t page_size; 177*3eab3d9eSThomas Hellstrom struct drm_mm_node *node; 178*3eab3d9eSThomas Hellstrom int ret; 179*3eab3d9eSThomas Hellstrom }; 180*3eab3d9eSThomas Hellstrom 181*3eab3d9eSThomas Hellstrom /* Loop over each context in the command buffer manager. */ 182*3eab3d9eSThomas Hellstrom #define for_each_cmdbuf_ctx(_man, _i, _ctx) \ 183*3eab3d9eSThomas Hellstrom for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \ 184*3eab3d9eSThomas Hellstrom ++(_i), ++(_ctx)) 185*3eab3d9eSThomas Hellstrom 186*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable); 187*3eab3d9eSThomas Hellstrom 188*3eab3d9eSThomas Hellstrom 189*3eab3d9eSThomas Hellstrom /** 190*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex. 191*3eab3d9eSThomas Hellstrom * 192*3eab3d9eSThomas Hellstrom * @man: The range manager. 193*3eab3d9eSThomas Hellstrom * @interruptible: Whether to wait interruptible when locking. 194*3eab3d9eSThomas Hellstrom */ 195*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible) 196*3eab3d9eSThomas Hellstrom { 197*3eab3d9eSThomas Hellstrom if (interruptible) { 198*3eab3d9eSThomas Hellstrom if (mutex_lock_interruptible(&man->cur_mutex)) 199*3eab3d9eSThomas Hellstrom return -ERESTARTSYS; 200*3eab3d9eSThomas Hellstrom } else { 201*3eab3d9eSThomas Hellstrom mutex_lock(&man->cur_mutex); 202*3eab3d9eSThomas Hellstrom } 203*3eab3d9eSThomas Hellstrom 204*3eab3d9eSThomas Hellstrom return 0; 205*3eab3d9eSThomas Hellstrom } 206*3eab3d9eSThomas Hellstrom 207*3eab3d9eSThomas Hellstrom /** 208*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex. 209*3eab3d9eSThomas Hellstrom * 210*3eab3d9eSThomas Hellstrom * @man: The range manager. 211*3eab3d9eSThomas Hellstrom */ 212*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man) 213*3eab3d9eSThomas Hellstrom { 214*3eab3d9eSThomas Hellstrom mutex_unlock(&man->cur_mutex); 215*3eab3d9eSThomas Hellstrom } 216*3eab3d9eSThomas Hellstrom 217*3eab3d9eSThomas Hellstrom /** 218*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has 219*3eab3d9eSThomas Hellstrom * been used for the device context with inline command buffers. 220*3eab3d9eSThomas Hellstrom * Need not be called locked. 221*3eab3d9eSThomas Hellstrom * 222*3eab3d9eSThomas Hellstrom * @header: Pointer to the header to free. 223*3eab3d9eSThomas Hellstrom */ 224*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header) 225*3eab3d9eSThomas Hellstrom { 226*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_dheader *dheader; 227*3eab3d9eSThomas Hellstrom 228*3eab3d9eSThomas Hellstrom if (WARN_ON_ONCE(!header->inline_space)) 229*3eab3d9eSThomas Hellstrom return; 230*3eab3d9eSThomas Hellstrom 231*3eab3d9eSThomas Hellstrom dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader, 232*3eab3d9eSThomas Hellstrom cb_header); 233*3eab3d9eSThomas Hellstrom dma_pool_free(header->man->dheaders, dheader, header->handle); 234*3eab3d9eSThomas Hellstrom kfree(header); 235*3eab3d9eSThomas Hellstrom } 236*3eab3d9eSThomas Hellstrom 237*3eab3d9eSThomas Hellstrom /** 238*3eab3d9eSThomas Hellstrom * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its 239*3eab3d9eSThomas Hellstrom * associated structures. 240*3eab3d9eSThomas Hellstrom * 241*3eab3d9eSThomas Hellstrom * header: Pointer to the header to free. 242*3eab3d9eSThomas Hellstrom * 243*3eab3d9eSThomas Hellstrom * For internal use. Must be called with man::lock held. 244*3eab3d9eSThomas Hellstrom */ 245*3eab3d9eSThomas Hellstrom static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header) 246*3eab3d9eSThomas Hellstrom { 247*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man = header->man; 248*3eab3d9eSThomas Hellstrom 249*3eab3d9eSThomas Hellstrom BUG_ON(!spin_is_locked(&man->lock)); 250*3eab3d9eSThomas Hellstrom 251*3eab3d9eSThomas Hellstrom if (header->inline_space) { 252*3eab3d9eSThomas Hellstrom vmw_cmdbuf_header_inline_free(header); 253*3eab3d9eSThomas Hellstrom return; 254*3eab3d9eSThomas Hellstrom } 255*3eab3d9eSThomas Hellstrom 256*3eab3d9eSThomas Hellstrom drm_mm_remove_node(header->node); 257*3eab3d9eSThomas Hellstrom kfree(header->node); 258*3eab3d9eSThomas Hellstrom header->node = NULL; 259*3eab3d9eSThomas Hellstrom wake_up_all(&man->alloc_queue); 260*3eab3d9eSThomas Hellstrom if (header->cb_header) 261*3eab3d9eSThomas Hellstrom dma_pool_free(man->headers, header->cb_header, 262*3eab3d9eSThomas Hellstrom header->handle); 263*3eab3d9eSThomas Hellstrom kfree(header); 264*3eab3d9eSThomas Hellstrom } 265*3eab3d9eSThomas Hellstrom 266*3eab3d9eSThomas Hellstrom /** 267*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its 268*3eab3d9eSThomas Hellstrom * associated structures. 269*3eab3d9eSThomas Hellstrom * 270*3eab3d9eSThomas Hellstrom * @header: Pointer to the header to free. 271*3eab3d9eSThomas Hellstrom */ 272*3eab3d9eSThomas Hellstrom void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header) 273*3eab3d9eSThomas Hellstrom { 274*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man = header->man; 275*3eab3d9eSThomas Hellstrom 276*3eab3d9eSThomas Hellstrom /* Avoid locking if inline_space */ 277*3eab3d9eSThomas Hellstrom if (header->inline_space) { 278*3eab3d9eSThomas Hellstrom vmw_cmdbuf_header_inline_free(header); 279*3eab3d9eSThomas Hellstrom return; 280*3eab3d9eSThomas Hellstrom } 281*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 282*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_header_free(header); 283*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 284*3eab3d9eSThomas Hellstrom } 285*3eab3d9eSThomas Hellstrom 286*3eab3d9eSThomas Hellstrom 287*3eab3d9eSThomas Hellstrom /** 288*3eab3d9eSThomas Hellstrom * vmw_cmbuf_header_submit: Submit a command buffer to hardware. 289*3eab3d9eSThomas Hellstrom * 290*3eab3d9eSThomas Hellstrom * @header: The header of the buffer to submit. 291*3eab3d9eSThomas Hellstrom */ 292*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header) 293*3eab3d9eSThomas Hellstrom { 294*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man = header->man; 295*3eab3d9eSThomas Hellstrom u32 val; 296*3eab3d9eSThomas Hellstrom 297*3eab3d9eSThomas Hellstrom val = (header->handle >> 32); 298*3eab3d9eSThomas Hellstrom vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); 299*3eab3d9eSThomas Hellstrom val = (header->handle & 0xFFFFFFFFULL); 300*3eab3d9eSThomas Hellstrom val |= header->cb_context & SVGA_CB_CONTEXT_MASK; 301*3eab3d9eSThomas Hellstrom vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); 302*3eab3d9eSThomas Hellstrom 303*3eab3d9eSThomas Hellstrom return header->cb_header->status; 304*3eab3d9eSThomas Hellstrom } 305*3eab3d9eSThomas Hellstrom 306*3eab3d9eSThomas Hellstrom /** 307*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_ctx_init: Initialize a command buffer context. 308*3eab3d9eSThomas Hellstrom * 309*3eab3d9eSThomas Hellstrom * @ctx: The command buffer context to initialize 310*3eab3d9eSThomas Hellstrom */ 311*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx) 312*3eab3d9eSThomas Hellstrom { 313*3eab3d9eSThomas Hellstrom INIT_LIST_HEAD(&ctx->hw_submitted); 314*3eab3d9eSThomas Hellstrom INIT_LIST_HEAD(&ctx->submitted); 315*3eab3d9eSThomas Hellstrom INIT_LIST_HEAD(&ctx->preempted); 316*3eab3d9eSThomas Hellstrom ctx->num_hw_submitted = 0; 317*3eab3d9eSThomas Hellstrom } 318*3eab3d9eSThomas Hellstrom 319*3eab3d9eSThomas Hellstrom /** 320*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer 321*3eab3d9eSThomas Hellstrom * context. 322*3eab3d9eSThomas Hellstrom * 323*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 324*3eab3d9eSThomas Hellstrom * @ctx: The command buffer context. 325*3eab3d9eSThomas Hellstrom * 326*3eab3d9eSThomas Hellstrom * Submits command buffers to hardware until there are no more command 327*3eab3d9eSThomas Hellstrom * buffers to submit or the hardware can't handle more command buffers. 328*3eab3d9eSThomas Hellstrom */ 329*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, 330*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context *ctx) 331*3eab3d9eSThomas Hellstrom { 332*3eab3d9eSThomas Hellstrom while (ctx->num_hw_submitted < man->max_hw_submitted && 333*3eab3d9eSThomas Hellstrom !list_empty(&ctx->submitted)) { 334*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *entry; 335*3eab3d9eSThomas Hellstrom SVGACBStatus status; 336*3eab3d9eSThomas Hellstrom 337*3eab3d9eSThomas Hellstrom entry = list_first_entry(&ctx->submitted, 338*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header, 339*3eab3d9eSThomas Hellstrom list); 340*3eab3d9eSThomas Hellstrom 341*3eab3d9eSThomas Hellstrom status = vmw_cmdbuf_header_submit(entry); 342*3eab3d9eSThomas Hellstrom 343*3eab3d9eSThomas Hellstrom /* This should never happen */ 344*3eab3d9eSThomas Hellstrom if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) { 345*3eab3d9eSThomas Hellstrom entry->cb_header->status = SVGA_CB_STATUS_NONE; 346*3eab3d9eSThomas Hellstrom break; 347*3eab3d9eSThomas Hellstrom } 348*3eab3d9eSThomas Hellstrom 349*3eab3d9eSThomas Hellstrom list_del(&entry->list); 350*3eab3d9eSThomas Hellstrom list_add_tail(&entry->list, &ctx->hw_submitted); 351*3eab3d9eSThomas Hellstrom ctx->num_hw_submitted++; 352*3eab3d9eSThomas Hellstrom } 353*3eab3d9eSThomas Hellstrom 354*3eab3d9eSThomas Hellstrom } 355*3eab3d9eSThomas Hellstrom 356*3eab3d9eSThomas Hellstrom /** 357*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_ctx_submit: Process a command buffer context. 358*3eab3d9eSThomas Hellstrom * 359*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 360*3eab3d9eSThomas Hellstrom * @ctx: The command buffer context. 361*3eab3d9eSThomas Hellstrom * 362*3eab3d9eSThomas Hellstrom * Submit command buffers to hardware if possible, and process finished 363*3eab3d9eSThomas Hellstrom * buffers. Typically freeing them, but on preemption or error take 364*3eab3d9eSThomas Hellstrom * appropriate action. Wake up waiters if appropriate. 365*3eab3d9eSThomas Hellstrom */ 366*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, 367*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context *ctx, 368*3eab3d9eSThomas Hellstrom int *notempty) 369*3eab3d9eSThomas Hellstrom { 370*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *entry, *next; 371*3eab3d9eSThomas Hellstrom 372*3eab3d9eSThomas Hellstrom vmw_cmdbuf_ctx_submit(man, ctx); 373*3eab3d9eSThomas Hellstrom 374*3eab3d9eSThomas Hellstrom list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) { 375*3eab3d9eSThomas Hellstrom SVGACBStatus status = entry->cb_header->status; 376*3eab3d9eSThomas Hellstrom 377*3eab3d9eSThomas Hellstrom if (status == SVGA_CB_STATUS_NONE) 378*3eab3d9eSThomas Hellstrom break; 379*3eab3d9eSThomas Hellstrom 380*3eab3d9eSThomas Hellstrom list_del(&entry->list); 381*3eab3d9eSThomas Hellstrom wake_up_all(&man->idle_queue); 382*3eab3d9eSThomas Hellstrom ctx->num_hw_submitted--; 383*3eab3d9eSThomas Hellstrom switch (status) { 384*3eab3d9eSThomas Hellstrom case SVGA_CB_STATUS_COMPLETED: 385*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_header_free(entry); 386*3eab3d9eSThomas Hellstrom break; 387*3eab3d9eSThomas Hellstrom case SVGA_CB_STATUS_COMMAND_ERROR: 388*3eab3d9eSThomas Hellstrom case SVGA_CB_STATUS_CB_HEADER_ERROR: 389*3eab3d9eSThomas Hellstrom list_add_tail(&entry->list, &man->error); 390*3eab3d9eSThomas Hellstrom schedule_work(&man->work); 391*3eab3d9eSThomas Hellstrom break; 392*3eab3d9eSThomas Hellstrom case SVGA_CB_STATUS_PREEMPTED: 393*3eab3d9eSThomas Hellstrom list_add(&entry->list, &ctx->preempted); 394*3eab3d9eSThomas Hellstrom break; 395*3eab3d9eSThomas Hellstrom default: 396*3eab3d9eSThomas Hellstrom WARN_ONCE(true, "Undefined command buffer status.\n"); 397*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_header_free(entry); 398*3eab3d9eSThomas Hellstrom break; 399*3eab3d9eSThomas Hellstrom } 400*3eab3d9eSThomas Hellstrom } 401*3eab3d9eSThomas Hellstrom 402*3eab3d9eSThomas Hellstrom vmw_cmdbuf_ctx_submit(man, ctx); 403*3eab3d9eSThomas Hellstrom if (!list_empty(&ctx->submitted)) 404*3eab3d9eSThomas Hellstrom (*notempty)++; 405*3eab3d9eSThomas Hellstrom } 406*3eab3d9eSThomas Hellstrom 407*3eab3d9eSThomas Hellstrom /** 408*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_man_process - Process all command buffer contexts and 409*3eab3d9eSThomas Hellstrom * switch on and off irqs as appropriate. 410*3eab3d9eSThomas Hellstrom * 411*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 412*3eab3d9eSThomas Hellstrom * 413*3eab3d9eSThomas Hellstrom * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has 414*3eab3d9eSThomas Hellstrom * command buffers left that are not submitted to hardware, Make sure 415*3eab3d9eSThomas Hellstrom * IRQ handling is turned on. Otherwise, make sure it's turned off. This 416*3eab3d9eSThomas Hellstrom * function may return -EAGAIN to indicate it should be rerun due to 417*3eab3d9eSThomas Hellstrom * possibly missed IRQs if IRQs has just been turned on. 418*3eab3d9eSThomas Hellstrom */ 419*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) 420*3eab3d9eSThomas Hellstrom { 421*3eab3d9eSThomas Hellstrom int notempty = 0; 422*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context *ctx; 423*3eab3d9eSThomas Hellstrom int i; 424*3eab3d9eSThomas Hellstrom 425*3eab3d9eSThomas Hellstrom for_each_cmdbuf_ctx(man, i, ctx) 426*3eab3d9eSThomas Hellstrom vmw_cmdbuf_ctx_process(man, ctx, ¬empty); 427*3eab3d9eSThomas Hellstrom 428*3eab3d9eSThomas Hellstrom if (man->irq_on && !notempty) { 429*3eab3d9eSThomas Hellstrom vmw_generic_waiter_remove(man->dev_priv, 430*3eab3d9eSThomas Hellstrom SVGA_IRQFLAG_COMMAND_BUFFER, 431*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 432*3eab3d9eSThomas Hellstrom man->irq_on = false; 433*3eab3d9eSThomas Hellstrom } else if (!man->irq_on && notempty) { 434*3eab3d9eSThomas Hellstrom vmw_generic_waiter_add(man->dev_priv, 435*3eab3d9eSThomas Hellstrom SVGA_IRQFLAG_COMMAND_BUFFER, 436*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 437*3eab3d9eSThomas Hellstrom man->irq_on = true; 438*3eab3d9eSThomas Hellstrom 439*3eab3d9eSThomas Hellstrom /* Rerun in case we just missed an irq. */ 440*3eab3d9eSThomas Hellstrom return -EAGAIN; 441*3eab3d9eSThomas Hellstrom } 442*3eab3d9eSThomas Hellstrom 443*3eab3d9eSThomas Hellstrom return 0; 444*3eab3d9eSThomas Hellstrom } 445*3eab3d9eSThomas Hellstrom 446*3eab3d9eSThomas Hellstrom /** 447*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a 448*3eab3d9eSThomas Hellstrom * command buffer context 449*3eab3d9eSThomas Hellstrom * 450*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 451*3eab3d9eSThomas Hellstrom * @header: The header of the buffer to submit. 452*3eab3d9eSThomas Hellstrom * @cb_context: The command buffer context to use. 453*3eab3d9eSThomas Hellstrom * 454*3eab3d9eSThomas Hellstrom * This function adds @header to the "submitted" queue of the command 455*3eab3d9eSThomas Hellstrom * buffer context identified by @cb_context. It then calls the command buffer 456*3eab3d9eSThomas Hellstrom * manager processing to potentially submit the buffer to hardware. 457*3eab3d9eSThomas Hellstrom * @man->lock needs to be held when calling this function. 458*3eab3d9eSThomas Hellstrom */ 459*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, 460*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header, 461*3eab3d9eSThomas Hellstrom SVGACBContext cb_context) 462*3eab3d9eSThomas Hellstrom { 463*3eab3d9eSThomas Hellstrom if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT)) 464*3eab3d9eSThomas Hellstrom header->cb_header->dxContext = 0; 465*3eab3d9eSThomas Hellstrom header->cb_context = cb_context; 466*3eab3d9eSThomas Hellstrom list_add_tail(&header->list, &man->ctx[cb_context].submitted); 467*3eab3d9eSThomas Hellstrom 468*3eab3d9eSThomas Hellstrom if (vmw_cmdbuf_man_process(man) == -EAGAIN) 469*3eab3d9eSThomas Hellstrom vmw_cmdbuf_man_process(man); 470*3eab3d9eSThomas Hellstrom } 471*3eab3d9eSThomas Hellstrom 472*3eab3d9eSThomas Hellstrom /** 473*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt 474*3eab3d9eSThomas Hellstrom * handler implemented as a tasklet. 475*3eab3d9eSThomas Hellstrom * 476*3eab3d9eSThomas Hellstrom * @data: Tasklet closure. A pointer to the command buffer manager cast to 477*3eab3d9eSThomas Hellstrom * an unsigned long. 478*3eab3d9eSThomas Hellstrom * 479*3eab3d9eSThomas Hellstrom * The bottom half (tasklet) of the interrupt handler simply calls into the 480*3eab3d9eSThomas Hellstrom * command buffer processor to free finished buffers and submit any 481*3eab3d9eSThomas Hellstrom * queued buffers to hardware. 482*3eab3d9eSThomas Hellstrom */ 483*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_man_tasklet(unsigned long data) 484*3eab3d9eSThomas Hellstrom { 485*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; 486*3eab3d9eSThomas Hellstrom 487*3eab3d9eSThomas Hellstrom spin_lock(&man->lock); 488*3eab3d9eSThomas Hellstrom if (vmw_cmdbuf_man_process(man) == -EAGAIN) 489*3eab3d9eSThomas Hellstrom (void) vmw_cmdbuf_man_process(man); 490*3eab3d9eSThomas Hellstrom spin_unlock(&man->lock); 491*3eab3d9eSThomas Hellstrom } 492*3eab3d9eSThomas Hellstrom 493*3eab3d9eSThomas Hellstrom /** 494*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_work_func - The deferred work function that handles 495*3eab3d9eSThomas Hellstrom * command buffer errors. 496*3eab3d9eSThomas Hellstrom * 497*3eab3d9eSThomas Hellstrom * @work: The work func closure argument. 498*3eab3d9eSThomas Hellstrom * 499*3eab3d9eSThomas Hellstrom * Restarting the command buffer context after an error requires process 500*3eab3d9eSThomas Hellstrom * context, so it is deferred to this work function. 501*3eab3d9eSThomas Hellstrom */ 502*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_work_func(struct work_struct *work) 503*3eab3d9eSThomas Hellstrom { 504*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man = 505*3eab3d9eSThomas Hellstrom container_of(work, struct vmw_cmdbuf_man, work); 506*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *entry, *next; 507*3eab3d9eSThomas Hellstrom bool restart; 508*3eab3d9eSThomas Hellstrom 509*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 510*3eab3d9eSThomas Hellstrom list_for_each_entry_safe(entry, next, &man->error, list) { 511*3eab3d9eSThomas Hellstrom restart = true; 512*3eab3d9eSThomas Hellstrom DRM_ERROR("Command buffer error.\n"); 513*3eab3d9eSThomas Hellstrom 514*3eab3d9eSThomas Hellstrom list_del(&entry->list); 515*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_header_free(entry); 516*3eab3d9eSThomas Hellstrom wake_up_all(&man->idle_queue); 517*3eab3d9eSThomas Hellstrom } 518*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 519*3eab3d9eSThomas Hellstrom 520*3eab3d9eSThomas Hellstrom if (restart && vmw_cmdbuf_startstop(man, true)) 521*3eab3d9eSThomas Hellstrom DRM_ERROR("Failed restarting command buffer context 0.\n"); 522*3eab3d9eSThomas Hellstrom 523*3eab3d9eSThomas Hellstrom } 524*3eab3d9eSThomas Hellstrom 525*3eab3d9eSThomas Hellstrom /** 526*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle. 527*3eab3d9eSThomas Hellstrom * 528*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 529*3eab3d9eSThomas Hellstrom * @check_preempted: Check also the preempted queue for pending command buffers. 530*3eab3d9eSThomas Hellstrom * 531*3eab3d9eSThomas Hellstrom */ 532*3eab3d9eSThomas Hellstrom static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, 533*3eab3d9eSThomas Hellstrom bool check_preempted) 534*3eab3d9eSThomas Hellstrom { 535*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context *ctx; 536*3eab3d9eSThomas Hellstrom bool idle = false; 537*3eab3d9eSThomas Hellstrom int i; 538*3eab3d9eSThomas Hellstrom 539*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 540*3eab3d9eSThomas Hellstrom vmw_cmdbuf_man_process(man); 541*3eab3d9eSThomas Hellstrom for_each_cmdbuf_ctx(man, i, ctx) { 542*3eab3d9eSThomas Hellstrom if (!list_empty(&ctx->submitted) || 543*3eab3d9eSThomas Hellstrom !list_empty(&ctx->hw_submitted) || 544*3eab3d9eSThomas Hellstrom (check_preempted && !list_empty(&ctx->preempted))) 545*3eab3d9eSThomas Hellstrom goto out_unlock; 546*3eab3d9eSThomas Hellstrom } 547*3eab3d9eSThomas Hellstrom 548*3eab3d9eSThomas Hellstrom idle = list_empty(&man->error); 549*3eab3d9eSThomas Hellstrom 550*3eab3d9eSThomas Hellstrom out_unlock: 551*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 552*3eab3d9eSThomas Hellstrom 553*3eab3d9eSThomas Hellstrom return idle; 554*3eab3d9eSThomas Hellstrom } 555*3eab3d9eSThomas Hellstrom 556*3eab3d9eSThomas Hellstrom /** 557*3eab3d9eSThomas Hellstrom * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel 558*3eab3d9eSThomas Hellstrom * command submissions 559*3eab3d9eSThomas Hellstrom * 560*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 561*3eab3d9eSThomas Hellstrom * 562*3eab3d9eSThomas Hellstrom * Flushes the current command buffer without allocating a new one. A new one 563*3eab3d9eSThomas Hellstrom * is automatically allocated when needed. Call with @man->cur_mutex held. 564*3eab3d9eSThomas Hellstrom */ 565*3eab3d9eSThomas Hellstrom static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) 566*3eab3d9eSThomas Hellstrom { 567*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *cur = man->cur; 568*3eab3d9eSThomas Hellstrom 569*3eab3d9eSThomas Hellstrom WARN_ON(!mutex_is_locked(&man->cur_mutex)); 570*3eab3d9eSThomas Hellstrom 571*3eab3d9eSThomas Hellstrom if (!cur) 572*3eab3d9eSThomas Hellstrom return; 573*3eab3d9eSThomas Hellstrom 574*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 575*3eab3d9eSThomas Hellstrom if (man->cur_pos == 0) { 576*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_header_free(cur); 577*3eab3d9eSThomas Hellstrom goto out_unlock; 578*3eab3d9eSThomas Hellstrom } 579*3eab3d9eSThomas Hellstrom 580*3eab3d9eSThomas Hellstrom man->cur->cb_header->length = man->cur_pos; 581*3eab3d9eSThomas Hellstrom vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); 582*3eab3d9eSThomas Hellstrom out_unlock: 583*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 584*3eab3d9eSThomas Hellstrom man->cur = NULL; 585*3eab3d9eSThomas Hellstrom man->cur_pos = 0; 586*3eab3d9eSThomas Hellstrom } 587*3eab3d9eSThomas Hellstrom 588*3eab3d9eSThomas Hellstrom /** 589*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel 590*3eab3d9eSThomas Hellstrom * command submissions 591*3eab3d9eSThomas Hellstrom * 592*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 593*3eab3d9eSThomas Hellstrom * @interruptible: Whether to sleep interruptible when sleeping. 594*3eab3d9eSThomas Hellstrom * 595*3eab3d9eSThomas Hellstrom * Flushes the current command buffer without allocating a new one. A new one 596*3eab3d9eSThomas Hellstrom * is automatically allocated when needed. 597*3eab3d9eSThomas Hellstrom */ 598*3eab3d9eSThomas Hellstrom int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, 599*3eab3d9eSThomas Hellstrom bool interruptible) 600*3eab3d9eSThomas Hellstrom { 601*3eab3d9eSThomas Hellstrom int ret = vmw_cmdbuf_cur_lock(man, interruptible); 602*3eab3d9eSThomas Hellstrom 603*3eab3d9eSThomas Hellstrom if (ret) 604*3eab3d9eSThomas Hellstrom return ret; 605*3eab3d9eSThomas Hellstrom 606*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_cur_flush(man); 607*3eab3d9eSThomas Hellstrom vmw_cmdbuf_cur_unlock(man); 608*3eab3d9eSThomas Hellstrom 609*3eab3d9eSThomas Hellstrom return 0; 610*3eab3d9eSThomas Hellstrom } 611*3eab3d9eSThomas Hellstrom 612*3eab3d9eSThomas Hellstrom /** 613*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_idle - Wait for command buffer manager idle. 614*3eab3d9eSThomas Hellstrom * 615*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 616*3eab3d9eSThomas Hellstrom * @interruptible: Sleep interruptible while waiting. 617*3eab3d9eSThomas Hellstrom * @timeout: Time out after this many ticks. 618*3eab3d9eSThomas Hellstrom * 619*3eab3d9eSThomas Hellstrom * Wait until the command buffer manager has processed all command buffers, 620*3eab3d9eSThomas Hellstrom * or until a timeout occurs. If a timeout occurs, the function will return 621*3eab3d9eSThomas Hellstrom * -EBUSY. 622*3eab3d9eSThomas Hellstrom */ 623*3eab3d9eSThomas Hellstrom int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, 624*3eab3d9eSThomas Hellstrom unsigned long timeout) 625*3eab3d9eSThomas Hellstrom { 626*3eab3d9eSThomas Hellstrom int ret; 627*3eab3d9eSThomas Hellstrom 628*3eab3d9eSThomas Hellstrom ret = vmw_cmdbuf_cur_flush(man, interruptible); 629*3eab3d9eSThomas Hellstrom vmw_generic_waiter_add(man->dev_priv, 630*3eab3d9eSThomas Hellstrom SVGA_IRQFLAG_COMMAND_BUFFER, 631*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 632*3eab3d9eSThomas Hellstrom 633*3eab3d9eSThomas Hellstrom if (interruptible) { 634*3eab3d9eSThomas Hellstrom ret = wait_event_interruptible_timeout 635*3eab3d9eSThomas Hellstrom (man->idle_queue, vmw_cmdbuf_man_idle(man, true), 636*3eab3d9eSThomas Hellstrom timeout); 637*3eab3d9eSThomas Hellstrom } else { 638*3eab3d9eSThomas Hellstrom ret = wait_event_timeout 639*3eab3d9eSThomas Hellstrom (man->idle_queue, vmw_cmdbuf_man_idle(man, true), 640*3eab3d9eSThomas Hellstrom timeout); 641*3eab3d9eSThomas Hellstrom } 642*3eab3d9eSThomas Hellstrom vmw_generic_waiter_remove(man->dev_priv, 643*3eab3d9eSThomas Hellstrom SVGA_IRQFLAG_COMMAND_BUFFER, 644*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 645*3eab3d9eSThomas Hellstrom if (ret == 0) { 646*3eab3d9eSThomas Hellstrom if (!vmw_cmdbuf_man_idle(man, true)) 647*3eab3d9eSThomas Hellstrom ret = -EBUSY; 648*3eab3d9eSThomas Hellstrom else 649*3eab3d9eSThomas Hellstrom ret = 0; 650*3eab3d9eSThomas Hellstrom } 651*3eab3d9eSThomas Hellstrom if (ret > 0) 652*3eab3d9eSThomas Hellstrom ret = 0; 653*3eab3d9eSThomas Hellstrom 654*3eab3d9eSThomas Hellstrom return ret; 655*3eab3d9eSThomas Hellstrom } 656*3eab3d9eSThomas Hellstrom 657*3eab3d9eSThomas Hellstrom /** 658*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool. 659*3eab3d9eSThomas Hellstrom * 660*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 661*3eab3d9eSThomas Hellstrom * @info: Allocation info. Will hold the size on entry and allocated mm node 662*3eab3d9eSThomas Hellstrom * on successful return. 663*3eab3d9eSThomas Hellstrom * 664*3eab3d9eSThomas Hellstrom * Try to allocate buffer space from the main pool. Returns true if succeeded. 665*3eab3d9eSThomas Hellstrom * If a fatal error was hit, the error code is returned in @info->ret. 666*3eab3d9eSThomas Hellstrom */ 667*3eab3d9eSThomas Hellstrom static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 668*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_alloc_info *info) 669*3eab3d9eSThomas Hellstrom { 670*3eab3d9eSThomas Hellstrom int ret; 671*3eab3d9eSThomas Hellstrom 672*3eab3d9eSThomas Hellstrom if (info->node) 673*3eab3d9eSThomas Hellstrom return true; 674*3eab3d9eSThomas Hellstrom 675*3eab3d9eSThomas Hellstrom info->node = kzalloc(sizeof(*info->node), GFP_KERNEL); 676*3eab3d9eSThomas Hellstrom if (!info->node) { 677*3eab3d9eSThomas Hellstrom info->ret = -ENOMEM; 678*3eab3d9eSThomas Hellstrom return true; 679*3eab3d9eSThomas Hellstrom } 680*3eab3d9eSThomas Hellstrom 681*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 682*3eab3d9eSThomas Hellstrom ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, 0, 0, 683*3eab3d9eSThomas Hellstrom DRM_MM_SEARCH_DEFAULT, 684*3eab3d9eSThomas Hellstrom DRM_MM_CREATE_DEFAULT); 685*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 686*3eab3d9eSThomas Hellstrom if (ret) { 687*3eab3d9eSThomas Hellstrom kfree(info->node); 688*3eab3d9eSThomas Hellstrom info->node = NULL; 689*3eab3d9eSThomas Hellstrom } 690*3eab3d9eSThomas Hellstrom 691*3eab3d9eSThomas Hellstrom return !!info->node; 692*3eab3d9eSThomas Hellstrom } 693*3eab3d9eSThomas Hellstrom 694*3eab3d9eSThomas Hellstrom /** 695*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool. 696*3eab3d9eSThomas Hellstrom * 697*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 698*3eab3d9eSThomas Hellstrom * @size: The size of the allocation. 699*3eab3d9eSThomas Hellstrom * @interruptible: Whether to sleep interruptible while waiting for space. 700*3eab3d9eSThomas Hellstrom * 701*3eab3d9eSThomas Hellstrom * This function allocates buffer space from the main pool, and if there is 702*3eab3d9eSThomas Hellstrom * no space available ATM, it turns on IRQ handling and sleeps waiting for it to 703*3eab3d9eSThomas Hellstrom * become available. 704*3eab3d9eSThomas Hellstrom */ 705*3eab3d9eSThomas Hellstrom static struct drm_mm_node *vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man, 706*3eab3d9eSThomas Hellstrom size_t size, 707*3eab3d9eSThomas Hellstrom bool interruptible) 708*3eab3d9eSThomas Hellstrom { 709*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_alloc_info info; 710*3eab3d9eSThomas Hellstrom 711*3eab3d9eSThomas Hellstrom info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT; 712*3eab3d9eSThomas Hellstrom info.node = NULL; 713*3eab3d9eSThomas Hellstrom info.ret = 0; 714*3eab3d9eSThomas Hellstrom 715*3eab3d9eSThomas Hellstrom /* 716*3eab3d9eSThomas Hellstrom * To prevent starvation of large requests, only one allocating call 717*3eab3d9eSThomas Hellstrom * at a time waiting for space. 718*3eab3d9eSThomas Hellstrom */ 719*3eab3d9eSThomas Hellstrom if (interruptible) { 720*3eab3d9eSThomas Hellstrom if (mutex_lock_interruptible(&man->space_mutex)) 721*3eab3d9eSThomas Hellstrom return ERR_PTR(-ERESTARTSYS); 722*3eab3d9eSThomas Hellstrom } else { 723*3eab3d9eSThomas Hellstrom mutex_lock(&man->space_mutex); 724*3eab3d9eSThomas Hellstrom } 725*3eab3d9eSThomas Hellstrom 726*3eab3d9eSThomas Hellstrom /* Try to allocate space without waiting. */ 727*3eab3d9eSThomas Hellstrom (void) vmw_cmdbuf_try_alloc(man, &info); 728*3eab3d9eSThomas Hellstrom if (info.ret && !info.node) { 729*3eab3d9eSThomas Hellstrom mutex_unlock(&man->space_mutex); 730*3eab3d9eSThomas Hellstrom return ERR_PTR(info.ret); 731*3eab3d9eSThomas Hellstrom } 732*3eab3d9eSThomas Hellstrom 733*3eab3d9eSThomas Hellstrom if (info.node) { 734*3eab3d9eSThomas Hellstrom mutex_unlock(&man->space_mutex); 735*3eab3d9eSThomas Hellstrom return info.node; 736*3eab3d9eSThomas Hellstrom } 737*3eab3d9eSThomas Hellstrom 738*3eab3d9eSThomas Hellstrom vmw_generic_waiter_add(man->dev_priv, 739*3eab3d9eSThomas Hellstrom SVGA_IRQFLAG_COMMAND_BUFFER, 740*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 741*3eab3d9eSThomas Hellstrom 742*3eab3d9eSThomas Hellstrom if (interruptible) { 743*3eab3d9eSThomas Hellstrom int ret; 744*3eab3d9eSThomas Hellstrom 745*3eab3d9eSThomas Hellstrom ret = wait_event_interruptible 746*3eab3d9eSThomas Hellstrom (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); 747*3eab3d9eSThomas Hellstrom if (ret) { 748*3eab3d9eSThomas Hellstrom vmw_generic_waiter_remove 749*3eab3d9eSThomas Hellstrom (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER, 750*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 751*3eab3d9eSThomas Hellstrom mutex_unlock(&man->space_mutex); 752*3eab3d9eSThomas Hellstrom return ERR_PTR(ret); 753*3eab3d9eSThomas Hellstrom } 754*3eab3d9eSThomas Hellstrom } else { 755*3eab3d9eSThomas Hellstrom wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); 756*3eab3d9eSThomas Hellstrom } 757*3eab3d9eSThomas Hellstrom vmw_generic_waiter_remove(man->dev_priv, 758*3eab3d9eSThomas Hellstrom SVGA_IRQFLAG_COMMAND_BUFFER, 759*3eab3d9eSThomas Hellstrom &man->dev_priv->cmdbuf_waiters); 760*3eab3d9eSThomas Hellstrom mutex_unlock(&man->space_mutex); 761*3eab3d9eSThomas Hellstrom if (info.ret && !info.node) 762*3eab3d9eSThomas Hellstrom return ERR_PTR(info.ret); 763*3eab3d9eSThomas Hellstrom 764*3eab3d9eSThomas Hellstrom return info.node; 765*3eab3d9eSThomas Hellstrom } 766*3eab3d9eSThomas Hellstrom 767*3eab3d9eSThomas Hellstrom /** 768*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer 769*3eab3d9eSThomas Hellstrom * space from the main pool. 770*3eab3d9eSThomas Hellstrom * 771*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 772*3eab3d9eSThomas Hellstrom * @header: Pointer to the header to set up. 773*3eab3d9eSThomas Hellstrom * @size: The requested size of the buffer space. 774*3eab3d9eSThomas Hellstrom * @interruptible: Whether to sleep interruptible while waiting for space. 775*3eab3d9eSThomas Hellstrom */ 776*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, 777*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header, 778*3eab3d9eSThomas Hellstrom size_t size, 779*3eab3d9eSThomas Hellstrom bool interruptible) 780*3eab3d9eSThomas Hellstrom { 781*3eab3d9eSThomas Hellstrom SVGACBHeader *cb_hdr; 782*3eab3d9eSThomas Hellstrom size_t offset; 783*3eab3d9eSThomas Hellstrom int ret; 784*3eab3d9eSThomas Hellstrom 785*3eab3d9eSThomas Hellstrom if (!man->has_pool) 786*3eab3d9eSThomas Hellstrom return -ENOMEM; 787*3eab3d9eSThomas Hellstrom 788*3eab3d9eSThomas Hellstrom header->node = vmw_cmdbuf_alloc_space(man, size, interruptible); 789*3eab3d9eSThomas Hellstrom 790*3eab3d9eSThomas Hellstrom if (IS_ERR(header->node)) 791*3eab3d9eSThomas Hellstrom return PTR_ERR(header->node); 792*3eab3d9eSThomas Hellstrom 793*3eab3d9eSThomas Hellstrom header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, 794*3eab3d9eSThomas Hellstrom &header->handle); 795*3eab3d9eSThomas Hellstrom if (!header->cb_header) { 796*3eab3d9eSThomas Hellstrom ret = -ENOMEM; 797*3eab3d9eSThomas Hellstrom goto out_no_cb_header; 798*3eab3d9eSThomas Hellstrom } 799*3eab3d9eSThomas Hellstrom 800*3eab3d9eSThomas Hellstrom header->size = header->node->size << PAGE_SHIFT; 801*3eab3d9eSThomas Hellstrom cb_hdr = header->cb_header; 802*3eab3d9eSThomas Hellstrom offset = header->node->start << PAGE_SHIFT; 803*3eab3d9eSThomas Hellstrom header->cmd = man->map + offset; 804*3eab3d9eSThomas Hellstrom memset(cb_hdr, 0, sizeof(*cb_hdr)); 805*3eab3d9eSThomas Hellstrom if (man->using_mob) { 806*3eab3d9eSThomas Hellstrom cb_hdr->flags = SVGA_CB_FLAG_MOB; 807*3eab3d9eSThomas Hellstrom cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; 808*3eab3d9eSThomas Hellstrom cb_hdr->ptr.mob.mobOffset = offset; 809*3eab3d9eSThomas Hellstrom } else { 810*3eab3d9eSThomas Hellstrom cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; 811*3eab3d9eSThomas Hellstrom } 812*3eab3d9eSThomas Hellstrom 813*3eab3d9eSThomas Hellstrom return 0; 814*3eab3d9eSThomas Hellstrom 815*3eab3d9eSThomas Hellstrom out_no_cb_header: 816*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 817*3eab3d9eSThomas Hellstrom drm_mm_remove_node(header->node); 818*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 819*3eab3d9eSThomas Hellstrom kfree(header->node); 820*3eab3d9eSThomas Hellstrom 821*3eab3d9eSThomas Hellstrom return ret; 822*3eab3d9eSThomas Hellstrom } 823*3eab3d9eSThomas Hellstrom 824*3eab3d9eSThomas Hellstrom /** 825*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_space_inline - Set up a command buffer header with 826*3eab3d9eSThomas Hellstrom * inline command buffer space. 827*3eab3d9eSThomas Hellstrom * 828*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 829*3eab3d9eSThomas Hellstrom * @header: Pointer to the header to set up. 830*3eab3d9eSThomas Hellstrom * @size: The requested size of the buffer space. 831*3eab3d9eSThomas Hellstrom */ 832*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, 833*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header, 834*3eab3d9eSThomas Hellstrom int size) 835*3eab3d9eSThomas Hellstrom { 836*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_dheader *dheader; 837*3eab3d9eSThomas Hellstrom SVGACBHeader *cb_hdr; 838*3eab3d9eSThomas Hellstrom 839*3eab3d9eSThomas Hellstrom if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) 840*3eab3d9eSThomas Hellstrom return -ENOMEM; 841*3eab3d9eSThomas Hellstrom 842*3eab3d9eSThomas Hellstrom dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, 843*3eab3d9eSThomas Hellstrom &header->handle); 844*3eab3d9eSThomas Hellstrom if (!dheader) 845*3eab3d9eSThomas Hellstrom return -ENOMEM; 846*3eab3d9eSThomas Hellstrom 847*3eab3d9eSThomas Hellstrom header->inline_space = true; 848*3eab3d9eSThomas Hellstrom header->size = VMW_CMDBUF_INLINE_SIZE; 849*3eab3d9eSThomas Hellstrom cb_hdr = &dheader->cb_header; 850*3eab3d9eSThomas Hellstrom header->cb_header = cb_hdr; 851*3eab3d9eSThomas Hellstrom header->cmd = dheader->cmd; 852*3eab3d9eSThomas Hellstrom memset(dheader, 0, sizeof(*dheader)); 853*3eab3d9eSThomas Hellstrom cb_hdr->status = SVGA_CB_STATUS_NONE; 854*3eab3d9eSThomas Hellstrom cb_hdr->flags = SVGA_CB_FLAG_NONE; 855*3eab3d9eSThomas Hellstrom cb_hdr->ptr.pa = (u64)header->handle + 856*3eab3d9eSThomas Hellstrom (u64)offsetof(struct vmw_cmdbuf_dheader, cmd); 857*3eab3d9eSThomas Hellstrom 858*3eab3d9eSThomas Hellstrom return 0; 859*3eab3d9eSThomas Hellstrom } 860*3eab3d9eSThomas Hellstrom 861*3eab3d9eSThomas Hellstrom /** 862*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_alloc - Allocate a command buffer header complete with 863*3eab3d9eSThomas Hellstrom * command buffer space. 864*3eab3d9eSThomas Hellstrom * 865*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 866*3eab3d9eSThomas Hellstrom * @size: The requested size of the buffer space. 867*3eab3d9eSThomas Hellstrom * @interruptible: Whether to sleep interruptible while waiting for space. 868*3eab3d9eSThomas Hellstrom * @p_header: points to a header pointer to populate on successful return. 869*3eab3d9eSThomas Hellstrom * 870*3eab3d9eSThomas Hellstrom * Returns a pointer to command buffer space if successful. Otherwise 871*3eab3d9eSThomas Hellstrom * returns an error pointer. The header pointer returned in @p_header should 872*3eab3d9eSThomas Hellstrom * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit(). 873*3eab3d9eSThomas Hellstrom */ 874*3eab3d9eSThomas Hellstrom void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, 875*3eab3d9eSThomas Hellstrom size_t size, bool interruptible, 876*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header **p_header) 877*3eab3d9eSThomas Hellstrom { 878*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header; 879*3eab3d9eSThomas Hellstrom int ret = 0; 880*3eab3d9eSThomas Hellstrom 881*3eab3d9eSThomas Hellstrom *p_header = NULL; 882*3eab3d9eSThomas Hellstrom 883*3eab3d9eSThomas Hellstrom header = kzalloc(sizeof(*header), GFP_KERNEL); 884*3eab3d9eSThomas Hellstrom if (!header) 885*3eab3d9eSThomas Hellstrom return ERR_PTR(-ENOMEM); 886*3eab3d9eSThomas Hellstrom 887*3eab3d9eSThomas Hellstrom if (size <= VMW_CMDBUF_INLINE_SIZE) 888*3eab3d9eSThomas Hellstrom ret = vmw_cmdbuf_space_inline(man, header, size); 889*3eab3d9eSThomas Hellstrom else 890*3eab3d9eSThomas Hellstrom ret = vmw_cmdbuf_space_pool(man, header, size, interruptible); 891*3eab3d9eSThomas Hellstrom 892*3eab3d9eSThomas Hellstrom if (ret) { 893*3eab3d9eSThomas Hellstrom kfree(header); 894*3eab3d9eSThomas Hellstrom return ERR_PTR(ret); 895*3eab3d9eSThomas Hellstrom } 896*3eab3d9eSThomas Hellstrom 897*3eab3d9eSThomas Hellstrom header->man = man; 898*3eab3d9eSThomas Hellstrom INIT_LIST_HEAD(&header->list); 899*3eab3d9eSThomas Hellstrom header->cb_header->status = SVGA_CB_STATUS_NONE; 900*3eab3d9eSThomas Hellstrom *p_header = header; 901*3eab3d9eSThomas Hellstrom 902*3eab3d9eSThomas Hellstrom return header->cmd; 903*3eab3d9eSThomas Hellstrom } 904*3eab3d9eSThomas Hellstrom 905*3eab3d9eSThomas Hellstrom /** 906*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current 907*3eab3d9eSThomas Hellstrom * command buffer. 908*3eab3d9eSThomas Hellstrom * 909*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 910*3eab3d9eSThomas Hellstrom * @size: The requested size of the commands. 911*3eab3d9eSThomas Hellstrom * @ctx_id: The context id if any. Otherwise set to SVGA3D_REG_INVALID. 912*3eab3d9eSThomas Hellstrom * @interruptible: Whether to sleep interruptible while waiting for space. 913*3eab3d9eSThomas Hellstrom * 914*3eab3d9eSThomas Hellstrom * Returns a pointer to command buffer space if successful. Otherwise 915*3eab3d9eSThomas Hellstrom * returns an error pointer. 916*3eab3d9eSThomas Hellstrom */ 917*3eab3d9eSThomas Hellstrom static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man, 918*3eab3d9eSThomas Hellstrom size_t size, 919*3eab3d9eSThomas Hellstrom int ctx_id, 920*3eab3d9eSThomas Hellstrom bool interruptible) 921*3eab3d9eSThomas Hellstrom { 922*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *cur; 923*3eab3d9eSThomas Hellstrom void *ret; 924*3eab3d9eSThomas Hellstrom 925*3eab3d9eSThomas Hellstrom if (vmw_cmdbuf_cur_lock(man, interruptible)) 926*3eab3d9eSThomas Hellstrom return ERR_PTR(-ERESTARTSYS); 927*3eab3d9eSThomas Hellstrom 928*3eab3d9eSThomas Hellstrom cur = man->cur; 929*3eab3d9eSThomas Hellstrom if (cur && (size + man->cur_pos > cur->size || 930*3eab3d9eSThomas Hellstrom (ctx_id != SVGA3D_INVALID_ID && 931*3eab3d9eSThomas Hellstrom (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) && 932*3eab3d9eSThomas Hellstrom ctx_id != cur->cb_header->dxContext))) 933*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_cur_flush(man); 934*3eab3d9eSThomas Hellstrom 935*3eab3d9eSThomas Hellstrom if (!man->cur) { 936*3eab3d9eSThomas Hellstrom ret = vmw_cmdbuf_alloc(man, 937*3eab3d9eSThomas Hellstrom max_t(size_t, size, man->default_size), 938*3eab3d9eSThomas Hellstrom interruptible, &man->cur); 939*3eab3d9eSThomas Hellstrom if (IS_ERR(ret)) { 940*3eab3d9eSThomas Hellstrom vmw_cmdbuf_cur_unlock(man); 941*3eab3d9eSThomas Hellstrom return ret; 942*3eab3d9eSThomas Hellstrom } 943*3eab3d9eSThomas Hellstrom 944*3eab3d9eSThomas Hellstrom cur = man->cur; 945*3eab3d9eSThomas Hellstrom } 946*3eab3d9eSThomas Hellstrom 947*3eab3d9eSThomas Hellstrom if (ctx_id != SVGA3D_INVALID_ID) { 948*3eab3d9eSThomas Hellstrom cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT; 949*3eab3d9eSThomas Hellstrom cur->cb_header->dxContext = ctx_id; 950*3eab3d9eSThomas Hellstrom } 951*3eab3d9eSThomas Hellstrom 952*3eab3d9eSThomas Hellstrom cur->reserved = size; 953*3eab3d9eSThomas Hellstrom 954*3eab3d9eSThomas Hellstrom return (void *) (man->cur->cmd + man->cur_pos); 955*3eab3d9eSThomas Hellstrom } 956*3eab3d9eSThomas Hellstrom 957*3eab3d9eSThomas Hellstrom /** 958*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer. 959*3eab3d9eSThomas Hellstrom * 960*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 961*3eab3d9eSThomas Hellstrom * @size: The size of the commands actually written. 962*3eab3d9eSThomas Hellstrom * @flush: Whether to flush the command buffer immediately. 963*3eab3d9eSThomas Hellstrom */ 964*3eab3d9eSThomas Hellstrom static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man, 965*3eab3d9eSThomas Hellstrom size_t size, bool flush) 966*3eab3d9eSThomas Hellstrom { 967*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *cur = man->cur; 968*3eab3d9eSThomas Hellstrom 969*3eab3d9eSThomas Hellstrom WARN_ON(!mutex_is_locked(&man->cur_mutex)); 970*3eab3d9eSThomas Hellstrom 971*3eab3d9eSThomas Hellstrom WARN_ON(size > cur->reserved); 972*3eab3d9eSThomas Hellstrom man->cur_pos += size; 973*3eab3d9eSThomas Hellstrom if (!size) 974*3eab3d9eSThomas Hellstrom cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT; 975*3eab3d9eSThomas Hellstrom if (flush) 976*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_cur_flush(man); 977*3eab3d9eSThomas Hellstrom vmw_cmdbuf_cur_unlock(man); 978*3eab3d9eSThomas Hellstrom } 979*3eab3d9eSThomas Hellstrom 980*3eab3d9eSThomas Hellstrom /** 981*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer. 982*3eab3d9eSThomas Hellstrom * 983*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 984*3eab3d9eSThomas Hellstrom * @size: The requested size of the commands. 985*3eab3d9eSThomas Hellstrom * @ctx_id: The context id if any. Otherwise set to SVGA3D_REG_INVALID. 986*3eab3d9eSThomas Hellstrom * @interruptible: Whether to sleep interruptible while waiting for space. 987*3eab3d9eSThomas Hellstrom * @header: Header of the command buffer. NULL if the current command buffer 988*3eab3d9eSThomas Hellstrom * should be used. 989*3eab3d9eSThomas Hellstrom * 990*3eab3d9eSThomas Hellstrom * Returns a pointer to command buffer space if successful. Otherwise 991*3eab3d9eSThomas Hellstrom * returns an error pointer. 992*3eab3d9eSThomas Hellstrom */ 993*3eab3d9eSThomas Hellstrom void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, 994*3eab3d9eSThomas Hellstrom int ctx_id, bool interruptible, 995*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header) 996*3eab3d9eSThomas Hellstrom { 997*3eab3d9eSThomas Hellstrom if (!header) 998*3eab3d9eSThomas Hellstrom return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible); 999*3eab3d9eSThomas Hellstrom 1000*3eab3d9eSThomas Hellstrom if (size > header->size) 1001*3eab3d9eSThomas Hellstrom return ERR_PTR(-EINVAL); 1002*3eab3d9eSThomas Hellstrom 1003*3eab3d9eSThomas Hellstrom if (ctx_id != SVGA3D_INVALID_ID) { 1004*3eab3d9eSThomas Hellstrom header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT; 1005*3eab3d9eSThomas Hellstrom header->cb_header->dxContext = ctx_id; 1006*3eab3d9eSThomas Hellstrom } 1007*3eab3d9eSThomas Hellstrom 1008*3eab3d9eSThomas Hellstrom header->reserved = size; 1009*3eab3d9eSThomas Hellstrom return header->cmd; 1010*3eab3d9eSThomas Hellstrom } 1011*3eab3d9eSThomas Hellstrom 1012*3eab3d9eSThomas Hellstrom /** 1013*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_commit - Commit commands in a command buffer. 1014*3eab3d9eSThomas Hellstrom * 1015*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 1016*3eab3d9eSThomas Hellstrom * @size: The size of the commands actually written. 1017*3eab3d9eSThomas Hellstrom * @header: Header of the command buffer. NULL if the current command buffer 1018*3eab3d9eSThomas Hellstrom * should be used. 1019*3eab3d9eSThomas Hellstrom * @flush: Whether to flush the command buffer immediately. 1020*3eab3d9eSThomas Hellstrom */ 1021*3eab3d9eSThomas Hellstrom void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, 1022*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header, bool flush) 1023*3eab3d9eSThomas Hellstrom { 1024*3eab3d9eSThomas Hellstrom if (!header) { 1025*3eab3d9eSThomas Hellstrom vmw_cmdbuf_commit_cur(man, size, flush); 1026*3eab3d9eSThomas Hellstrom return; 1027*3eab3d9eSThomas Hellstrom } 1028*3eab3d9eSThomas Hellstrom 1029*3eab3d9eSThomas Hellstrom (void) vmw_cmdbuf_cur_lock(man, false); 1030*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_cur_flush(man); 1031*3eab3d9eSThomas Hellstrom WARN_ON(size > header->reserved); 1032*3eab3d9eSThomas Hellstrom man->cur = header; 1033*3eab3d9eSThomas Hellstrom man->cur_pos = size; 1034*3eab3d9eSThomas Hellstrom if (!size) 1035*3eab3d9eSThomas Hellstrom header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT; 1036*3eab3d9eSThomas Hellstrom if (flush) 1037*3eab3d9eSThomas Hellstrom __vmw_cmdbuf_cur_flush(man); 1038*3eab3d9eSThomas Hellstrom vmw_cmdbuf_cur_unlock(man); 1039*3eab3d9eSThomas Hellstrom } 1040*3eab3d9eSThomas Hellstrom 1041*3eab3d9eSThomas Hellstrom /** 1042*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half. 1043*3eab3d9eSThomas Hellstrom * 1044*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 1045*3eab3d9eSThomas Hellstrom */ 1046*3eab3d9eSThomas Hellstrom void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man) 1047*3eab3d9eSThomas Hellstrom { 1048*3eab3d9eSThomas Hellstrom if (!man) 1049*3eab3d9eSThomas Hellstrom return; 1050*3eab3d9eSThomas Hellstrom 1051*3eab3d9eSThomas Hellstrom tasklet_schedule(&man->tasklet); 1052*3eab3d9eSThomas Hellstrom } 1053*3eab3d9eSThomas Hellstrom 1054*3eab3d9eSThomas Hellstrom /** 1055*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_send_device_command - Send a command through the device context. 1056*3eab3d9eSThomas Hellstrom * 1057*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 1058*3eab3d9eSThomas Hellstrom * @command: Pointer to the command to send. 1059*3eab3d9eSThomas Hellstrom * @size: Size of the command. 1060*3eab3d9eSThomas Hellstrom * 1061*3eab3d9eSThomas Hellstrom * Synchronously sends a device context command. 1062*3eab3d9eSThomas Hellstrom */ 1063*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, 1064*3eab3d9eSThomas Hellstrom const void *command, 1065*3eab3d9eSThomas Hellstrom size_t size) 1066*3eab3d9eSThomas Hellstrom { 1067*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_header *header; 1068*3eab3d9eSThomas Hellstrom int status; 1069*3eab3d9eSThomas Hellstrom void *cmd = vmw_cmdbuf_alloc(man, size, false, &header); 1070*3eab3d9eSThomas Hellstrom 1071*3eab3d9eSThomas Hellstrom if (IS_ERR(cmd)) 1072*3eab3d9eSThomas Hellstrom return PTR_ERR(cmd); 1073*3eab3d9eSThomas Hellstrom 1074*3eab3d9eSThomas Hellstrom memcpy(cmd, command, size); 1075*3eab3d9eSThomas Hellstrom header->cb_header->length = size; 1076*3eab3d9eSThomas Hellstrom header->cb_context = SVGA_CB_CONTEXT_DEVICE; 1077*3eab3d9eSThomas Hellstrom spin_lock_bh(&man->lock); 1078*3eab3d9eSThomas Hellstrom status = vmw_cmdbuf_header_submit(header); 1079*3eab3d9eSThomas Hellstrom spin_unlock_bh(&man->lock); 1080*3eab3d9eSThomas Hellstrom vmw_cmdbuf_header_free(header); 1081*3eab3d9eSThomas Hellstrom 1082*3eab3d9eSThomas Hellstrom if (status != SVGA_CB_STATUS_COMPLETED) { 1083*3eab3d9eSThomas Hellstrom DRM_ERROR("Device context command failed with status %d\n", 1084*3eab3d9eSThomas Hellstrom status); 1085*3eab3d9eSThomas Hellstrom return -EINVAL; 1086*3eab3d9eSThomas Hellstrom } 1087*3eab3d9eSThomas Hellstrom 1088*3eab3d9eSThomas Hellstrom return 0; 1089*3eab3d9eSThomas Hellstrom } 1090*3eab3d9eSThomas Hellstrom 1091*3eab3d9eSThomas Hellstrom /** 1092*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_startstop - Send a start / stop command through the device 1093*3eab3d9eSThomas Hellstrom * context. 1094*3eab3d9eSThomas Hellstrom * 1095*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 1096*3eab3d9eSThomas Hellstrom * @enable: Whether to enable or disable the context. 1097*3eab3d9eSThomas Hellstrom * 1098*3eab3d9eSThomas Hellstrom * Synchronously sends a device start / stop context command. 1099*3eab3d9eSThomas Hellstrom */ 1100*3eab3d9eSThomas Hellstrom static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, 1101*3eab3d9eSThomas Hellstrom bool enable) 1102*3eab3d9eSThomas Hellstrom { 1103*3eab3d9eSThomas Hellstrom struct { 1104*3eab3d9eSThomas Hellstrom uint32 id; 1105*3eab3d9eSThomas Hellstrom SVGADCCmdStartStop body; 1106*3eab3d9eSThomas Hellstrom } __packed cmd; 1107*3eab3d9eSThomas Hellstrom 1108*3eab3d9eSThomas Hellstrom cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT; 1109*3eab3d9eSThomas Hellstrom cmd.body.enable = (enable) ? 1 : 0; 1110*3eab3d9eSThomas Hellstrom cmd.body.context = SVGA_CB_CONTEXT_0; 1111*3eab3d9eSThomas Hellstrom 1112*3eab3d9eSThomas Hellstrom return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); 1113*3eab3d9eSThomas Hellstrom } 1114*3eab3d9eSThomas Hellstrom 1115*3eab3d9eSThomas Hellstrom /** 1116*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes 1117*3eab3d9eSThomas Hellstrom * 1118*3eab3d9eSThomas Hellstrom * @man: The command buffer manager. 1119*3eab3d9eSThomas Hellstrom * @size: The size of the main space pool. 1120*3eab3d9eSThomas Hellstrom * @default_size: The default size of the command buffer for small kernel 1121*3eab3d9eSThomas Hellstrom * submissions. 1122*3eab3d9eSThomas Hellstrom * 1123*3eab3d9eSThomas Hellstrom * Set the size and allocate the main command buffer space pool, 1124*3eab3d9eSThomas Hellstrom * as well as the default size of the command buffer for 1125*3eab3d9eSThomas Hellstrom * small kernel submissions. If successful, this enables large command 1126*3eab3d9eSThomas Hellstrom * submissions. Note that this function requires that rudimentary command 1127*3eab3d9eSThomas Hellstrom * submission is already available and that the MOB memory manager is alive. 1128*3eab3d9eSThomas Hellstrom * Returns 0 on success. Negative error code on failure. 1129*3eab3d9eSThomas Hellstrom */ 1130*3eab3d9eSThomas Hellstrom int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, 1131*3eab3d9eSThomas Hellstrom size_t size, size_t default_size) 1132*3eab3d9eSThomas Hellstrom { 1133*3eab3d9eSThomas Hellstrom struct vmw_private *dev_priv = man->dev_priv; 1134*3eab3d9eSThomas Hellstrom bool dummy; 1135*3eab3d9eSThomas Hellstrom int ret; 1136*3eab3d9eSThomas Hellstrom 1137*3eab3d9eSThomas Hellstrom if (man->has_pool) 1138*3eab3d9eSThomas Hellstrom return -EINVAL; 1139*3eab3d9eSThomas Hellstrom 1140*3eab3d9eSThomas Hellstrom /* First, try to allocate a huge chunk of DMA memory */ 1141*3eab3d9eSThomas Hellstrom size = PAGE_ALIGN(size); 1142*3eab3d9eSThomas Hellstrom man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size, 1143*3eab3d9eSThomas Hellstrom &man->handle, GFP_KERNEL); 1144*3eab3d9eSThomas Hellstrom if (man->map) { 1145*3eab3d9eSThomas Hellstrom man->using_mob = false; 1146*3eab3d9eSThomas Hellstrom } else { 1147*3eab3d9eSThomas Hellstrom /* 1148*3eab3d9eSThomas Hellstrom * DMA memory failed. If we can have command buffers in a 1149*3eab3d9eSThomas Hellstrom * MOB, try to use that instead. Note that this will 1150*3eab3d9eSThomas Hellstrom * actually call into the already enabled manager, when 1151*3eab3d9eSThomas Hellstrom * binding the MOB. 1152*3eab3d9eSThomas Hellstrom */ 1153*3eab3d9eSThomas Hellstrom if (!(dev_priv->capabilities & SVGA_CAP_CMD_BUFFERS_3)) 1154*3eab3d9eSThomas Hellstrom return -ENOMEM; 1155*3eab3d9eSThomas Hellstrom 1156*3eab3d9eSThomas Hellstrom ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device, 1157*3eab3d9eSThomas Hellstrom &vmw_mob_ne_placement, 0, false, NULL, 1158*3eab3d9eSThomas Hellstrom &man->cmd_space); 1159*3eab3d9eSThomas Hellstrom if (ret) 1160*3eab3d9eSThomas Hellstrom return ret; 1161*3eab3d9eSThomas Hellstrom 1162*3eab3d9eSThomas Hellstrom man->using_mob = true; 1163*3eab3d9eSThomas Hellstrom ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT, 1164*3eab3d9eSThomas Hellstrom &man->map_obj); 1165*3eab3d9eSThomas Hellstrom if (ret) 1166*3eab3d9eSThomas Hellstrom goto out_no_map; 1167*3eab3d9eSThomas Hellstrom 1168*3eab3d9eSThomas Hellstrom man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy); 1169*3eab3d9eSThomas Hellstrom } 1170*3eab3d9eSThomas Hellstrom 1171*3eab3d9eSThomas Hellstrom man->size = size; 1172*3eab3d9eSThomas Hellstrom drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); 1173*3eab3d9eSThomas Hellstrom 1174*3eab3d9eSThomas Hellstrom man->has_pool = true; 1175*3eab3d9eSThomas Hellstrom man->default_size = default_size; 1176*3eab3d9eSThomas Hellstrom DRM_INFO("Using command buffers with %s pool.\n", 1177*3eab3d9eSThomas Hellstrom (man->using_mob) ? "MOB" : "DMA"); 1178*3eab3d9eSThomas Hellstrom 1179*3eab3d9eSThomas Hellstrom return 0; 1180*3eab3d9eSThomas Hellstrom 1181*3eab3d9eSThomas Hellstrom out_no_map: 1182*3eab3d9eSThomas Hellstrom if (man->using_mob) 1183*3eab3d9eSThomas Hellstrom ttm_bo_unref(&man->cmd_space); 1184*3eab3d9eSThomas Hellstrom 1185*3eab3d9eSThomas Hellstrom return ret; 1186*3eab3d9eSThomas Hellstrom } 1187*3eab3d9eSThomas Hellstrom 1188*3eab3d9eSThomas Hellstrom /** 1189*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for 1190*3eab3d9eSThomas Hellstrom * inline command buffer submissions only. 1191*3eab3d9eSThomas Hellstrom * 1192*3eab3d9eSThomas Hellstrom * @dev_priv: Pointer to device private structure. 1193*3eab3d9eSThomas Hellstrom * 1194*3eab3d9eSThomas Hellstrom * Returns a pointer to a cummand buffer manager to success or error pointer 1195*3eab3d9eSThomas Hellstrom * on failure. The command buffer manager will be enabled for submissions of 1196*3eab3d9eSThomas Hellstrom * size VMW_CMDBUF_INLINE_SIZE only. 1197*3eab3d9eSThomas Hellstrom */ 1198*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) 1199*3eab3d9eSThomas Hellstrom { 1200*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_man *man; 1201*3eab3d9eSThomas Hellstrom struct vmw_cmdbuf_context *ctx; 1202*3eab3d9eSThomas Hellstrom int i; 1203*3eab3d9eSThomas Hellstrom int ret; 1204*3eab3d9eSThomas Hellstrom 1205*3eab3d9eSThomas Hellstrom if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS)) 1206*3eab3d9eSThomas Hellstrom return ERR_PTR(-ENOSYS); 1207*3eab3d9eSThomas Hellstrom 1208*3eab3d9eSThomas Hellstrom man = kzalloc(sizeof(*man), GFP_KERNEL); 1209*3eab3d9eSThomas Hellstrom if (!man) 1210*3eab3d9eSThomas Hellstrom return ERR_PTR(-ENOMEM); 1211*3eab3d9eSThomas Hellstrom 1212*3eab3d9eSThomas Hellstrom man->headers = dma_pool_create("vmwgfx cmdbuf", 1213*3eab3d9eSThomas Hellstrom &dev_priv->dev->pdev->dev, 1214*3eab3d9eSThomas Hellstrom sizeof(SVGACBHeader), 1215*3eab3d9eSThomas Hellstrom 64, PAGE_SIZE); 1216*3eab3d9eSThomas Hellstrom if (!man->headers) { 1217*3eab3d9eSThomas Hellstrom ret = -ENOMEM; 1218*3eab3d9eSThomas Hellstrom goto out_no_pool; 1219*3eab3d9eSThomas Hellstrom } 1220*3eab3d9eSThomas Hellstrom 1221*3eab3d9eSThomas Hellstrom man->dheaders = dma_pool_create("vmwgfx inline cmdbuf", 1222*3eab3d9eSThomas Hellstrom &dev_priv->dev->pdev->dev, 1223*3eab3d9eSThomas Hellstrom sizeof(struct vmw_cmdbuf_dheader), 1224*3eab3d9eSThomas Hellstrom 64, PAGE_SIZE); 1225*3eab3d9eSThomas Hellstrom if (!man->dheaders) { 1226*3eab3d9eSThomas Hellstrom ret = -ENOMEM; 1227*3eab3d9eSThomas Hellstrom goto out_no_dpool; 1228*3eab3d9eSThomas Hellstrom } 1229*3eab3d9eSThomas Hellstrom 1230*3eab3d9eSThomas Hellstrom for_each_cmdbuf_ctx(man, i, ctx) 1231*3eab3d9eSThomas Hellstrom vmw_cmdbuf_ctx_init(ctx); 1232*3eab3d9eSThomas Hellstrom 1233*3eab3d9eSThomas Hellstrom INIT_LIST_HEAD(&man->error); 1234*3eab3d9eSThomas Hellstrom spin_lock_init(&man->lock); 1235*3eab3d9eSThomas Hellstrom mutex_init(&man->cur_mutex); 1236*3eab3d9eSThomas Hellstrom mutex_init(&man->space_mutex); 1237*3eab3d9eSThomas Hellstrom tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet, 1238*3eab3d9eSThomas Hellstrom (unsigned long) man); 1239*3eab3d9eSThomas Hellstrom man->default_size = VMW_CMDBUF_INLINE_SIZE; 1240*3eab3d9eSThomas Hellstrom init_waitqueue_head(&man->alloc_queue); 1241*3eab3d9eSThomas Hellstrom init_waitqueue_head(&man->idle_queue); 1242*3eab3d9eSThomas Hellstrom man->dev_priv = dev_priv; 1243*3eab3d9eSThomas Hellstrom man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1; 1244*3eab3d9eSThomas Hellstrom INIT_WORK(&man->work, &vmw_cmdbuf_work_func); 1245*3eab3d9eSThomas Hellstrom vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR, 1246*3eab3d9eSThomas Hellstrom &dev_priv->error_waiters); 1247*3eab3d9eSThomas Hellstrom ret = vmw_cmdbuf_startstop(man, true); 1248*3eab3d9eSThomas Hellstrom if (ret) { 1249*3eab3d9eSThomas Hellstrom DRM_ERROR("Failed starting command buffer context 0.\n"); 1250*3eab3d9eSThomas Hellstrom vmw_cmdbuf_man_destroy(man); 1251*3eab3d9eSThomas Hellstrom return ERR_PTR(ret); 1252*3eab3d9eSThomas Hellstrom } 1253*3eab3d9eSThomas Hellstrom 1254*3eab3d9eSThomas Hellstrom return man; 1255*3eab3d9eSThomas Hellstrom 1256*3eab3d9eSThomas Hellstrom out_no_dpool: 1257*3eab3d9eSThomas Hellstrom dma_pool_destroy(man->headers); 1258*3eab3d9eSThomas Hellstrom out_no_pool: 1259*3eab3d9eSThomas Hellstrom kfree(man); 1260*3eab3d9eSThomas Hellstrom 1261*3eab3d9eSThomas Hellstrom return ERR_PTR(ret); 1262*3eab3d9eSThomas Hellstrom } 1263*3eab3d9eSThomas Hellstrom 1264*3eab3d9eSThomas Hellstrom /** 1265*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_remove_pool - Take down the main buffer space pool. 1266*3eab3d9eSThomas Hellstrom * 1267*3eab3d9eSThomas Hellstrom * @man: Pointer to a command buffer manager. 1268*3eab3d9eSThomas Hellstrom * 1269*3eab3d9eSThomas Hellstrom * This function removes the main buffer space pool, and should be called 1270*3eab3d9eSThomas Hellstrom * before MOB memory management is removed. When this function has been called, 1271*3eab3d9eSThomas Hellstrom * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or 1272*3eab3d9eSThomas Hellstrom * less are allowed, and the default size of the command buffer for small kernel 1273*3eab3d9eSThomas Hellstrom * submissions is also set to this size. 1274*3eab3d9eSThomas Hellstrom */ 1275*3eab3d9eSThomas Hellstrom void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) 1276*3eab3d9eSThomas Hellstrom { 1277*3eab3d9eSThomas Hellstrom if (!man->has_pool) 1278*3eab3d9eSThomas Hellstrom return; 1279*3eab3d9eSThomas Hellstrom 1280*3eab3d9eSThomas Hellstrom man->has_pool = false; 1281*3eab3d9eSThomas Hellstrom man->default_size = VMW_CMDBUF_INLINE_SIZE; 1282*3eab3d9eSThomas Hellstrom (void) vmw_cmdbuf_idle(man, false, 10*HZ); 1283*3eab3d9eSThomas Hellstrom if (man->using_mob) { 1284*3eab3d9eSThomas Hellstrom (void) ttm_bo_kunmap(&man->map_obj); 1285*3eab3d9eSThomas Hellstrom ttm_bo_unref(&man->cmd_space); 1286*3eab3d9eSThomas Hellstrom } else { 1287*3eab3d9eSThomas Hellstrom dma_free_coherent(&man->dev_priv->dev->pdev->dev, 1288*3eab3d9eSThomas Hellstrom man->size, man->map, man->handle); 1289*3eab3d9eSThomas Hellstrom } 1290*3eab3d9eSThomas Hellstrom } 1291*3eab3d9eSThomas Hellstrom 1292*3eab3d9eSThomas Hellstrom /** 1293*3eab3d9eSThomas Hellstrom * vmw_cmdbuf_man_destroy - Take down a command buffer manager. 1294*3eab3d9eSThomas Hellstrom * 1295*3eab3d9eSThomas Hellstrom * @man: Pointer to a command buffer manager. 1296*3eab3d9eSThomas Hellstrom * 1297*3eab3d9eSThomas Hellstrom * This function idles and then destroys a command buffer manager. 1298*3eab3d9eSThomas Hellstrom */ 1299*3eab3d9eSThomas Hellstrom void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) 1300*3eab3d9eSThomas Hellstrom { 1301*3eab3d9eSThomas Hellstrom WARN_ON_ONCE(man->has_pool); 1302*3eab3d9eSThomas Hellstrom (void) vmw_cmdbuf_idle(man, false, 10*HZ); 1303*3eab3d9eSThomas Hellstrom if (vmw_cmdbuf_startstop(man, false)) 1304*3eab3d9eSThomas Hellstrom DRM_ERROR("Failed stopping command buffer context 0.\n"); 1305*3eab3d9eSThomas Hellstrom 1306*3eab3d9eSThomas Hellstrom vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, 1307*3eab3d9eSThomas Hellstrom &man->dev_priv->error_waiters); 1308*3eab3d9eSThomas Hellstrom tasklet_kill(&man->tasklet); 1309*3eab3d9eSThomas Hellstrom (void) cancel_work_sync(&man->work); 1310*3eab3d9eSThomas Hellstrom dma_pool_destroy(man->dheaders); 1311*3eab3d9eSThomas Hellstrom dma_pool_destroy(man->headers); 1312*3eab3d9eSThomas Hellstrom mutex_destroy(&man->cur_mutex); 1313*3eab3d9eSThomas Hellstrom mutex_destroy(&man->space_mutex); 1314*3eab3d9eSThomas Hellstrom kfree(man); 1315*3eab3d9eSThomas Hellstrom } 1316