/**************************************************************************
 *
 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
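
/*
 * For instance, if sizeof(SVGACBHeader) were 32, the inline size works
 * out to 1024 - ALIGN(32, 64) = 960 bytes, so a struct vmw_cmdbuf_dheader
 * (header plus inline space) fits within a single 1024-byte pool block.
 * (Illustrative numbers only; the real header size comes from the device
 * headers.)
 */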

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block command submission on this context.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data is protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
	     ++(_i), ++(_ctx))
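
/*
 * Illustrative use of the loop macro (not taken verbatim from this file):
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	int i;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */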

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptible when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
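
	/*
	 * Writing SVGA_REG_COMMAND_LOW is what hands the buffer to the
	 * device, so the high half of the header address must be set up
	 * first.
	 */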
	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context's submitted list is non-empty
 * after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;
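
		/*
		 * Buffers were submitted to hardware in list order, so stop
		 * at the first one the device has not yet marked with a
		 * final status.
		 */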
		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool restart[SVGA_CB_CONTEXT_MAX];
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
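
	/*
	 * Recovery outline: unlink each offending buffer from the error
	 * list, rewind it past the failing command where commands remain,
	 * preempt the affected contexts, then requeue the rewound buffers
	 * ahead of the preempted ones and restart the contexts.
	 */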

	for_each_cmdbuf_ctx(man, i, ctx) {
		INIT_LIST_HEAD(&restart_head[i]);
		restart[i] = false;
	}

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		restart[entry->cb_context] = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			DRM_ERROR("Unknown command causing device error.\n");
			DRM_ERROR("Command buffer offset is %lu\n",
				  (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
		DRM_ERROR("Command buffer offset is %lu\n",
			  (unsigned long) cb_hdr->errorOffset);
		DRM_ERROR("Command size is %lu\n",
			  (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
		man->ctx[entry->cb_context].block_submission = true;
	}
	spin_unlock(&man->lock);

	/* Preempt all contexts with errors */
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
			DRM_ERROR("Failed preempting command buffer "
				  "context %u.\n", i);
	}

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!ctx->block_submission)
			continue;

		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */
		list_splice_init(&restart_head[i], &ctx->submitted);

		ctx->block_submission = false;
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	for_each_cmdbuf_ctx(man, i, ctx) {
		if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
			DRM_ERROR("Failed restarting command buffer "
				  "context %u.\n", i);
	}

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_fifo_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when sleeping.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
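	/* wait_event*_timeout() returns 0 on timeout; recheck idle before failing. */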
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if the
 * allocation succeeded, in which case @info->done is also set to true.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
 * become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;
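
	/*
	 * No space yet: register as an IRQ waiter so that command buffer
	 * completions raise interrupts, whose processing frees space and
	 * wakes @alloc_queue while we sleep below.
	 */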
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
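
/*
 * Illustrative sketch of the dedicated-header path (not verbatim driver
 * code): allocate a header, reserve, write commands, then commit:
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
 *	... write size bytes of commands at cmd ...
 *	vmw_cmdbuf_commit(man, size, header, false);
 */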

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}


/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}


/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start / stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
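 *
 * Like vmw_cmdbuf_preempt() above, the command is assembled as an
 * id / body pair and pushed through vmw_cmdbuf_send_device_command(),
 * so it completes or fails synchronously. Returns 0 on success or a
 * negative error code on failure.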
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions (currently overridden to VMW_CMDBUF_INLINE_SIZE; see the
 * in-function comment).
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
			     size_t size, size_t default_size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
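		 * Without SVGA_CAP_DX the device has no MOB support to
		 * fall back on, so pool setup fails with -ENOMEM below.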
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX))
			return -ENOMEM;

		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
				    &vmw_mob_ne_placement, 0, false, NULL,
				    &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob)
		ttm_bo_unref(&man->cmd_space);

	return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or error pointer
 * on failure. The command buffer manager will be enabled for submissions of
 * size VMW_CMDBUF_INLINE_SIZE only.
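 *
 * A sketch of the expected bring-up order (illustrative; pool_size and
 * default_size are placeholder values, and error handling is elided):
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	...
 *	ret = vmw_cmdbuf_set_pool_size(man, pool_size, default_size);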
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       &dev_priv->dev->pdev->dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					&dev_priv->dev->pdev->dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	for_each_cmdbuf_ctx(man, i, ctx) {
		ret = vmw_cmdbuf_startstop(man, i, true);
		if (ret) {
			DRM_ERROR("Failed starting command buffer "
				  "context %u.\n", i);
			vmw_cmdbuf_man_destroy(man);
			return ERR_PTR(ret);
		}
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_unref(&man->cmd_space);
	} else {
		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
				  man->size, man->map, man->handle);
	}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;

	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	for_each_cmdbuf_ctx(man, i, ctx)
		if (vmw_cmdbuf_startstop(man, i, false))
			DRM_ERROR("Failed stopping command buffer "
				  "context %u.\n", i);

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}
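
/*
 * Editorial note on teardown ordering, mirroring the setup above
 * (a summary of the kernel-doc in this file, not new driver logic):
 *
 *	vmw_cmdbuf_remove_pool(man);	- while MOB management is still alive
 *	vmw_cmdbuf_man_destroy(man);	- idles, stops the contexts and frees
 *					  the manager
 */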