/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
        void *screen_base = (void __force *) info->screen_base;
        struct page *page;

        if (is_vmalloc_addr(screen_base + offs))
                page = vmalloc_to_page(screen_base + offs);
        else
                page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

        return page;
}

/* find and return the fb page at the given offset, whether it lives in
   vmalloc'ed or physically contiguous framebuffer memory */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vmf->vma->vm_private_data;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = fb_deferred_io_page(info, offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        if (vmf->vma->vm_file)
                page->mapping = vmf->vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff;

        vmf->page = page;
        return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct fb_info *info = file->private_data;
        struct inode *inode = file_inode(file);
        int err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        /* Skip if deferred io is compiled-in but disabled on this fbdev */
        if (!info->fbdefio)
                return 0;

        inode_lock(inode);
        /* Kill off the delayed work */
        cancel_delayed_work_sync(&info->deferred_work);

        /* Run it immediately */
        schedule_delayed_work(&info->deferred_work, 0);
        inode_unlock(inode);

        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
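/*
 * Note (a sketch of external wiring, not defined in this file): the fbdev
 * core is expected to hook the handler above into the device node's
 * file_operations, roughly:
 *
 *	static const struct file_operations fb_fops = {
 *		...
 *	#ifdef CONFIG_FB_DEFERRED_IO
 *		.fsync = fb_deferred_io_fsync,
 *	#endif
 *		...
 *	};
 *
 * so that an fsync() on the fbdev node flushes any pending deferred I/O
 * immediately instead of waiting for fbdefio->delay to elapse.
 */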
/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct fb_info *info = vmf->vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct list_head *pos = &fbdefio->pagelist;

        /* this is a callback we get when userspace first tries to
        write to the page. we schedule a workqueue. that workqueue
        will eventually mkclean the touched pages and execute the
        deferred framebuffer IO. then if userspace touches a page
        again, we repeat the same scheme */

        file_update_time(vmf->vma->vm_file);

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /* first write in this cycle, notify the driver */
        if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
                fbdefio->first_io(info);

        /*
         * We want the page to remain locked from ->page_mkwrite until
         * the PTE is marked dirty to avoid page_mkclean() being called
         * before the PTE is updated, which would leave the page ignored
         * by defio.
         * Do this by locking the page here and informing the caller
         * about it with VM_FAULT_LOCKED.
         */
        lock_page(page);

        /*
         * This check is to catch the case where a new process could start
         * writing to the same page through a new PTE. This new access
         * can cause a call to .page_mkwrite even if the original process'
         * PTE is marked writable.
         *
         * TODO: The lru field is owned by the page cache; hence the name.
         *       We dequeue in fb_deferred_io_work() after flushing the
         *       page's content into video memory. Instead of lru, fbdefio
         *       should have its own field.
         */
        if (!list_empty(&page->lru))
                goto page_already_added;

        if (unlikely(fbdefio->sort_pagelist)) {
                /*
                 * We loop through the pagelist before adding in order to
                 * keep the pagelist sorted. This has significant overhead
                 * of O(n^2) with n being the number of written pages. If
                 * possible, drivers should try to work with unsorted page
                 * lists instead.
                 */
                struct page *cur;

                list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                        if (cur->index > page->index)
                                break;
                }
                pos = &cur->lru;
        }

        list_add_tail(&page->lru, pos);

page_already_added:
        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
        .set_page_dirty = fb_deferred_io_set_page_dirty,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        if (!(info->flags & FBINFO_VIRTFB))
                vma->vm_flags |= VM_IO;
        vma->vm_private_data = info;
        return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info,
                                            deferred_work.work);
        struct list_head *node, *next;
        struct page *cur;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /* here we mkclean the pages, then do all deferred IO */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_safe(node, next, &fbdefio->pagelist) {
                list_del_init(node);
        }
        mutex_unlock(&fbdefio->lock);
}

void fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        unsigned int i;

        BUG_ON(!fbdefio);
        mutex_init(&fbdefio->lock);
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 s */
                fbdefio->delay = HZ;

        /* initialize all the page lists one time */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = fb_deferred_io_page(info, i);
                INIT_LIST_HEAD(&page->lru);
        }
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
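/*
 * Example usage (a minimal sketch; the example_* names are hypothetical
 * and not part of this file). A driver opts in by pointing info->fbdefio
 * at a struct fb_deferred_io and calling fb_deferred_io_init() before
 * registering the framebuffer. The deferred_io() callback then copies
 * the dirtied pages out to the device:
 *
 *	static void example_deferred_io(struct fb_info *info,
 *					struct list_head *pagelist)
 *	{
 *		struct page *page;
 *
 *		list_for_each_entry(page, pagelist, lru) {
 *			unsigned long offset = page->index << PAGE_SHIFT;
 *
 *			example_write_to_device(info, offset, PAGE_SIZE);
 *		}
 *	}
 *
 *	static struct fb_deferred_io example_defio = {
 *		.delay		= HZ / 10,
 *		.deferred_io	= example_deferred_io,
 *	};
 *
 *	info->fbdefio = &example_defio;
 *	fb_deferred_io_init(info);
 *
 * and fb_deferred_io_cleanup(info) undoes the setup in the driver's
 * teardown path, before the framebuffer memory is freed.
 */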
void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);

        /* clear out the mapping that we setup */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = fb_deferred_io_page(info, i);
                page->mapping = NULL;
        }

        mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
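/*
 * Overview of the write-tracking cycle implemented above:
 *
 *  1. fb_deferred_io_mmap() installs fb_deferred_io_vm_ops on the VMA.
 *  2. A read fault runs fb_deferred_io_fault(), which returns the
 *     backing page for the faulting offset.
 *  3. The first write to a page runs fb_deferred_io_mkwrite(), which
 *     locks the page, queues it on fbdefio->pagelist and schedules the
 *     delayed work.
 *  4. Once fbdefio->delay elapses, fb_deferred_io_work() write-protects
 *     the queued pages again via page_mkclean(), passes the list to the
 *     driver's deferred_io() callback and then empties it.
 *  5. The next userspace write faults again and the cycle restarts.
 */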