Lines matching refs:ring
143 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
144 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
147 static void make_response(struct xen_blkif_ring *ring, u64 id,
168 static int add_persistent_gnt(struct xen_blkif_ring *ring, in add_persistent_gnt() argument
173 struct xen_blkif *blkif = ring->blkif; in add_persistent_gnt()
175 if (ring->persistent_gnt_c >= max_pgrants) { in add_persistent_gnt()
181 new = &ring->persistent_gnts.rb_node; in add_persistent_gnt()
199 rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); in add_persistent_gnt()
200 ring->persistent_gnt_c++; in add_persistent_gnt()
201 atomic_inc(&ring->persistent_gnt_in_use); in add_persistent_gnt()
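
The matches at 168-201 show add_persistent_gnt() inserting a freshly mapped
grant into the per-ring red-black tree keyed by grant reference, bumping
persistent_gnt_c and the in-use counter on success. A minimal kernel-style
sketch of that insert pattern; struct persistent_gnt here is a cut-down
stand-in for the driver's real definition:

    #include <linux/types.h>
    #include <linux/rbtree.h>

    struct persistent_gnt {
            u32 gnt;                /* grant reference, the tree key */
            struct rb_node node;
    };

    /* Insert @pgnt into @root keyed by ->gnt; -EEXIST on duplicate. */
    static int pgnt_tree_insert(struct rb_root *root,
                                struct persistent_gnt *pgnt)
    {
            struct rb_node **new = &root->rb_node, *parent = NULL;

            while (*new) {
                    struct persistent_gnt *this =
                            rb_entry(*new, struct persistent_gnt, node);

                    parent = *new;
                    if (pgnt->gnt < this->gnt)
                            new = &(*new)->rb_left;
                    else if (pgnt->gnt > this->gnt)
                            new = &(*new)->rb_right;
                    else
                            return -EEXIST; /* grant already tracked */
            }
            /* Link at the located leaf, then let the rbtree rebalance. */
            rb_link_node(&pgnt->node, parent, new);
            rb_insert_color(&pgnt->node, root);
            return 0;
    }
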
205 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, in get_persistent_gnt() argument
211 node = ring->persistent_gnts.rb_node; in get_persistent_gnt()
225 atomic_inc(&ring->persistent_gnt_in_use); in get_persistent_gnt()
232 static void put_persistent_gnt(struct xen_blkif_ring *ring, in put_persistent_gnt() argument
239 atomic_dec(&ring->persistent_gnt_in_use); in put_persistent_gnt()
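
get_persistent_gnt() (205-225) walks the same tree to find a cached mapping
and pins it via persistent_gnt_in_use; put_persistent_gnt() (232-239) drops
that pin. A hedged sketch of the lookup half, reusing the cut-down struct
from the previous sketch (the real driver additionally rejects a grant that
is looked up while already active):

    #include <linux/atomic.h>
    #include <linux/rbtree.h>

    static struct persistent_gnt *pgnt_tree_lookup(struct rb_root *root,
                                                   u32 gref,
                                                   atomic_t *in_use)
    {
            struct rb_node *node = root->rb_node;

            while (node) {
                    struct persistent_gnt *data =
                            rb_entry(node, struct persistent_gnt, node);

                    if (gref < data->gnt)
                            node = node->rb_left;
                    else if (gref > data->gnt)
                            node = node->rb_right;
                    else {
                            atomic_inc(in_use); /* pinned until the put */
                            return data;
                    }
            }
            return NULL;
    }
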
242 static void free_persistent_gnts(struct xen_blkif_ring *ring) in free_persistent_gnts() argument
244 struct rb_root *root = &ring->persistent_gnts; in free_persistent_gnts()
276 gnttab_page_cache_put(&ring->free_pages, pages, in free_persistent_gnts()
283 ring->persistent_gnt_c--; in free_persistent_gnts()
286 BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts)); in free_persistent_gnts()
287 BUG_ON(ring->persistent_gnt_c != 0); in free_persistent_gnts()
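
free_persistent_gnts() (242-287) drains the whole tree at teardown, unmapping
grants in batches, returning their pages to the ring's free-page cache, and
asserting that both the tree and persistent_gnt_c reach zero. A sketch of the
drain, swapping in the kernel's removal-safe post-order walker for the
driver's own traversal macro (the batched grant unmap and
gnttab_page_cache_put() step is elided):

    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/bug.h>

    static void pgnt_tree_drain(struct rb_root *root, unsigned int *count)
    {
            struct persistent_gnt *pgnt, *tmp;

            /* Post-order traversal tolerates freeing the current node. */
            rbtree_postorder_for_each_entry_safe(pgnt, tmp, root, node) {
                    /* real driver: batch gnttab unmap + page-cache put */
                    kfree(pgnt);
                    (*count)--;
            }
            *root = RB_ROOT;        /* every node is gone; reset the root */
            BUG_ON(*count != 0);
    }
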
296 struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work); in xen_blkbk_unmap_purged_grants() local
303 while(!list_empty(&ring->persistent_purge_list)) { in xen_blkbk_unmap_purged_grants()
304 persistent_gnt = list_first_entry(&ring->persistent_purge_list, in xen_blkbk_unmap_purged_grants()
319 gnttab_page_cache_put(&ring->free_pages, pages, in xen_blkbk_unmap_purged_grants()
328 gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap); in xen_blkbk_unmap_purged_grants()
332 static void purge_persistent_gnt(struct xen_blkif_ring *ring) in purge_persistent_gnt() argument
340 if (work_busy(&ring->persistent_purge_work)) { in purge_persistent_gnt()
345 if (ring->persistent_gnt_c < max_pgrants || in purge_persistent_gnt()
346 (ring->persistent_gnt_c == max_pgrants && in purge_persistent_gnt()
347 !ring->blkif->vbd.overflow_max_grants)) { in purge_persistent_gnt()
351 num_clean = ring->persistent_gnt_c - max_pgrants + num_clean; in purge_persistent_gnt()
352 num_clean = min(ring->persistent_gnt_c, num_clean); in purge_persistent_gnt()
368 BUG_ON(!list_empty(&ring->persistent_purge_list)); in purge_persistent_gnt()
369 root = &ring->persistent_gnts; in purge_persistent_gnt()
384 &ring->persistent_purge_list); in purge_persistent_gnt()
399 ring->persistent_gnt_c -= total; in purge_persistent_gnt()
400 ring->blkif->vbd.overflow_max_grants = 0; in purge_persistent_gnt()
403 schedule_work(&ring->persistent_purge_work); in purge_persistent_gnt()
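
purge_persistent_gnt() (332-403) is the LRU pass: it bails out while a
previous purge is still running (340) or while the cache sits below
max_pgrants without ever having overflowed (345-347); otherwise it computes a
cleanup quota, moves that many unused grants onto persistent_purge_list, and
defers the actual unmapping to persistent_purge_work via schedule_work(),
keeping grant-table hypercalls out of the tree scan. A hedged sketch of the
quota computation at 345-352; the 5% slice mirrors the driver's
LRU_PERCENT_CLEAN but is an assumption here:

    #include <linux/types.h>
    #include <linux/minmax.h>

    /* How many cached grants to reclaim this pass (illustrative). */
    static unsigned int purge_quota(unsigned int gnt_c,
                                    unsigned int max_pgrants,
                                    bool overflowed)
    {
            unsigned int num_clean;

            /* Under the cap and never overflowed: nothing to purge. */
            if (gnt_c < max_pgrants ||
                (gnt_c == max_pgrants && !overflowed))
                    return 0;

            /* A fixed slice plus the excess above the cap, clamped. */
            num_clean = (max_pgrants / 100) * 5;    /* assumed 5% */
            num_clean = gnt_c - max_pgrants + num_clean;
            return min(gnt_c, num_clean);
    }
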
414 static struct pending_req *alloc_req(struct xen_blkif_ring *ring) in alloc_req() argument
419 spin_lock_irqsave(&ring->pending_free_lock, flags); in alloc_req()
420 if (!list_empty(&ring->pending_free)) { in alloc_req()
421 req = list_entry(ring->pending_free.next, struct pending_req, in alloc_req()
425 spin_unlock_irqrestore(&ring->pending_free_lock, flags); in alloc_req()
433 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req) in free_req() argument
438 spin_lock_irqsave(&ring->pending_free_lock, flags); in free_req()
439 was_empty = list_empty(&ring->pending_free); in free_req()
440 list_add(&req->free_list, &ring->pending_free); in free_req()
441 spin_unlock_irqrestore(&ring->pending_free_lock, flags); in free_req()
443 wake_up(&ring->pending_free_wq); in free_req()
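
alloc_req()/free_req() (414-443) run the pending-request pool as a simple
free list under an irq-safe spinlock; note that free_req() only wakes
pending_free_wq on the empty-to-non-empty transition, since that is the only
state change a blocked xen_blkif_schedule() can be waiting for. A
self-contained sketch of the same pattern:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct pending_req {
            struct list_head free_list;
            /* ... per-request state ... */
    };

    static struct pending_req *pool_get(struct list_head *free,
                                        spinlock_t *lock)
    {
            struct pending_req *req = NULL;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            if (!list_empty(free)) {
                    req = list_first_entry(free, struct pending_req,
                                           free_list);
                    list_del(&req->free_list);
            }
            spin_unlock_irqrestore(lock, flags);
            return req;  /* NULL = pool exhausted; caller counts st_oo_req */
    }

    static void pool_put(struct list_head *free, spinlock_t *lock,
                         wait_queue_head_t *wq, struct pending_req *req)
    {
            unsigned long flags;
            bool was_empty;

            spin_lock_irqsave(lock, flags);
            was_empty = list_empty(free);
            list_add(&req->free_list, free);
            spin_unlock_irqrestore(lock, flags);

            /* Only the empty->non-empty edge can unblock a waiter. */
            if (was_empty)
                    wake_up(wq);
    }
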
523 static void blkif_notify_work(struct xen_blkif_ring *ring) in blkif_notify_work() argument
525 ring->waiting_reqs = 1; in blkif_notify_work()
526 wake_up(&ring->wq); in blkif_notify_work()
539 static void print_stats(struct xen_blkif_ring *ring) in print_stats() argument
543 current->comm, ring->st_oo_req, in print_stats()
544 ring->st_rd_req, ring->st_wr_req, in print_stats()
545 ring->st_f_req, ring->st_ds_req, in print_stats()
546 ring->persistent_gnt_c, max_pgrants); in print_stats()
547 ring->st_print = jiffies + msecs_to_jiffies(10 * 1000); in print_stats()
548 ring->st_rd_req = 0; in print_stats()
549 ring->st_wr_req = 0; in print_stats()
550 ring->st_oo_req = 0; in print_stats()
551 ring->st_ds_req = 0; in print_stats()
556 struct xen_blkif_ring *ring = arg; in xen_blkif_schedule() local
557 struct xen_blkif *blkif = ring->blkif; in xen_blkif_schedule()
574 ring->wq, in xen_blkif_schedule()
575 ring->waiting_reqs || kthread_should_stop(), in xen_blkif_schedule()
580 ring->pending_free_wq, in xen_blkif_schedule()
581 !list_empty(&ring->pending_free) || in xen_blkif_schedule()
587 do_eoi = ring->waiting_reqs; in xen_blkif_schedule()
589 ring->waiting_reqs = 0; in xen_blkif_schedule()
592 ret = do_block_io_op(ring, &eoi_flags); in xen_blkif_schedule()
594 ring->waiting_reqs = 1; in xen_blkif_schedule()
596 wait_event_interruptible(ring->shutdown_wq, in xen_blkif_schedule()
599 if (do_eoi && !ring->waiting_reqs) { in xen_blkif_schedule()
600 xen_irq_lateeoi(ring->irq, eoi_flags); in xen_blkif_schedule()
606 time_after(jiffies, ring->next_lru)) { in xen_blkif_schedule()
607 purge_persistent_gnt(ring); in xen_blkif_schedule()
608 ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL); in xen_blkif_schedule()
613 gnttab_page_cache_shrink(&ring->free_pages, 0); in xen_blkif_schedule()
615 gnttab_page_cache_shrink(&ring->free_pages, in xen_blkif_schedule()
618 if (log_stats && time_after(jiffies, ring->st_print)) in xen_blkif_schedule()
619 print_stats(ring); in xen_blkif_schedule()
623 flush_work(&ring->persistent_purge_work); in xen_blkif_schedule()
626 print_stats(ring); in xen_blkif_schedule()
628 ring->xenblkd = NULL; in xen_blkif_schedule()
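
xen_blkif_schedule() (556-628) is the per-ring service thread: it blocks
until work arrives (ring->wq), then until a pending_req is free
(pending_free_wq), consumes the ring, and signals the event channel's late
EOI only once no further requests were found, so a re-raised event can wake
it again; it also drives the periodic LRU purge and the stats printout. A
condensed, hedged sketch of that control flow — struct my_ring and
process_ring() are stand-ins for the driver's own types and
do_block_io_op():

    #include <linux/kthread.h>
    #include <linux/list.h>
    #include <linux/wait.h>
    #include <xen/events.h>

    static int backend_thread(void *arg)
    {
            struct my_ring *ring = arg;   /* hypothetical per-ring state */

            while (!kthread_should_stop()) {
                    unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
                    bool do_eoi;

                    if (wait_event_interruptible(ring->wq,
                                    ring->waiting_reqs ||
                                    kthread_should_stop()))
                            break;
                    if (wait_event_interruptible(ring->pending_free_wq,
                                    !list_empty(&ring->pending_free) ||
                                    kthread_should_stop()))
                            break;

                    do_eoi = ring->waiting_reqs;
                    ring->waiting_reqs = 0;
                    smp_mb();  /* clear the flag before scanning the ring */

                    if (process_ring(ring, &eoi_flags) > 0)
                            ring->waiting_reqs = 1;  /* more work queued */

                    /* EOI re-arms event delivery; defer until drained. */
                    if (do_eoi && !ring->waiting_reqs)
                            xen_irq_lateeoi(ring->irq, eoi_flags);
            }
            return 0;
    }
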
636 void xen_blkbk_free_caches(struct xen_blkif_ring *ring) in xen_blkbk_free_caches() argument
639 free_persistent_gnts(ring); in xen_blkbk_free_caches()
642 gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */); in xen_blkbk_free_caches()
646 struct xen_blkif_ring *ring, in xen_blkbk_unmap_prepare() argument
656 put_persistent_gnt(ring, pages[i]->persistent_gnt); in xen_blkbk_unmap_prepare()
674 struct xen_blkif_ring *ring = pending_req->ring; in xen_blkbk_unmap_and_respond_callback() local
675 struct xen_blkif *blkif = ring->blkif; in xen_blkbk_unmap_and_respond_callback()
681 gnttab_page_cache_put(&ring->free_pages, data->pages, data->count); in xen_blkbk_unmap_and_respond_callback()
682 make_response(ring, pending_req->id, in xen_blkbk_unmap_and_respond_callback()
684 free_req(ring, pending_req); in xen_blkbk_unmap_and_respond_callback()
697 if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) { in xen_blkbk_unmap_and_respond_callback()
706 struct xen_blkif_ring *ring = req->ring; in xen_blkbk_unmap_and_respond() local
710 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs, in xen_blkbk_unmap_and_respond()
731 static void xen_blkbk_unmap(struct xen_blkif_ring *ring, in xen_blkbk_unmap() argument
743 invcount = xen_blkbk_unmap_prepare(ring, pages, batch, in xen_blkbk_unmap()
748 gnttab_page_cache_put(&ring->free_pages, unmap_pages, in xen_blkbk_unmap()
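
Both unmap paths share xen_blkbk_unmap_prepare() (646-656): for a segment
backed by a persistent grant it merely drops the in-use pin and keeps the
mapping cached, otherwise it queues the page for a real grant unmap. The
synchronous xen_blkbk_unmap() (731-748) then works through the segments in
fixed-size batches. A hedged sketch of that batch shape; BATCH,
page_is_persistent(), put_persistent_ref() and unmap_batch() are illustrative
stand-ins (the last for gnttab_unmap_refs() plus gnttab_page_cache_put()):

    #include <linux/types.h>
    #include <linux/minmax.h>

    #define BATCH 32                              /* assumed batch size */

    bool page_is_persistent(struct page *pg);     /* hypothetical */
    void put_persistent_ref(struct page *pg);     /* hypothetical */
    void unmap_batch(struct page **pages, int n); /* hypothetical */

    static void unmap_all(struct page **pages, int num)
    {
            while (num) {
                    int batch = min(num, BATCH);
                    int i, invcount = 0;
                    struct page *unmap_pages[BATCH];

                    for (i = 0; i < batch; i++) {
                            if (page_is_persistent(pages[i])) {
                                    /* cached mapping stays live */
                                    put_persistent_ref(pages[i]);
                                    continue;
                            }
                            unmap_pages[invcount++] = pages[i];
                    }
                    if (invcount)
                            unmap_batch(unmap_pages, invcount);
                    pages += batch;
                    num -= batch;
            }
    }
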
756 static int xen_blkbk_map(struct xen_blkif_ring *ring, in xen_blkbk_map() argument
769 struct xen_blkif *blkif = ring->blkif; in xen_blkbk_map()
784 ring, in xen_blkbk_map()
796 if (gnttab_page_cache_get(&ring->free_pages, in xen_blkbk_map()
798 gnttab_page_cache_put(&ring->free_pages, in xen_blkbk_map()
833 gnttab_page_cache_put(&ring->free_pages, in xen_blkbk_map()
844 ring->persistent_gnt_c < max_pgrants) { in xen_blkbk_map()
862 if (add_persistent_gnt(ring, in xen_blkbk_map()
870 persistent_gnt->gnt, ring->persistent_gnt_c, in xen_blkbk_map()
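
xen_blkbk_map() (756-870) tries the persistent-grant cache first for every
segment; on a miss it takes a page from the ring's free-page cache and queues
a grant-map operation, and after the hypercall it promotes new mappings into
the tree while persistent_gnt_c is below max_pgrants (overflow_max_grants
records when it is not). A hedged pseudo-shape of the per-segment decision,
leaning on driver-local types (struct grant_page, vaddr()) that are assumed
here rather than defined:

    /* Per-segment fast path vs. map path (illustrative excerpt). */
    for (i = 0; i < nseg; i++) {
            struct persistent_gnt *pg = NULL;

            if (use_persistent_gnts)
                    pg = get_persistent_gnt(ring, pages[i]->gref);
            if (pg) {
                    /* Hit: reuse the long-lived mapping, no hypercall. */
                    pages[i]->page = pg->page;
                    pages[i]->persistent_gnt = pg;
            } else {
                    /* Miss: grab a free page, queue a grant map op. */
                    if (gnttab_page_cache_get(&ring->free_pages,
                                              &pages[i]->page))
                            goto out_of_memory;
                    gnttab_set_map_op(&map[segs_to_map++],
                                      vaddr(pages[i]->page), flags,
                                      pages[i]->gref, blkif->domid);
            }
    }
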
906 rc = xen_blkbk_map(pending_req->ring, pending_req->segments, in xen_blkbk_map_seg()
919 struct xen_blkif_ring *ring = pending_req->ring; in xen_blkbk_parse_indirect() local
930 rc = xen_blkbk_map(ring, pages, indirect_grefs, true); in xen_blkbk_parse_indirect()
962 xen_blkbk_unmap(ring, pages, indirect_grefs); in xen_blkbk_parse_indirect()
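
Indirect requests carry no inline segments: xen_blkbk_parse_indirect()
(919-962) first maps the indirect grant pages (930), copies each
blkif_request_segment descriptor out of them, and finally unmaps the indirect
pages again (962). A hedged sketch of the copy-out loop;
SEGS_PER_INDIRECT_FRAME is the driver-local per-page descriptor count, and
kmap_atomic() matches older kernels (newer ones would use kmap_local_page()):

    /* Pull segment descriptors out of mapped indirect pages (sketch). */
    for (n = 0; n < nseg; n++) {
            /* hop to the next indirect page at each frame boundary */
            if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                    if (segments)
                            kunmap_atomic(segments);
                    segments = kmap_atomic(
                            pages[n / SEGS_PER_INDIRECT_FRAME]->page);
            }
            seg[n].gref = segments[n % SEGS_PER_INDIRECT_FRAME].gref;
            /* first_sect/last_sect bound the I/O within each 4k page */
    }
    if (segments)
            kunmap_atomic(segments);
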
966 static int dispatch_discard_io(struct xen_blkif_ring *ring, in dispatch_discard_io() argument
971 struct xen_blkif *blkif = ring->blkif; in dispatch_discard_io()
987 ring->st_ds_req++; in dispatch_discard_io()
1005 make_response(ring, req->u.discard.id, req->operation, status); in dispatch_discard_io()
1010 static int dispatch_other_io(struct xen_blkif_ring *ring, in dispatch_other_io() argument
1014 free_req(ring, pending_req); in dispatch_other_io()
1015 make_response(ring, req->u.other.id, req->operation, in dispatch_other_io()
1020 static void xen_blk_drain_io(struct xen_blkif_ring *ring) in xen_blk_drain_io() argument
1022 struct xen_blkif *blkif = ring->blkif; in xen_blk_drain_io()
1026 if (atomic_read(&ring->inflight) == 0) in xen_blk_drain_io()
1044 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0); in __end_block_io_op()
1049 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0); in __end_block_io_op()
1187 __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) in __do_block_io_op() argument
1189 union blkif_back_rings *blk_rings = &ring->blk_rings; in __do_block_io_op()
1202 rp, rc, rp - rc, ring->blkif->vbd.pdevice); in __do_block_io_op()
1218 pending_req = alloc_req(ring); in __do_block_io_op()
1220 ring->st_oo_req++; in __do_block_io_op()
1225 switch (ring->blkif->blk_protocol) { in __do_block_io_op()
1249 if (dispatch_rw_block_io(ring, &req, pending_req)) in __do_block_io_op()
1253 free_req(ring, pending_req); in __do_block_io_op()
1254 if (dispatch_discard_io(ring, &req)) in __do_block_io_op()
1258 if (dispatch_other_io(ring, &req, pending_req)) in __do_block_io_op()
1271 do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) in do_block_io_op() argument
1273 union blkif_back_rings *blk_rings = &ring->blk_rings; in do_block_io_op()
1277 more_to_do = __do_block_io_op(ring, eoi_flags); in do_block_io_op()
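
__do_block_io_op() (1187-1258) is the consumer half of the shared ring: it
snapshots the producer index, issues a read barrier, then copies requests out
one at a time in whichever ABI the guest negotiated (the protocol switch at
1225), stopping early when alloc_req() fails (st_oo_req at 1220). The
do_block_io_op() wrapper (1271-1277) finishes with
RING_FINAL_CHECK_FOR_REQUESTS so a request racing in after the scan is not
lost. A hedged sketch of the standard Xen ring-consumer idiom; the my_* types
would come from DEFINE_RING_TYPES() and handle_request() is a stand-in:

    #include <linux/string.h>
    #include <xen/interface/io/ring.h>

    static int consume_ring(struct my_back_ring *br)
    {
            struct my_request req;
            RING_IDX rc, rp;
            int more_to_do = 0;

            rc = br->req_cons;
            rp = br->sring->req_prod;
            rmb();  /* read req_prod before the request slots themselves */

            while (rc != rp) {
                    if (RING_REQUEST_CONS_OVERFLOW(br, rc))
                            break;  /* misbehaving producer */
                    /* copy out: never read shared memory twice */
                    memcpy(&req, RING_GET_REQUEST(br, rc), sizeof(req));
                    br->req_cons = ++rc;
                    handle_request(&req);
            }
            /* Re-check after publishing req_cons: closes the race. */
            RING_FINAL_CHECK_FOR_REQUESTS(br, more_to_do);
            return more_to_do;
    }
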
1290 static int dispatch_rw_block_io(struct xen_blkif_ring *ring, in dispatch_rw_block_io() argument
1319 ring->st_rd_req++; in dispatch_rw_block_io()
1323 ring->st_wr_req++; in dispatch_rw_block_io()
1331 ring->st_f_req++; in dispatch_rw_block_io()
1357 pending_req->ring = ring; in dispatch_rw_block_io()
1384 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) { in dispatch_rw_block_io()
1389 ring->blkif->vbd.pdevice); in dispatch_rw_block_io()
1401 ring->blkif->domid); in dispatch_rw_block_io()
1410 xen_blk_drain_io(pending_req->ring); in dispatch_rw_block_io()
1425 xen_blkif_get(ring->blkif); in dispatch_rw_block_io()
1426 atomic_inc(&ring->inflight); in dispatch_rw_block_io()
1467 ring->st_rd_sect += preq.nr_sects; in dispatch_rw_block_io()
1469 ring->st_wr_sect += preq.nr_sects; in dispatch_rw_block_io()
1474 xen_blkbk_unmap(ring, pending_req->segments, in dispatch_rw_block_io()
1478 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); in dispatch_rw_block_io()
1479 free_req(ring, pending_req); in dispatch_rw_block_io()
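
dispatch_rw_block_io() (1290-1479) starts by classifying the request, which
is also where the per-ring stat counters at 1319/1323/1331 are bumped; on any
later failure it unwinds through xen_blkbk_unmap() (1474), answers with
BLKIF_RSP_ERROR (1478) and recycles the pending_req (1479). A hedged sketch
of the classification step, closely following those counters; the exact
REQ_* flag choices vary across kernel versions:

    switch (req_operation) {
    case BLKIF_OP_READ:
            ring->st_rd_req++;
            operation = REQ_OP_READ;
            break;
    case BLKIF_OP_WRITE:
            ring->st_wr_req++;
            operation = REQ_OP_WRITE;
            operation_flags = REQ_SYNC | REQ_IDLE;
            break;
    case BLKIF_OP_WRITE_BARRIER:
            drain = true;   /* barrier = drain in-flight I/O, then flush */
            fallthrough;
    case BLKIF_OP_FLUSH_DISKCACHE:
            ring->st_f_req++;
            operation = REQ_OP_WRITE;
            operation_flags = REQ_PREFLUSH;
            break;
    default:
            goto fail_response;
    }
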
1489 static void make_response(struct xen_blkif_ring *ring, u64 id, in make_response() argument
1497 spin_lock_irqsave(&ring->blk_ring_lock, flags); in make_response()
1498 blk_rings = &ring->blk_rings; in make_response()
1500 switch (ring->blkif->blk_protocol) { in make_response()
1523 spin_unlock_irqrestore(&ring->blk_ring_lock, flags); in make_response()
1525 notify_remote_via_irq(ring->irq); in make_response()
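
make_response() (1489-1525) is the producer half: the response is written
under blk_ring_lock in the guest's ABI, the private producer index is
published with RING_PUSH_RESPONSES_AND_CHECK_NOTIFY (which also reports
whether the frontend actually needs an event), and only then is the irq
raised. A hedged sketch of that idiom, with the same notional
DEFINE_RING_TYPES() types as in the consumer sketch above:

    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <xen/events.h>
    #include <xen/interface/io/ring.h>

    static void push_response(struct my_back_ring *br, spinlock_t *lock,
                              unsigned int irq, struct my_response *rsp)
    {
            unsigned long flags;
            int notify;

            spin_lock_irqsave(lock, flags);
            memcpy(RING_GET_RESPONSE(br, br->rsp_prod_pvt), rsp,
                   sizeof(*rsp));
            br->rsp_prod_pvt++;
            /* Publishes rsp_prod behind a write barrier. */
            RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(br, notify);
            spin_unlock_irqrestore(lock, flags);

            if (notify)
                    notify_remote_via_irq(irq);
    }
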