/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it. We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have
 * to track the space used by the log vectors separately to prevent log space
 * hangs due to inaccurate accounting (i.e. a leak) of the used log space
 * through the CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}
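/*
 * Example usage (illustrative sketch only, excluded from the build): a log
 * item format routine typically builds its log vector either by copying a
 * preformatted region in one call with xlog_copy_iovec(), or by reserving
 * space with xlog_prepare_iovec(), filling it in place, and then accounting
 * for it with xlog_finish_iovec().  The function name and payload below are
 * hypothetical; real callers use the region types from xfs_log_format.h
 * (XLOG_REG_TYPE_ICORE is used here purely as a stand-in).
 */
#if 0	/* usage sketch, not compiled */
static void
xfs_example_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_log_iovec	*vecp = NULL;	/* must start out NULL */
	uint64_t		payload[2] = { 0, 0 };	/* hypothetical data */
	void			*ptr;

	/* one-shot: prepare the iovec, copy the region and finish it */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE, payload,
			sizeof(payload));

	/* or format directly into the vector buffer */
	ptr = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE);
	memcpy(ptr, payload, sizeof(payload));
	xlog_finish_iovec(lv, vecp, sizeof(payload));
}
#endif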
/*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
 */
typedef struct xfs_log_callback {
	struct xfs_log_callback	*cb_next;
	void			(*cb_func)(void *, int);
	void			*cb_arg;
} xfs_log_callback_t;

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)

/*
 * Flags to xfs_log_force()
 *
 * XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xfs_log_callback;

xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       struct xlog_ticket *ticket,
		       struct xlog_in_core **iclog,
		       bool regrant);
int	  _xfs_log_force(struct xfs_mount *mp,
			 uint		flags,
			 int		*log_forced);
void	  xfs_log_force(struct xfs_mount	*mp,
			uint		flags);
int	  _xfs_log_force_lsn(struct xfs_mount *mp,
			     xfs_lsn_t		lsn,
			     uint		flags,
			     int		*log_forced);
void	  xfs_log_force_lsn(struct xfs_mount	*mp,
			    xfs_lsn_t		lsn,
			    uint		flags);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int			num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
int	  xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
int	  xfs_log_notify(struct xfs_mount	*mp,
			 struct xlog_in_core	*iclog,
			 struct xfs_log_callback *callback_entry);
int	  xfs_log_release_iclog(struct xfs_mount *mp,
			 struct xlog_in_core	 *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  __uint8_t	   clientid,
			  bool		   permanent,
			  uint		   t_type);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
int	  xfs_log_unmount_write(struct xfs_mount *mp);
void	  xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);
int	  xfs_log_need_covered(struct xfs_mount *mp);

void	xlog_iodone(struct xfs_buf *);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
				xfs_lsn_t *commit_lsn, bool regrant);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_worker(struct work_struct *work);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);

#endif	/* __XFS_LOG_H__ */