/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>

/**
 * scribble - space to hold a throwaway P or Q buffer for synchronous gen_syndrome
 */
static struct page *scribble;

static bool is_raid6_zero_block(struct page *p)
{
	return p == (void *) raid6_empty_zero_page;
}

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
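
/* For illustration only (a hypothetical 6-device stripe, not a requirement
 * beyond the layout described above): with disks == 6 the caller arranges
 *
 *	blocks[0..3] = data pages D0..D3
 *	blocks[4]    = P destination, i.e. P(blocks, 6)
 *	blocks[5]    = Q destination, i.e. Q(blocks, 6)
 *
 * A data slot may also carry the raid6_empty_zero_page sentinel, which is
 * collapsed out of the hardware source list in do_async_gen_syndrome().
 */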

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset, int disks,
		      size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum dma_ctrl_flags dma_flags = 0;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;
	int idx;
	int i;

	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
	if (P(blocks, disks))
		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_P;
	if (Q(blocks, disks))
		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_Q;

	/* convert source addresses being careful to collapse 'empty'
	 * sources and update the coefficients accordingly
	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (is_raid6_zero_block(blocks[i]))
			continue;
		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
					    DMA_TO_DEVICE);
		coefs[idx] = scfs[i];
		idx++;
	}
	src_cnt = idx;
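
	/* Illustrative note (the numbers are hypothetical): the sources are
	 * consumed in chunks of at most dma_maxpq().  A device limited to 8
	 * PQ sources that is asked to process 13 gets one descriptor with 8
	 * sources followed by a continuation descriptor (DMA_PREP_CONTINUE)
	 * with the remaining 5; on the continuation pass dma_maxpq() may
	 * report a smaller limit since the partial P/Q results may need to
	 * be fed back in as extra sources.
	 */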
	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they can not provide a descriptor
		 */
		for (;;) {
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &dma_src[src_off],
						     pq_src_cnt,
						     &coefs[src_off], len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (is_raid6_zero_block(blocks[i])) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = blocks[i];
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}
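
/* For reference, both the synchronous path above and the hardware path
 * compute the usual RAID-6 P/Q pair over GF(2^8) (primitive polynomial
 * 0x11d, generator g = {02}), i.e. for data blocks D_0..D_{n-1}:
 *
 *	P = D_0 ^ D_1 ^ ... ^ D_{n-1}
 *	Q = g^0*D_0 ^ g^1*D_1 ^ ... ^ g^{n-1}*D_{n-1}
 *
 * which is why async_gen_syndrome() below passes raid6_gfexp (the table of
 * powers of g) as the per-source coefficient array.
 */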

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
	}

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = scribble;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = scribble;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
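
/* Example (hypothetical caller, not taken from an in-tree user): a
 * stripe-write path with four data pages might generate P/Q roughly as
 *
 *	struct async_submit_ctl submit;
 *	struct page *blocks[6] = { d0, d1, d2, d3, p, q };
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback,
 *			  callback_arg, addr_conv);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 *
 * where d0..d3, p, q, callback, callback_arg and addr_conv are placeholder
 * names, and addr_conv points to scribble space of at least 6 entries so
 * that 'blocks' is not clobbered (see the 'blocks' note above).
 */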

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
						      NULL, 0, blocks, disks,
						      len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks < 4);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
		struct device *dev = device->dev;
		dma_addr_t *pq = &dma_src[disks-2];
		int i;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		for (i = 0; i < disks; i++)
			if (likely(blocks[i])) {
				BUG_ON(is_raid6_zero_block(blocks[i]));
				dma_src[i] = dma_map_page(dev, blocks[i],
							  offset, len,
							  DMA_TO_DEVICE);
			}

		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
							    disks - 2,
							    raid6_gfexp,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
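		/* Synchronous fallback.  P can be re-derived with a plain
		 * xor of the data blocks into 'spare' since P is simple
		 * parity; Q needs the GF(2^8) multiply path, so
		 * async_gen_syndrome() is reused below with P disabled and
		 * Q redirected into 'spare'.  Each recomputed value is then
		 * memcmp'd against the existing block.
		 */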
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
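
/* Example check of the result (hypothetical completion handler): once the
 * validation has completed, a caller typically does something like
 *
 *	if (*pqres & SUM_CHECK_P_RESULT)
 *		handle_bad_p();
 *	if (*pqres & SUM_CHECK_Q_RESULT)
 *		handle_bad_q();
 *
 * where handle_bad_p/q are placeholders.  pqres must remain valid until the
 * operation completes, since the hardware path fills it in at completion
 * time.
 */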

static int __init async_pq_init(void)
{
	scribble = alloc_page(GFP_KERNEL);

	if (scribble)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(scribble);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");