// SPDX-License-Identifier: GPL-2.0
/*
 * Optimized xor_block operation for RAID4/5
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/raid/xor.h>
#include <asm/xor.h>

/*
 * Each helper XORs the source buffers into the destination p1 using the
 * XC instruction. Full 256-byte chunks (the maximum XC length) are handled
 * in the loop at label 0; the remaining 1..256 bytes are handled by EX,
 * which executes the XC at label 2 (and, for more than two buffers, the
 * following 6-byte XC instructions) with the length taken from the low
 * byte of the adjusted byte count. Registers 0 (chunk counter) and 1
 * (address of the EX target) are clobbered.
 */
static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2)
		: : "0", "1", "cc", "memory");
}

static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3)
{
	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	xc	0(256,%1),0(%3)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	la	%3,256(%3)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	ex	%0,6(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"	xc	0(1,%1),0(%3)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
		: : "0", "1", "cc", "memory");
}

static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3, unsigned long *p4)
{
	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	xc	0(256,%1),0(%3)\n"
		"	xc	0(256,%1),0(%4)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	la	%3,256(%3)\n"
		"	la	%4,256(%4)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	ex	%0,6(1)\n"
		"	ex	%0,12(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"	xc	0(1,%1),0(%3)\n"
		"	xc	0(1,%1),0(%4)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
		: : "0", "1", "cc", "memory");
}

static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
		     unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	/* Get around a gcc oddity */
	register unsigned long *reg7 asm ("7") = p5;

	asm volatile(
		"	larl	1,2f\n"
		"	aghi	%0,-1\n"
		"	jm	3f\n"
		"	srlg	0,%0,8\n"
		"	ltgr	0,0\n"
		"	jz	1f\n"
		"0:	xc	0(256,%1),0(%2)\n"
		"	xc	0(256,%1),0(%3)\n"
		"	xc	0(256,%1),0(%4)\n"
		"	xc	0(256,%1),0(%5)\n"
		"	la	%1,256(%1)\n"
		"	la	%2,256(%2)\n"
		"	la	%3,256(%3)\n"
		"	la	%4,256(%4)\n"
		"	la	%5,256(%5)\n"
		"	brctg	0,0b\n"
		"1:	ex	%0,0(1)\n"
		"	ex	%0,6(1)\n"
		"	ex	%0,12(1)\n"
		"	ex	%0,18(1)\n"
		"	j	3f\n"
		"2:	xc	0(1,%1),0(%2)\n"
		"	xc	0(1,%1),0(%3)\n"
		"	xc	0(1,%1),0(%4)\n"
		"	xc	0(1,%1),0(%5)\n"
		"3:\n"
		: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
		  "+a" (reg7)
		: : "0", "1", "cc", "memory");
}

struct xor_block_template xor_block_xc = {
	.name = "xc",
	.do_2 = xor_xc_2,
	.do_3 = xor_xc_3,
	.do_4 = xor_xc_4,
	.do_5 = xor_xc_5,
};
EXPORT_SYMBOL(xor_block_xc);
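
/*
 * For reference, a sketch (an assumption about the companion header, not a
 * verbatim copy) of how the template above is expected to be wired up: the
 * architecture's <asm/xor.h> typically offers it to the calibration code in
 * crypto/xor.c via the XOR_TRY_TEMPLATES hook, e.g.:
 *
 *	extern struct xor_block_template xor_block_xc;
 *
 *	#undef XOR_TRY_TEMPLATES
 *	#define XOR_TRY_TEMPLATES		\
 *	do {					\
 *		xor_speed(&xor_block_xc);	\
 *	} while (0)
 *
 * calibrate_xor_blocks() benchmarks each template offered this way, and
 * xor_blocks() then uses the fastest one for RAID4/5 parity calculations.
 */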