Lines matching refs:rmd — every reference to the rmd argument/local (struct pqi_scsi_dev_raid_map_data) along the smartpqi driver's RAID bypass path.

87 	struct pqi_scsi_dev_raid_map_data *rmd);
91 struct pqi_scsi_dev_raid_map_data *rmd);
2647 struct pqi_scsi_dev_raid_map_data *rmd) in pqi_aio_raid_level_supported() argument
2651 switch (rmd->raid_level) { in pqi_aio_raid_level_supported()
2655 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2656 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) in pqi_aio_raid_level_supported()
2660 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2661 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) in pqi_aio_raid_level_supported()
2665 if (rmd->is_write && (!ctrl_info->enable_r5_writes || in pqi_aio_raid_level_supported()
2666 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2670 if (rmd->is_write && (!ctrl_info->enable_r6_writes || in pqi_aio_raid_level_supported()
2671 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
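
pqi_aio_raid_level_supported() gates the AIO bypass: the checks shown only constrain writes (each is guarded by rmd->is_write), and a write must be explicitly enabled for its RAID level and fit under the controller's per-level size limit. The case labels themselves are elided above since they do not mention rmd. A minimal standalone sketch of the decision table, with simplified names and illustrative level labels:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative level labels; the real case labels are elided above. */
enum raid_level { RL_RAID_1, RL_RAID_TRIPLE, RL_RAID_5, RL_RAID_6, RL_OTHER };

/* True when the enable-flag/size checks shown above reject a *write*;
 * reads never trip these checks. */
static bool aio_write_rejected(enum raid_level level, uint32_t data_length,
	bool r1_writes, bool r5_writes, bool r6_writes,
	uint32_t max_r1_2drive, uint32_t max_r1_3drive, uint32_t max_r56)
{
	switch (level) {
	case RL_RAID_1:		/* two-drive mirror */
		return !r1_writes || data_length > max_r1_2drive;
	case RL_RAID_TRIPLE:	/* three-drive mirror */
		return !r1_writes || data_length > max_r1_3drive;
	case RL_RAID_5:
		return !r5_writes || data_length > max_r56;
	case RL_RAID_6:
		return !r6_writes || data_length > max_r56;
	default:		/* behavior for other levels is elided above */
		return false;
	}
}
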
2685 struct pqi_scsi_dev_raid_map_data *rmd) in pqi_get_aio_lba_and_block_count() argument
2690 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2693 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | in pqi_get_aio_lba_and_block_count()
2695 rmd->block_cnt = (u32)scmd->cmnd[4]; in pqi_get_aio_lba_and_block_count()
2696 if (rmd->block_cnt == 0) in pqi_get_aio_lba_and_block_count()
2697 rmd->block_cnt = 256; in pqi_get_aio_lba_and_block_count()
2700 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2703 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2704 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); in pqi_get_aio_lba_and_block_count()
2707 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2710 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2711 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); in pqi_get_aio_lba_and_block_count()
2714 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2717 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2718 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); in pqi_get_aio_lba_and_block_count()
2725 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); in pqi_get_aio_lba_and_block_count()
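
pqi_get_aio_lba_and_block_count() decodes the starting LBA and block count from the 6-, 10-, 12- and 16-byte READ/WRITE CDB layouts, flags writes, and records scsi_bufflen() as the transfer length. A self-contained sketch of the same big-endian decode (the helper names here are illustrative):

#include <stdint.h>

static inline uint16_t get_be16(const uint8_t *p)
{
	return ((uint16_t)p[0] << 8) | p[1];
}

static inline uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)get_be16(p) << 16) | get_be16(p + 2);
}

static inline uint64_t get_be64(const uint8_t *p)
{
	return ((uint64_t)get_be32(p) << 32) | get_be32(p + 4);
}

static void decode_rw_cdb(const uint8_t *cdb, int cdb_len,
			  uint64_t *lba, uint32_t *nblocks)
{
	switch (cdb_len) {
	case 6:		/* 21-bit LBA; a length of 0 means 256 blocks */
		*lba = ((uint64_t)(cdb[1] & 0x1f) << 16) | get_be16(&cdb[2]);
		*nblocks = cdb[4] ? cdb[4] : 256;
		break;
	case 10:
		*lba = get_be32(&cdb[2]);
		*nblocks = get_be16(&cdb[7]);
		break;
	case 12:
		*lba = get_be32(&cdb[2]);
		*nblocks = get_be32(&cdb[6]);
		break;
	case 16:
		*lba = get_be64(&cdb[2]);
		*nblocks = get_be32(&cdb[10]);
		break;
	default:
		*lba = 0;
		*nblocks = 0;
		break;
	}
}
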
2731 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) in pci_get_aio_common_raid_map_values() argument
2737 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; in pci_get_aio_common_raid_map_values()
2740 if (rmd->last_block >= in pci_get_aio_common_raid_map_values()
2742 rmd->last_block < rmd->first_block) in pci_get_aio_common_raid_map_values()
2745 rmd->data_disks_per_row = in pci_get_aio_common_raid_map_values()
2747 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); in pci_get_aio_common_raid_map_values()
2748 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); in pci_get_aio_common_raid_map_values()
2751 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; in pci_get_aio_common_raid_map_values()
2752 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ in pci_get_aio_common_raid_map_values()
2755 tmpdiv = rmd->first_block; in pci_get_aio_common_raid_map_values()
2756 do_div(tmpdiv, rmd->blocks_per_row); in pci_get_aio_common_raid_map_values()
2757 rmd->first_row = tmpdiv; in pci_get_aio_common_raid_map_values()
2758 tmpdiv = rmd->last_block; in pci_get_aio_common_raid_map_values()
2759 do_div(tmpdiv, rmd->blocks_per_row); in pci_get_aio_common_raid_map_values()
2760 rmd->last_row = tmpdiv; in pci_get_aio_common_raid_map_values()
2761 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2762 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2763 tmpdiv = rmd->first_row_offset; in pci_get_aio_common_raid_map_values()
2764 do_div(tmpdiv, rmd->strip_size); in pci_get_aio_common_raid_map_values()
2765 rmd->first_column = tmpdiv; in pci_get_aio_common_raid_map_values()
2766 tmpdiv = rmd->last_row_offset; in pci_get_aio_common_raid_map_values()
2767 do_div(tmpdiv, rmd->strip_size); in pci_get_aio_common_raid_map_values()
2768 rmd->last_column = tmpdiv; in pci_get_aio_common_raid_map_values()
2770 rmd->first_row = rmd->first_block / rmd->blocks_per_row; in pci_get_aio_common_raid_map_values()
2771 rmd->last_row = rmd->last_block / rmd->blocks_per_row; in pci_get_aio_common_raid_map_values()
2772 rmd->first_row_offset = (u32)(rmd->first_block - in pci_get_aio_common_raid_map_values()
2773 (rmd->first_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2774 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * in pci_get_aio_common_raid_map_values()
2775 rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2776 rmd->first_column = rmd->first_row_offset / rmd->strip_size; in pci_get_aio_common_raid_map_values()
2777 rmd->last_column = rmd->last_row_offset / rmd->strip_size; in pci_get_aio_common_raid_map_values()
2781 if (rmd->first_row != rmd->last_row || in pci_get_aio_common_raid_map_values()
2782 rmd->first_column != rmd->last_column) in pci_get_aio_common_raid_map_values()
2786 rmd->total_disks_per_row = rmd->data_disks_per_row + in pci_get_aio_common_raid_map_values()
2788 rmd->map_row = ((u32)(rmd->first_row >> in pci_get_aio_common_raid_map_values()
2791 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + in pci_get_aio_common_raid_map_values()
2792 rmd->first_column; in pci_get_aio_common_raid_map_values()
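
Before any geometry math, the driver rejects requests that run past the volume size or wrap around (last_block < first_block), and blocks_per_row is checked for zero since it divides everything that follows. The two parallel arithmetic blocks then compute identical values: the do_div() variant serves 32-bit builds and the plain-division variant 64-bit builds; the preprocessor lines selecting between them do not mention rmd, so the listing omits them. Bypass requires the request to sit inside a single strip: first_row == last_row and first_column == last_column. The geometry itself, as a standalone sketch:

#include <stdint.h>

struct rmd_geom {
	uint64_t first_row, last_row;
	uint32_t first_row_offset, last_row_offset;
	uint32_t first_column, last_column;
};

/* Map a logical block range onto RAID-map row/column coordinates,
 * the same arithmetic as the two division paths above. */
static void map_block_range(uint64_t first_block, uint64_t last_block,
			    uint32_t strip_size, uint32_t data_disks_per_row,
			    struct rmd_geom *g)
{
	uint64_t blocks_per_row = (uint64_t)strip_size * data_disks_per_row;

	g->first_row = first_block / blocks_per_row;
	g->last_row = last_block / blocks_per_row;
	g->first_row_offset = (uint32_t)(first_block - g->first_row * blocks_per_row);
	g->last_row_offset = (uint32_t)(last_block - g->last_row * blocks_per_row);
	g->first_column = g->first_row_offset / strip_size;
	g->last_column = g->last_row_offset / strip_size;
}
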
2797 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, in pqi_calc_aio_r5_or_r6() argument
2804 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ in pqi_calc_aio_r5_or_r6()
2809 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; in pqi_calc_aio_r5_or_r6()
2811 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2812 rmd->first_group = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2813 tmpdiv = rmd->first_group; in pqi_calc_aio_r5_or_r6()
2814 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2815 rmd->first_group = tmpdiv; in pqi_calc_aio_r5_or_r6()
2816 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2817 rmd->last_group = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2818 tmpdiv = rmd->last_group; in pqi_calc_aio_r5_or_r6()
2819 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2820 rmd->last_group = tmpdiv; in pqi_calc_aio_r5_or_r6()
2822 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2823 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2825 if (rmd->first_group != rmd->last_group) in pqi_calc_aio_r5_or_r6()
2830 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2831 do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2832 rmd->first_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2833 rmd->r5or6_first_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2834 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2835 do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2836 rmd->r5or6_last_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2838 rmd->first_row = rmd->r5or6_first_row = in pqi_calc_aio_r5_or_r6()
2839 rmd->first_block / rmd->stripesize; in pqi_calc_aio_r5_or_r6()
2840 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; in pqi_calc_aio_r5_or_r6()
2842 if (rmd->r5or6_first_row != rmd->r5or6_last_row) in pqi_calc_aio_r5_or_r6()
2847 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2848 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2849 tmpdiv = rmd->first_row_offset; in pqi_calc_aio_r5_or_r6()
2850 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2851 rmd->r5or6_first_row_offset = rmd->first_row_offset; in pqi_calc_aio_r5_or_r6()
2852 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2853 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2854 tmpdiv = rmd->r5or6_last_row_offset; in pqi_calc_aio_r5_or_r6()
2855 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2856 tmpdiv = rmd->r5or6_first_row_offset; in pqi_calc_aio_r5_or_r6()
2857 do_div(tmpdiv, rmd->strip_size); in pqi_calc_aio_r5_or_r6()
2858 rmd->first_column = rmd->r5or6_first_column = tmpdiv; in pqi_calc_aio_r5_or_r6()
2859 tmpdiv = rmd->r5or6_last_row_offset; in pqi_calc_aio_r5_or_r6()
2860 do_div(tmpdiv, rmd->strip_size); in pqi_calc_aio_r5_or_r6()
2861 rmd->r5or6_last_column = tmpdiv; in pqi_calc_aio_r5_or_r6()
2863 rmd->first_row_offset = rmd->r5or6_first_row_offset = in pqi_calc_aio_r5_or_r6()
2864 (u32)((rmd->first_block % rmd->stripesize) % in pqi_calc_aio_r5_or_r6()
2865 rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2867 rmd->r5or6_last_row_offset = in pqi_calc_aio_r5_or_r6()
2868 (u32)((rmd->last_block % rmd->stripesize) % in pqi_calc_aio_r5_or_r6()
2869 rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2871 rmd->first_column = in pqi_calc_aio_r5_or_r6()
2872 rmd->r5or6_first_row_offset / rmd->strip_size; in pqi_calc_aio_r5_or_r6()
2873 rmd->r5or6_first_column = rmd->first_column; in pqi_calc_aio_r5_or_r6()
2874 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; in pqi_calc_aio_r5_or_r6()
2876 if (rmd->r5or6_first_column != rmd->r5or6_last_column) in pqi_calc_aio_r5_or_r6()
2880 rmd->map_row = in pqi_calc_aio_r5_or_r6()
2881 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % in pqi_calc_aio_r5_or_r6()
2884 rmd->map_index = (rmd->first_group * in pqi_calc_aio_r5_or_r6()
2886 rmd->total_disks_per_row)) + in pqi_calc_aio_r5_or_r6()
2887 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; in pqi_calc_aio_r5_or_r6()
2889 if (rmd->is_write) { in pqi_calc_aio_r5_or_r6()
2901 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); in pqi_calc_aio_r5_or_r6()
2902 index *= rmd->total_disks_per_row; in pqi_calc_aio_r5_or_r6()
2905 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r5_or_r6()
2906 if (rmd->raid_level == SA_RAID_6) { in pqi_calc_aio_r5_or_r6()
2907 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; in pqi_calc_aio_r5_or_r6()
2908 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; in pqi_calc_aio_r5_or_r6()
2911 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2912 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2913 rmd->row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2915 rmd->row = rmd->first_block / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
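
RAID 5/6 adds one more containment level: the layout map repeats every layout_map_count rows, so a request must stay within one row group, one row, and one strip of that larger stripe set before it can bypass. For writes, the P/Q parity IT nexuses and (for RAID 6) the xor multiplier are then pulled from the raid map as shown above. A compact sketch of the containment test, with plain division standing in for do_div():

#include <stdbool.h>
#include <stdint.h>

/* True when the request fits inside a single strip of a RAID 5/6
 * stripe set (same group, same row, same column). */
static bool r56_single_strip(uint64_t first_block, uint64_t last_block,
			     uint64_t blocks_per_row,
			     uint32_t layout_map_count, uint32_t strip_size)
{
	uint64_t stripesize = blocks_per_row * layout_map_count;
	uint32_t first_group = (first_block % stripesize) / blocks_per_row;
	uint32_t last_group = (last_block % stripesize) / blocks_per_row;

	if (first_group != last_group)
		return false;
	if (first_block / stripesize != last_block / stripesize)
		return false;

	uint32_t first_off = (uint32_t)((first_block % stripesize) % blocks_per_row);
	uint32_t last_off = (uint32_t)((last_block % stripesize) % blocks_per_row);

	return first_off / strip_size == last_off / strip_size;
}
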
2922 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) in pqi_set_aio_cdb() argument
2925 if (rmd->disk_block > 0xffffffff) { in pqi_set_aio_cdb()
2926 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; in pqi_set_aio_cdb()
2927 rmd->cdb[1] = 0; in pqi_set_aio_cdb()
2928 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); in pqi_set_aio_cdb()
2929 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); in pqi_set_aio_cdb()
2930 rmd->cdb[14] = 0; in pqi_set_aio_cdb()
2931 rmd->cdb[15] = 0; in pqi_set_aio_cdb()
2932 rmd->cdb_length = 16; in pqi_set_aio_cdb()
2934 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; in pqi_set_aio_cdb()
2935 rmd->cdb[1] = 0; in pqi_set_aio_cdb()
2936 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); in pqi_set_aio_cdb()
2937 rmd->cdb[6] = 0; in pqi_set_aio_cdb()
2938 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); in pqi_set_aio_cdb()
2939 rmd->cdb[9] = 0; in pqi_set_aio_cdb()
2940 rmd->cdb_length = 10; in pqi_set_aio_cdb()
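
pqi_set_aio_cdb() rebuilds the CDB against the physical disk coordinates: READ/WRITE(16) when the disk LBA no longer fits in 32 bits, READ/WRITE(10) otherwise. A freestanding equivalent (the opcodes are the standard SCSI values):

#include <stdint.h>
#include <string.h>

#define READ_10		0x28
#define WRITE_10	0x2a
#define READ_16		0x88
#define WRITE_16	0x8a

/* Build a READ/WRITE CDB for the rewritten disk LBA; returns the CDB length. */
static int build_rw_cdb(uint8_t cdb[16], uint64_t lba, uint32_t nblocks,
			int is_write)
{
	memset(cdb, 0, 16);
	if (lba > 0xffffffffULL) {	/* needs the 64-bit LBA form */
		cdb[0] = is_write ? WRITE_16 : READ_16;
		for (int i = 0; i < 8; i++)
			cdb[2 + i] = (uint8_t)(lba >> (56 - 8 * i));
		for (int i = 0; i < 4; i++)
			cdb[10 + i] = (uint8_t)(nblocks >> (24 - 8 * i));
		return 16;
	}
	cdb[0] = is_write ? WRITE_10 : READ_10;
	for (int i = 0; i < 4; i++)
		cdb[2 + i] = (uint8_t)(lba >> (24 - 8 * i));
	cdb[7] = (uint8_t)(nblocks >> 8);
	cdb[8] = (uint8_t)nblocks;
	return 10;
}
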
2945 struct pqi_scsi_dev_raid_map_data *rmd) in pqi_calc_aio_r1_nexus() argument
2950 group = rmd->map_index / rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2952 index = rmd->map_index - (group * rmd->data_disks_per_row); in pqi_calc_aio_r1_nexus()
2953 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2954 index += rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2955 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2956 if (rmd->layout_map_count > 2) { in pqi_calc_aio_r1_nexus()
2957 index += rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2958 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2961 rmd->num_it_nexus_entries = rmd->layout_map_count; in pqi_calc_aio_r1_nexus()
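
For RAID 1 and RAID Triple writes, each mirror copy of a data drive sits data_disks_per_row entries further along in raid_map->disk_data[], so the driver collects one IT nexus per mirror. Sketch, with a flat array of handles standing in for disk_data[].aio_handle:

#include <stdint.h>

/* Gather one AIO handle per mirror copy (layout_map_count is 2 or 3). */
static void collect_mirror_handles(const uint32_t *aio_handles,
				   uint32_t map_index,
				   uint32_t data_disks_per_row,
				   uint32_t layout_map_count,
				   uint32_t it_nexus[3])
{
	uint32_t index = map_index % data_disks_per_row;

	for (uint32_t m = 0; m < layout_map_count; m++) {
		it_nexus[m] = aio_handles[index];
		index += data_disks_per_row;	/* next mirror copy */
	}
}
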
2974 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; in pqi_raid_bypass_submit_scsi_cmd() local
2976 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); in pqi_raid_bypass_submit_scsi_cmd()
2980 rmd.raid_level = device->raid_level; in pqi_raid_bypass_submit_scsi_cmd()
2982 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) in pqi_raid_bypass_submit_scsi_cmd()
2985 if (unlikely(rmd.block_cnt == 0)) in pqi_raid_bypass_submit_scsi_cmd()
2990 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); in pqi_raid_bypass_submit_scsi_cmd()
2996 if (rmd.is_write) { in pqi_raid_bypass_submit_scsi_cmd()
2997 pqi_calc_aio_r1_nexus(raid_map, &rmd); in pqi_raid_bypass_submit_scsi_cmd()
2999 group = device->next_bypass_group[rmd.map_index]; in pqi_raid_bypass_submit_scsi_cmd()
3001 if (next_bypass_group >= rmd.layout_map_count) in pqi_raid_bypass_submit_scsi_cmd()
3003 device->next_bypass_group[rmd.map_index] = next_bypass_group; in pqi_raid_bypass_submit_scsi_cmd()
3004 rmd.map_index += group * rmd.data_disks_per_row; in pqi_raid_bypass_submit_scsi_cmd()
3008 (rmd.layout_map_count > 1 || rmd.is_write)) { in pqi_raid_bypass_submit_scsi_cmd()
3009 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); in pqi_raid_bypass_submit_scsi_cmd()
3014 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) in pqi_raid_bypass_submit_scsi_cmd()
3017 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; in pqi_raid_bypass_submit_scsi_cmd()
3018 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + in pqi_raid_bypass_submit_scsi_cmd()
3019 rmd.first_row * rmd.strip_size + in pqi_raid_bypass_submit_scsi_cmd()
3020 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); in pqi_raid_bypass_submit_scsi_cmd()
3021 rmd.disk_block_cnt = rmd.block_cnt; in pqi_raid_bypass_submit_scsi_cmd()
3025 rmd.disk_block <<= raid_map->phys_blk_shift; in pqi_raid_bypass_submit_scsi_cmd()
3026 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; in pqi_raid_bypass_submit_scsi_cmd()
3029 if (unlikely(rmd.disk_block_cnt > 0xffff)) in pqi_raid_bypass_submit_scsi_cmd()
3032 pqi_set_aio_cdb(&rmd); in pqi_raid_bypass_submit_scsi_cmd()
3035 if (rmd.data_length > device->max_transfer_encrypted) in pqi_raid_bypass_submit_scsi_cmd()
3037 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); in pqi_raid_bypass_submit_scsi_cmd()
3043 if (rmd.is_write) { in pqi_raid_bypass_submit_scsi_cmd()
3048 encryption_info_ptr, device, &rmd); in pqi_raid_bypass_submit_scsi_cmd()
3052 encryption_info_ptr, device, &rmd); in pqi_raid_bypass_submit_scsi_cmd()
3056 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, in pqi_raid_bypass_submit_scsi_cmd()
3057 rmd.cdb, rmd.cdb_length, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
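
pqi_raid_bypass_submit_scsi_cmd() strings the pieces together: decode the CDB, check level support, validate the map geometry, rotate RAID 1 reads across mirror groups via next_bypass_group[], route RAID 1/5/6 writes to the dedicated submit paths, and otherwise build a plain AIO request against the computed disk block. In the final LBA arithmetic, (first_row_offset - first_column * strip_size) is simply the offset within the selected strip, and phys_blk_shift rescales when logical and physical block sizes differ. As a sketch:

#include <stdint.h>

/* The physical-disk LBA computed just before submission. */
static uint64_t physical_lba(uint64_t disk_starting_blk, uint64_t first_row,
			     uint32_t strip_size, uint32_t first_row_offset,
			     uint32_t first_column, uint32_t phys_blk_shift)
{
	uint64_t lba = disk_starting_blk +
		       first_row * strip_size +
		       (first_row_offset - first_column * strip_size);

	return lba << phys_blk_shift;	/* 0 unless block sizes differ */
}
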
5758 struct pqi_scsi_dev_raid_map_data *rmd) in pqi_aio_submit_r1_write_io() argument
5777 r1_request->num_drives = rmd->num_it_nexus_entries; in pqi_aio_submit_r1_write_io()
5778 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); in pqi_aio_submit_r1_write_io()
5779 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); in pqi_aio_submit_r1_write_io()
5780 if (rmd->num_it_nexus_entries == 3) in pqi_aio_submit_r1_write_io()
5781 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); in pqi_aio_submit_r1_write_io()
5787 if (rmd->cdb_length > sizeof(r1_request->cdb)) in pqi_aio_submit_r1_write_io()
5788 rmd->cdb_length = sizeof(r1_request->cdb); in pqi_aio_submit_r1_write_io()
5789 r1_request->cdb_length = rmd->cdb_length; in pqi_aio_submit_r1_write_io()
5790 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); in pqi_aio_submit_r1_write_io()
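
The RAID 1 write request carries every mirror's IT nexus so the firmware replicates the write itself, and the CDB is clamped to the size of the request's CDB field. A simplified view of the fields populated above (the actual structure uses it_nexus_1..it_nexus_3 rather than an array):

#include <stdint.h>

/* Sketch of the essential pqi RAID 1 write request fields (simplified). */
struct r1_write_req_sketch {
	uint8_t	 num_drives;	/* number of mirror copies: 2 or 3 */
	uint32_t it_nexus[3];	/* one AIO handle per copy */
	uint8_t	 cdb_length;	/* clamped to sizeof(cdb) */
	uint8_t	 cdb[16];
};
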
5819 struct pqi_scsi_dev_raid_map_data *rmd) in pqi_aio_submit_r56_write_io() argument
5841 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); in pqi_aio_submit_r56_write_io()
5842 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); in pqi_aio_submit_r56_write_io()
5843 if (rmd->raid_level == SA_RAID_6) { in pqi_aio_submit_r56_write_io()
5844 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); in pqi_aio_submit_r56_write_io()
5845 r56_request->xor_multiplier = rmd->xor_mult; in pqi_aio_submit_r56_write_io()
5849 put_unaligned_le64(rmd->row, &r56_request->row); in pqi_aio_submit_r56_write_io()
5854 if (rmd->cdb_length > sizeof(r56_request->cdb)) in pqi_aio_submit_r56_write_io()
5855 rmd->cdb_length = sizeof(r56_request->cdb); in pqi_aio_submit_r56_write_io()
5856 r56_request->cdb_length = rmd->cdb_length; in pqi_aio_submit_r56_write_io()
5857 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); in pqi_aio_submit_r56_write_io()
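
The RAID 5/6 write request instead names a single data drive plus the P (and, for RAID 6, Q) parity drives, together with the xor multiplier and stripe row the firmware needs for the parity update. Simplified field view, mirroring the assignments above:

#include <stdint.h>

/* Sketch of the essential pqi RAID 5/6 write request fields (simplified). */
struct r56_write_req_sketch {
	uint32_t data_it_nexus;		/* target data drive */
	uint32_t p_parity_it_nexus;	/* P parity drive */
	uint32_t q_parity_it_nexus;	/* Q parity drive, RAID 6 only */
	uint8_t	 xor_multiplier;	/* RAID 6 Galois-field coefficient */
	uint64_t row;			/* stripe row being updated */
	uint8_t	 cdb_length;		/* clamped to sizeof(cdb) */
	uint8_t	 cdb[16];
};
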
5937 struct pqi_scsi_dev_raid_map_data rmd; in pqi_is_parity_write_stream() local
5942 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); in pqi_is_parity_write_stream()
5947 if (!rmd.is_write) in pqi_is_parity_write_stream()
5973 rmd.first_block >= pqi_stream_data->next_lba) && in pqi_is_parity_write_stream()
5974 rmd.first_block <= pqi_stream_data->next_lba + in pqi_is_parity_write_stream()
5975 rmd.block_cnt) { in pqi_is_parity_write_stream()
5976 pqi_stream_data->next_lba = rmd.first_block + in pqi_is_parity_write_stream()
5977 rmd.block_cnt; in pqi_is_parity_write_stream()
5998 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; in pqi_is_parity_write_stream()
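
pqi_is_parity_write_stream() detects sequential write streams so they skip the bypass and let the controller coalesce full-stripe writes: a write whose starting LBA lands within [next_lba, next_lba + block_cnt] continues a stream, and the tracker then advances next_lba past the request. Sketch of that test (the nonzero-next_lba guard is an assumption about the condition elided at line 5973):

#include <stdbool.h>
#include <stdint.h>

struct stream_slot {
	uint64_t next_lba;	/* LBA one past the last write in the stream */
};

/* True when this write continues the tracked stream; advances the tracker. */
static bool continues_stream(struct stream_slot *s, uint64_t first_block,
			     uint32_t block_cnt)
{
	if (s->next_lba &&	/* assumed guard from the elided condition */
	    first_block >= s->next_lba &&
	    first_block <= s->next_lba + block_cnt) {
		s->next_lba = first_block + block_cnt;
		return true;
	}
	return false;
}
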