Lines matching references to p (drivers/block/drbd/drbd_main.c)

536 	struct task_struct *p = current;  in drbd_thread_current_set_cpu()  local
541 set_cpus_allowed_ptr(p, resource->cpu_mask); in drbd_thread_current_set_cpu()
616 void *p; in conn_prepare_command() local
619 p = __conn_prepare_command(connection, sock); in conn_prepare_command()
620 if (!p) in conn_prepare_command()
623 return p; in conn_prepare_command()
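
Nearly every hit below follows the same shape: p is the scratch packet buffer returned by one of the *_prepare_command() helpers, the caller fills its fields in wire (big-endian) byte order, and then passes sizeof(*p) back to the matching *_send_command(). A minimal sketch of that caller shape; struct p_example, drbd_send_example() and P_EXAMPLE are illustrative only (not part of DRBD), while the prepare/send helpers and their signatures are the ones visible in this listing, assuming the in-tree DRBD context (drbd_int.h, drbd_protocol.h):

	/* Illustrative packet and sender, not part of DRBD itself. */
	struct p_example {
		__be64 sector;
		__be32 size;
	} __packed;

	static int drbd_send_example(struct drbd_peer_device *peer_device,
				     sector_t sector, unsigned int size)
	{
		struct drbd_socket *sock = &peer_device->connection->data;
		struct p_example *p;

		p = drbd_prepare_command(peer_device, sock);	/* takes the send mutex */
		if (!p)
			return -EIO;				/* no socket; mutex already dropped */
		p->sector = cpu_to_be64(sector);		/* all fields in wire byte order */
		p->size = cpu_to_be32(size);
		/* P_EXAMPLE stands in for a real enum drbd_packet value. */
		return drbd_send_command(peer_device, sock, P_EXAMPLE,
					 sizeof(*p), NULL, 0);	/* sends and releases the mutex */
	}
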
715 struct p_rs_param_95 *p; in drbd_send_sync_param() local
723 p = drbd_prepare_command(peer_device, sock); in drbd_send_sync_param()
724 if (!p) in drbd_send_sync_param()
739 BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX); in drbd_send_sync_param()
740 memset(&p->algs, 0, sizeof(p->algs)); in drbd_send_sync_param()
744 p->resync_rate = cpu_to_be32(dc->resync_rate); in drbd_send_sync_param()
745 p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead); in drbd_send_sync_param()
746 p->c_delay_target = cpu_to_be32(dc->c_delay_target); in drbd_send_sync_param()
747 p->c_fill_target = cpu_to_be32(dc->c_fill_target); in drbd_send_sync_param()
748 p->c_max_rate = cpu_to_be32(dc->c_max_rate); in drbd_send_sync_param()
751 p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF); in drbd_send_sync_param()
752 p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF); in drbd_send_sync_param()
753 p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF); in drbd_send_sync_param()
754 p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF); in drbd_send_sync_param()
755 p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF); in drbd_send_sync_param()
759 strcpy(p->verify_alg, nc->verify_alg); in drbd_send_sync_param()
761 strcpy(p->csums_alg, nc->csums_alg); in drbd_send_sync_param()
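
The resync parameters above are stored through cpu_to_be32() so both peers see the same on-the-wire layout regardless of host endianness. A small user-space check of that conversion, assuming a POSIX system where htonl() performs the same host-to-big-endian conversion for 32-bit values (the value below is arbitrary):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint32_t resync_rate = 250;          /* arbitrary example value */
		uint32_t wire = htonl(resync_rate);  /* big-endian, as sent on the wire */
		unsigned char *b = (unsigned char *)&wire;

		/* Most significant byte first, independent of host endianness. */
		printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
		return 0;
	}
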
770 struct p_protocol *p; in __drbd_send_protocol() local
775 p = __conn_prepare_command(connection, sock); in __drbd_send_protocol()
776 if (!p) in __drbd_send_protocol()
788 size = sizeof(*p); in __drbd_send_protocol()
792 p->protocol = cpu_to_be32(nc->wire_protocol); in __drbd_send_protocol()
793 p->after_sb_0p = cpu_to_be32(nc->after_sb_0p); in __drbd_send_protocol()
794 p->after_sb_1p = cpu_to_be32(nc->after_sb_1p); in __drbd_send_protocol()
795 p->after_sb_2p = cpu_to_be32(nc->after_sb_2p); in __drbd_send_protocol()
796 p->two_primaries = cpu_to_be32(nc->two_primaries); in __drbd_send_protocol()
802 p->conn_flags = cpu_to_be32(cf); in __drbd_send_protocol()
805 strcpy(p->integrity_alg, nc->integrity_alg); in __drbd_send_protocol()
826 struct p_uuids *p; in _drbd_send_uuids() local
833 p = drbd_prepare_command(peer_device, sock); in _drbd_send_uuids()
834 if (!p) { in _drbd_send_uuids()
840 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); in _drbd_send_uuids()
844 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set); in _drbd_send_uuids()
850 p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags); in _drbd_send_uuids()
853 return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0); in _drbd_send_uuids()
888 struct p_rs_uuid *p; in drbd_gen_and_send_sync_uuid() local
903 p = drbd_prepare_command(peer_device, sock); in drbd_gen_and_send_sync_uuid()
904 if (p) { in drbd_gen_and_send_sync_uuid()
905 p->uuid = cpu_to_be64(uuid); in drbd_gen_and_send_sync_uuid()
906 drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0); in drbd_gen_and_send_sync_uuid()
914 struct p_sizes *p; in drbd_send_sizes() local
921 p = drbd_prepare_command(peer_device, sock); in drbd_send_sizes()
922 if (!p) in drbd_send_sizes()
925 packet_size = sizeof(*p); in drbd_send_sizes()
927 packet_size += sizeof(p->qlim[0]); in drbd_send_sizes()
929 memset(p, 0, packet_size); in drbd_send_sizes()
941 p->qlim->physical_block_size = in drbd_send_sizes()
943 p->qlim->logical_block_size = in drbd_send_sizes()
945 p->qlim->alignment_offset = in drbd_send_sizes()
947 p->qlim->io_min = cpu_to_be32(bdev_io_min(bdev)); in drbd_send_sizes()
948 p->qlim->io_opt = cpu_to_be32(bdev_io_opt(bdev)); in drbd_send_sizes()
949 p->qlim->discard_enabled = !!bdev_max_discard_sectors(bdev); in drbd_send_sizes()
954 p->qlim->physical_block_size = in drbd_send_sizes()
956 p->qlim->logical_block_size = in drbd_send_sizes()
958 p->qlim->alignment_offset = 0; in drbd_send_sizes()
959 p->qlim->io_min = cpu_to_be32(queue_io_min(q)); in drbd_send_sizes()
960 p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); in drbd_send_sizes()
961 p->qlim->discard_enabled = 0; in drbd_send_sizes()
974 p->d_size = cpu_to_be64(d_size); in drbd_send_sizes()
975 p->u_size = cpu_to_be64(u_size); in drbd_send_sizes()
977 p->c_size = 0; in drbd_send_sizes()
979 p->c_size = cpu_to_be64(get_capacity(device->vdisk)); in drbd_send_sizes()
980 p->max_bio_size = cpu_to_be32(max_bio_size); in drbd_send_sizes()
981 p->queue_order_type = cpu_to_be16(q_order_type); in drbd_send_sizes()
982 p->dds_flags = cpu_to_be16(flags); in drbd_send_sizes()
994 struct p_state *p; in drbd_send_current_state() local
997 p = drbd_prepare_command(peer_device, sock); in drbd_send_current_state()
998 if (!p) in drbd_send_current_state()
1000 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */ in drbd_send_current_state()
1001 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0); in drbd_send_current_state()
1017 struct p_state *p; in drbd_send_state() local
1020 p = drbd_prepare_command(peer_device, sock); in drbd_send_state()
1021 if (!p) in drbd_send_state()
1023 p->state = cpu_to_be32(state.i); /* Within the send mutex */ in drbd_send_state()
1024 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0); in drbd_send_state()
1030 struct p_req_state *p; in drbd_send_state_req() local
1033 p = drbd_prepare_command(peer_device, sock); in drbd_send_state_req()
1034 if (!p) in drbd_send_state_req()
1036 p->mask = cpu_to_be32(mask.i); in drbd_send_state_req()
1037 p->val = cpu_to_be32(val.i); in drbd_send_state_req()
1038 return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0); in drbd_send_state_req()
1045 struct p_req_state *p; in conn_send_state_req() local
1049 p = conn_prepare_command(connection, sock); in conn_send_state_req()
1050 if (!p) in conn_send_state_req()
1052 p->mask = cpu_to_be32(mask.i); in conn_send_state_req()
1053 p->val = cpu_to_be32(val.i); in conn_send_state_req()
1054 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0); in conn_send_state_req()
1060 struct p_req_state_reply *p; in drbd_send_sr_reply() local
1063 p = drbd_prepare_command(peer_device, sock); in drbd_send_sr_reply()
1064 if (p) { in drbd_send_sr_reply()
1065 p->retcode = cpu_to_be32(retcode); in drbd_send_sr_reply()
1066 drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0); in drbd_send_sr_reply()
1073 struct p_req_state_reply *p; in conn_send_sr_reply() local
1077 p = conn_prepare_command(connection, sock); in conn_send_sr_reply()
1078 if (p) { in conn_send_sr_reply()
1079 p->retcode = cpu_to_be32(retcode); in conn_send_sr_reply()
1080 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0); in conn_send_sr_reply()
1084 static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code) in dcbp_set_code() argument
1087 p->encoding = (p->encoding & ~0xf) | code; in dcbp_set_code()
1090 static void dcbp_set_start(struct p_compressed_bm *p, int set) in dcbp_set_start() argument
1092 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0); in dcbp_set_start()
1095 static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n) in dcbp_set_pad_bits() argument
1098 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4); in dcbp_set_pad_bits()
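
The three dcbp_set_*() helpers above pack everything the receiver needs to know about the compressed-bitmap payload into a single encoding byte: the bitmap code in bits 0-3, the pad-bit count in bits 4-6, and a "start of bitmap" flag in bit 7. A standalone demonstration of that layout, using the same mask expressions as the listing and applying them in the order the driver does (start and pad bits during fill, code last); the demo itself is illustrative:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t encoding = 0;
		int code = 2;       /* e.g. RLE_VLI_Bits */
		int pad_bits = 5;   /* unused bits in the last byte of the bitstream */
		int start = 1;      /* first packet of this bitmap exchange */

		encoding = (encoding & ~0x80) | (start ? 0x80 : 0);    /* dcbp_set_start()    */
		encoding = (encoding & (~0x7 << 4)) | (pad_bits << 4); /* dcbp_set_pad_bits() */
		encoding = (encoding & ~0xf) | code;                   /* dcbp_set_code()     */

		printf("encoding = 0x%02x\n", encoding);  /* 0x80 | 0x50 | 0x02 = 0xd2 */
		return 0;
	}
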
1102 struct p_compressed_bm *p, in fill_bitmap_rle_bits() argument
1125 bitstream_init(&bs, p->code, size, 0); in fill_bitmap_rle_bits()
1126 memset(p->code, 0, size); in fill_bitmap_rle_bits()
1148 dcbp_set_start(p, 1); in fill_bitmap_rle_bits()
1153 dcbp_set_start(p, 0); in fill_bitmap_rle_bits()
1177 len = bs.cur.b - p->code + !!bs.cur.bit; in fill_bitmap_rle_bits()
1193 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7); in fill_bitmap_rle_bits()
1210 struct p_compressed_bm *p = sock->sbuf + header_size; in send_bitmap_rle_or_plain() local
1213 len = fill_bitmap_rle_bits(device, p, in send_bitmap_rle_or_plain()
1214 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c); in send_bitmap_rle_or_plain()
1219 dcbp_set_code(p, RLE_VLI_Bits); in send_bitmap_rle_or_plain()
1221 P_COMPRESSED_BITMAP, sizeof(*p) + len, in send_bitmap_rle_or_plain()
1224 c->bytes[0] += header_size + sizeof(*p) + len; in send_bitmap_rle_or_plain()
1233 unsigned long *p = sock->sbuf + header_size; in send_bitmap_rle_or_plain() local
1236 num_words = min_t(size_t, data_size / sizeof(*p), in send_bitmap_rle_or_plain()
1238 len = num_words * sizeof(*p); in send_bitmap_rle_or_plain()
1240 drbd_bm_get_lel(device, c->word_offset, num_words, p); in send_bitmap_rle_or_plain()
1316 struct p_barrier_ack *p; in drbd_send_b_ack() local
1322 p = conn_prepare_command(connection, sock); in drbd_send_b_ack()
1323 if (!p) in drbd_send_b_ack()
1325 p->barrier = barrier_nr; in drbd_send_b_ack()
1326 p->set_size = cpu_to_be32(set_size); in drbd_send_b_ack()
1327 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0); in drbd_send_b_ack()
1342 struct p_block_ack *p; in _drbd_send_ack() local
1348 p = drbd_prepare_command(peer_device, sock); in _drbd_send_ack()
1349 if (!p) in _drbd_send_ack()
1351 p->sector = sector; in _drbd_send_ack()
1352 p->block_id = block_id; in _drbd_send_ack()
1353 p->blksize = blksize; in _drbd_send_ack()
1354 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq)); in _drbd_send_ack()
1355 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0); in _drbd_send_ack()
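
Each ACK above carries a sequence number taken with atomic_inc_return() from the device's packet_seq counter and then converted to big-endian. A user-space sketch of the same idea, where C11 atomic_fetch_add(&seq, 1) + 1 stands in for the kernel's increment-then-return semantics and htonl() for cpu_to_be32(); the counter here is local to the demo:

	#include <stdio.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static atomic_uint packet_seq;

	static uint32_t next_seq_num_be(void)
	{
		/* fetch_add returns the old value; +1 matches atomic_inc_return(). */
		uint32_t seq = atomic_fetch_add(&packet_seq, 1) + 1;
		return htonl(seq);  /* big-endian, as it goes on the wire */
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("seq_num (wire order) = 0x%08x\n",
			       (unsigned)next_seq_num_be());
		return 0;
	}
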
1406 struct p_block_desc *p; in drbd_send_rs_deallocated() local
1409 p = drbd_prepare_command(peer_device, sock); in drbd_send_rs_deallocated()
1410 if (!p) in drbd_send_rs_deallocated()
1412 p->sector = cpu_to_be64(peer_req->i.sector); in drbd_send_rs_deallocated()
1413 p->blksize = cpu_to_be32(peer_req->i.size); in drbd_send_rs_deallocated()
1414 p->pad = 0; in drbd_send_rs_deallocated()
1415 return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0); in drbd_send_rs_deallocated()
1422 struct p_block_req *p; in drbd_send_drequest() local
1425 p = drbd_prepare_command(peer_device, sock); in drbd_send_drequest()
1426 if (!p) in drbd_send_drequest()
1428 p->sector = cpu_to_be64(sector); in drbd_send_drequest()
1429 p->block_id = block_id; in drbd_send_drequest()
1430 p->blksize = cpu_to_be32(size); in drbd_send_drequest()
1431 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0); in drbd_send_drequest()
1438 struct p_block_req *p; in drbd_send_drequest_csum() local
1443 p = drbd_prepare_command(peer_device, sock); in drbd_send_drequest_csum()
1444 if (!p) in drbd_send_drequest_csum()
1446 p->sector = cpu_to_be64(sector); in drbd_send_drequest_csum()
1447 p->block_id = ID_SYNCER /* unused */; in drbd_send_drequest_csum()
1448 p->blksize = cpu_to_be32(size); in drbd_send_drequest_csum()
1449 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size); in drbd_send_drequest_csum()
1455 struct p_block_req *p; in drbd_send_ov_request() local
1458 p = drbd_prepare_command(peer_device, sock); in drbd_send_ov_request()
1459 if (!p) in drbd_send_ov_request()
1461 p->sector = cpu_to_be64(sector); in drbd_send_ov_request()
1462 p->block_id = ID_SYNCER /* unused */; in drbd_send_ov_request()
1463 p->blksize = cpu_to_be32(size); in drbd_send_ov_request()
1464 return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0); in drbd_send_ov_request()
1669 struct p_data *p; in drbd_send_dblock() local
1676 p = drbd_prepare_command(peer_device, sock); in drbd_send_dblock()
1680 if (!p) in drbd_send_dblock()
1682 p->sector = cpu_to_be64(req->i.sector); in drbd_send_dblock()
1683 p->block_id = (unsigned long)req; in drbd_send_dblock()
1684 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); in drbd_send_dblock()
1698 p->dp_flags = cpu_to_be32(dp_flags); in drbd_send_dblock()
1702 struct p_trim *t = (struct p_trim*)p; in drbd_send_dblock()
1707 digest_out = p + 1; in drbd_send_dblock()
1714 sizeof(*p) + digest_size, NULL, req->i.size); in drbd_send_dblock()
1738 if (memcmp(p + 1, digest, digest_size)) { in drbd_send_dblock()
1762 struct p_data *p; in drbd_send_block() local
1767 p = drbd_prepare_command(peer_device, sock); in drbd_send_block()
1772 if (!p) in drbd_send_block()
1774 p->sector = cpu_to_be64(peer_req->i.sector); in drbd_send_block()
1775 p->block_id = peer_req->block_id; in drbd_send_block()
1776 p->seq_num = 0; /* unused */ in drbd_send_block()
1777 p->dp_flags = 0; in drbd_send_block()
1779 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1); in drbd_send_block()
1780 …err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NU… in drbd_send_block()
1791 struct p_block_desc *p; in drbd_send_out_of_sync() local
1794 p = drbd_prepare_command(peer_device, sock); in drbd_send_out_of_sync()
1795 if (!p) in drbd_send_out_of_sync()
1797 p->sector = cpu_to_be64(req->i.sector); in drbd_send_out_of_sync()
1798 p->blksize = cpu_to_be32(req->i.size); in drbd_send_out_of_sync()
1799 return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0); in drbd_send_out_of_sync()