--- qp.c (00af6729b52ede86a08173c8d5f2c8cd9fa3390d)
+++ qp.c (c0a6b5ecc5b7dd028c2921415ea036074a8f8b00)
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the

--- 908 unchanged lines hidden ---

@@ -917,17 +917,16 @@
 		       (qp->sq.wqe_cnt << qp->sq.wqe_shift);
 
 	qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
 	if (IS_ERR(qp->umem)) {
 		err = PTR_ERR(qp->umem);
 		goto err;
 	}
 
-	n = ib_umem_page_count(qp->umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
 	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
 	if (err)
 		goto err_buf;
 
 	err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
 	if (err)

--- 178 unchanged lines hidden ---

@@ -1112,17 +1111,16 @@
 
 	qp->umem =
 		ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
 	if (IS_ERR(qp->umem)) {
 		err = PTR_ERR(qp->umem);
 		goto err;
 	}
 
-	n = ib_umem_page_count(qp->umem);
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
 	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
 	if (err)
 		goto err_buf;
 
 	err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
 	if (err)
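
Both hunks above make the same change: the explicit n = ib_umem_page_count(qp->umem) is dropped, and mlx4_ib_umem_calc_optimal_mtt_size() is trusted to compute the MTT entry count itself, returning it through its last argument. A minimal sketch of that contract (toy_calc_mtt_shift is hypothetical, not the driver's real scatterlist walk):

#include <stddef.h>

/*
 * Toy model of the contract the diff implies: pick the largest page
 * shift that both the buffer start and length stay aligned to, and
 * derive the number of MTT entries from that shift, so the caller no
 * longer needs a separate ib_umem_page_count() pass.
 */
static int toy_calc_mtt_shift(unsigned long va, size_t len, int *num_of_mtts)
{
	int shift = 12;				/* start from 4 KiB pages */

	while (shift < 30 &&
	       (va  & ((1UL << (shift + 1)) - 1)) == 0 &&
	       (len & ((1UL << (shift + 1)) - 1)) == 0)
		shift++;			/* grow while still aligned */

	/* round the length up to whole pages of (1 << shift) */
	*num_of_mtts = (int)((len + (1UL << shift) - 1) >> shift);
	return shift;
}

Because the entry count falls out of choosing the shift, both call sites reduce to the single helper call followed by mlx4_mtt_init().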

--- 3193 unchanged lines hidden ---

@@ -4322,21 +4320,22 @@
 	if (!err)
 		ibwq->state = new_state;
 
 	mutex_unlock(&qp->mutex);
 
 	return err;
 }
 
-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
 
 	if (qp->counter_index)
 		mlx4_ib_free_qp_counter(dev, qp);
 
 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
 
 	kfree(qp);
+	return 0;
 }
 
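The only functional change to mlx4_ib_destroy_wq() is the return type: void becomes int, and success is now reported explicitly with return 0. mlx4 never fails this path, but an int-returning hook lets a caller keep its own bookkeeping alive when a driver refuses to destroy the object. A hedged sketch of that caller-side pattern (wq_ops and core_destroy_wq are illustrative names, not the ib_core API):

#include <errno.h>
#include <stdlib.h>

struct wq;				/* opaque stand-in for struct ib_wq */

/* Illustrative hook table mirroring the new int-returning signature. */
struct wq_ops {
	int (*destroy_wq)(struct wq *wq);
};

/*
 * The pattern an int return enables: shared state is only released
 * when the driver actually destroyed the object (e.g. it may return
 * -EBUSY to veto destruction).
 */
static int core_destroy_wq(const struct wq_ops *ops, struct wq *wq)
{
	int err = ops->destroy_wq(wq);

	if (err)
		return err;		/* driver kept the WQ alive */

	free(wq);			/* safe: driver-side state is gone */
	return 0;
}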
@@ -4343,78 +4342,60 @@
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-			      struct ib_rwq_ind_table_init_attr *init_attr,
-			      struct ib_udata *udata)
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata)
 {
-	struct ib_rwq_ind_table *rwq_ind_table;
 	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
 	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+	struct ib_device *device = rwq_ind_table->device;
 	unsigned int base_wqn;
 	size_t min_resp_len;
-	int i;
-	int err;
+	int i, err = 0;
 
 	if (udata->inlen > 0 &&
 	    !ib_is_udata_cleared(udata, 0,
 				 udata->inlen))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (ind_tbl_size >
 	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
 		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
 			 ind_tbl_size,
 			 device->attrs.rss_caps.max_rwq_indirection_table_size);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	base_wqn = init_attr->ind_tbl[0]->wq_num;
 
 	if (base_wqn % ind_tbl_size) {
 		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
 			 base_wqn);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	for (i = 1; i < ind_tbl_size; i++) {
 		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
 			pr_debug("indirection table's WQNs aren't consecutive\n");
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		}
 	}
 
-	rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
-	if (!rwq_ind_table)
-		return ERR_PTR(-ENOMEM);
-
 	if (udata->outlen) {
 		resp.response_length = offsetof(typeof(resp), response_length) +
 				       sizeof(resp.response_length);
 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
-		if (err)
-			goto err;
 	}
 
-	return rwq_ind_table;
-
-err:
-	kfree(rwq_ind_table);
-	return ERR_PTR(err);
-}
-
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
-{
-	kfree(ib_rwq_ind_tbl);
-	return 0;
-}
+	return err;
+}
 
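Here mlx4_ib_create_rwq_ind_table() moves to the core-allocated object scheme: instead of kzalloc'ing and returning a struct ib_rwq_ind_table *, it receives the core-allocated table as its first parameter and returns a plain errno, so every ERR_PTR(-E...) collapses to -E..., the local kzalloc/kfree error path disappears, and the now-trivial mlx4_ib_destroy_rwq_ind_table() is deleted from this file. The validation logic itself is untouched; restated as a standalone harness (validate_wqns is a hypothetical name, same arithmetic as the hunk):

#include <stdio.h>

/*
 * The two checks the driver applies to the indirection table: the base
 * WQN must be aligned to the table size (a power of two), and the
 * remaining WQNs must follow it consecutively.
 */
static int validate_wqns(const unsigned int *wqn, unsigned int log_size)
{
	unsigned int size = 1u << log_size;
	unsigned int i;

	if (wqn[0] % size)
		return -1;		/* mirrors the alignment -EINVAL */

	for (i = 1; i < size; i++)
		if (wqn[i] != wqn[0] + i)
			return -1;	/* WQNs aren't consecutive */

	return 0;
}

int main(void)
{
	unsigned int ok[4]  = { 8, 9, 10, 11 };		/* 8 % 4 == 0, consecutive */
	unsigned int bad[4] = { 10, 11, 12, 13 };	/* 10 % 4 != 0 */

	printf("%d %d\n", validate_wqns(ok, 2), validate_wqns(bad, 2));
	return 0;
}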
 struct mlx4_ib_drain_cqe {
 	struct ib_cqe cqe;
 	struct completion done;
 };
 
 static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,

--- 120 unchanged lines hidden ---
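
The drain helpers that close the visible region pair an ib_cqe with a completion: the hidden body of mlx4_ib_drain_qp_done() presumably just completes cqe->done, and the (also hidden) drain routines post a final work request whose wr_cqe points at that ib_cqe, then block until the callback fires. A userspace analogue of that shape, assuming pthreads in place of the kernel's completion API:

#include <pthread.h>
#include <stdio.h>

/* Analogue of struct mlx4_ib_drain_cqe: a callback cookie plus the
 * completion the waiter blocks on. */
struct drain_cookie {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int		done;
};

/* Plays the role of mlx4_ib_drain_qp_done(): wake the waiter. */
static void drain_done(struct drain_cookie *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void *cq_thread(void *arg)
{
	drain_done(arg);		/* the "completion callback" fires */
	return NULL;
}

int main(void)
{
	struct drain_cookie c = { PTHREAD_MUTEX_INITIALIZER,
				  PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;

	pthread_create(&t, NULL, cq_thread, &c);

	/* Analogue of wait_for_completion(&cqe->done). */
	pthread_mutex_lock(&c.lock);
	while (!c.done)
		pthread_cond_wait(&c.cond, &c.lock);
	pthread_mutex_unlock(&c.lock);
	pthread_join(t, NULL);

	puts("drained");
	return 0;
}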