/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}

static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db = {};
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	__le32 doorbell[2];
	u32 wqe_idx = 0;
	int nreq = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

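	/*
	 * Walk the WR chain: check for SQ overflow, pick the next WQE slot by
	 * masking the ring head (the AND with wqe_cnt - 1 acts as a modulo,
	 * since the WQE count is a power of two), then build the WQE in place.
	 */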
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;

		/* UD (GSI) and RC WQEs are built separately below */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

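			/*
			 * Translate the generic IB send flags into hardware
			 * WQE flag bits: CQ_NOTIFY for signaled completions,
			 * SE for solicited events and IMM when immediate data
			 * is carried.
			 */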
			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.tclass);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				cpu_to_le32(wr->sg_list[1].lkey);
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
				cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/*
			 * Ctrl field flags: signaled completion, solicited
			 * event, immediate data and fence.
			 */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

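			/*
			 * Payload: either copy the SG buffers inline into the
			 * WQE (bounded by max_sq_inline) or describe them with
			 * scatter/gather data segments.
			 */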
			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/* fill data segments from the SG list */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
		}
	}

out:
	/* Ring the SQ doorbell if any WQEs were posted */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Make the WQEs visible to hardware before the doorbell */
		wmb();

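		/*
		 * Build the two-word SQ doorbell: the new head (reported
		 * modulo twice the WQE count), service level, physical port,
		 * doorbell QPN and the HW_SYNC bit, then write it to the
		 * per-QP doorbell register.
		 */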
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db = {};
	__le32 doorbell[2] = {0};
	unsigned long flags = 0;
	unsigned int wqe_idx;
	int ret = 0;
	int nreq = 0;
	int i = 0;
	u32 reg_val;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

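		/*
		 * GSI (QP1) has no RQ doorbell: its RQ head is written into
		 * the per-port QP1C_CFG3 register instead. Other QPs ring the
		 * regular RQ doorbell with the new head and QPN.
		 */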
		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

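/*
 * Like hns_roce_set_db_event_mode() above, the helpers below update ROCEE
 * registers with a read-modify-write sequence: read the register, convert to
 * little endian for the roce_set_field()/roce_set_bit() accessors, update the
 * relevant fields and write the result back.
 */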
static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12: the address is programmed in 4K-page units (shift by
	 * 12), and this register takes the high 32 bits of that value (shift
	 * by another 32).
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       db->ext_db->eodb_dep);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}

static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

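	/*
	 * In extended doorbell mode the SQ/other doorbells are backed by
	 * DMA-coherent buffers whose base address and depth are programmed
	 * into the ROCEE_EXT_DB_* registers; otherwise only the on-chip
	 * doorbell watermarks are configured.
	 */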
	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}

static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}

static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	/* Reserved cq for loop qp */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;

	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
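	/*
	 * Each reserved loopback QP corresponds to a (physical port, service
	 * level) pair; queue_en[] records which slots map to a port that is
	 * actually enabled on this device.
	 */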
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

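		/*
		 * Build a link-local destination GID for the loopback path:
		 * the fe80::/64 prefix plus an EUI-64 style interface ID
		 * derived from the port MAC (ff:fe inserted in the middle,
		 * universal/local bit flipped).
		 */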
		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}

static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_db_table *db = &priv->db_table;
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}

static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

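/*
 * Recreating the reserved loopback QPs runs on the free_mr workqueue; the
 * caller hands the worker a completion and polls it with a bounded timeout so
 * that a stuck recreation cannot block forever.
 */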
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct device *dev = &hr_dev->pdev->dev;
	struct completion comp;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp timed out (20s), returning failure!\n");
	return -ETIMEDOUT;
}

static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}

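/*
 * MR free workaround: post a zero-length RDMA WRITE on each reserved loopback
 * QP and poll the reserved CQ until all of them complete (or a timeout hits),
 * so that any outstanding hardware access to the MR is flushed before the MR
 * is destroyed.
 */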
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	struct hns_roce_mr_free_work *mr_work =
		container_of(work, struct hns_roce_mr_free_work, work);
	struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
	struct hns_roce_v1_priv *priv = hr_dev->priv;
	struct hns_roce_free_mr *free_mr = &priv->free_mr;
	struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
	struct hns_roce_mr *hr_mr = mr_work->mr;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_qp *hr_qp;
	int ne = 0;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"(qp:0x%lx) poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
| 1044 | goto free_work; |
| 1045 | } |
| 1046 | ne -= ret; |
Leon Romanovsky | 98e77d9 | 2017-05-23 11:29:42 +0300 | [diff] [blame] | 1047 | usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, |
| 1048 | (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1049 | } while (ne && time_before_eq(jiffies, end)); |
| 1050 | |
| 1051 | if (ne != 0) |
| 1052 | dev_err(dev, |
| 1053 | "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n", |
| 1054 | hr_mr->key, ne); |
| 1055 | |
| 1056 | free_work: |
| 1057 | if (mr_work->comp_flag) |
| 1058 | complete(mr_work->comp); |
| 1059 | kfree(mr_work); |
| 1060 | } |
| 1061 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1062 | static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 1063 | struct hns_roce_mr *mr, struct ib_udata *udata) |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1064 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1065 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1066 | struct hns_roce_free_mr *free_mr = &priv->free_mr; |
| 1067 | long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1068 | struct device *dev = &hr_dev->pdev->dev; |
| 1069 | struct hns_roce_mr_free_work *mr_work; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1070 | unsigned long start = jiffies; |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1071 | struct completion comp; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1072 | int ret = 0; |
| 1073 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1074 | if (mr->enabled) { |
Yixing Liu | 6eef524 | 2019-11-05 19:07:58 +0800 | [diff] [blame] | 1075 | if (hns_roce_hw_destroy_mpt(hr_dev, NULL, |
| 1076 | key_to_hw_index(mr->key) & |
| 1077 | (hr_dev->caps.num_mtpts - 1))) |
| 1078 | dev_warn(dev, "DESTROY_MPT failed!\n"); |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1079 | } |
| 1080 | |
| 1081 | mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL); |
| 1082 | if (!mr_work) { |
| 1083 | ret = -ENOMEM; |
| 1084 | goto free_mr; |
| 1085 | } |
| 1086 | |
| 1087 | INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn); |
| 1088 | |
| 1089 | mr_work->ib_dev = &(hr_dev->ib_dev); |
| 1090 | mr_work->comp = &comp;
| 1091 | mr_work->comp_flag = 1; |
| 1092 | mr_work->mr = (void *)mr; |
| 1093 | init_completion(mr_work->comp); |
| 1094 | |
| 1095 | queue_work(free_mr->free_mr_wq, &(mr_work->work)); |
| 1096 | |
Colin Ian King | a511f82 | 2019-05-31 10:21:00 +0100 | [diff] [blame] | 1097 | while (end > 0) { |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1098 | if (try_wait_for_completion(&comp)) |
| 1099 | goto free_mr; |
| 1100 | msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); |
Lang Cheng | 669cefb | 2019-05-24 15:31:23 +0800 | [diff] [blame] | 1101 | end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1102 | } |
| 1103 | |
| 1104 | mr_work->comp_flag = 0; |
| 1105 | if (try_wait_for_completion(&comp)) |
| 1106 | goto free_mr; |
| 1107 | |
| 1108 | dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key); |
| 1109 | ret = -ETIMEDOUT; |
| 1110 | |
| 1111 | free_mr: |
| 1112 | dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", |
| 1113 | mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); |
| 1114 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1115 | hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, |
| 1116 | key_to_hw_index(mr->key), 0); |
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1117 | hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1118 | kfree(mr); |
| 1119 | |
| 1120 | return ret; |
| 1121 | } |
| 1122 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1123 | static void hns_roce_db_free(struct hns_roce_dev *hr_dev) |
| 1124 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1125 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1126 | struct hns_roce_db_table *db = &priv->db_table; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1127 | struct device *dev = &hr_dev->pdev->dev; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1128 | |
| 1129 | if (db->sdb_ext_mod) { |
| 1130 | dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, |
| 1131 | db->ext_db->sdb_buf_list->buf, |
| 1132 | db->ext_db->sdb_buf_list->map); |
| 1133 | kfree(db->ext_db->sdb_buf_list); |
| 1134 | } |
| 1135 | |
| 1136 | if (db->odb_ext_mod) { |
| 1137 | dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE, |
| 1138 | db->ext_db->odb_buf_list->buf, |
| 1139 | db->ext_db->odb_buf_list->map); |
| 1140 | kfree(db->ext_db->odb_buf_list); |
| 1141 | } |
| 1142 | |
| 1143 | kfree(db->ext_db); |
| 1144 | } |
| 1145 | |
| 1146 | static int hns_roce_raq_init(struct hns_roce_dev *hr_dev) |
| 1147 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1148 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
Colin Ian King | 48062b0 | 2020-05-28 16:04:27 +0100 | [diff] [blame] | 1149 | struct hns_roce_raq_table *raq = &priv->raq_table; |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1150 | struct device *dev = &hr_dev->pdev->dev; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1151 | int raq_shift = 0; |
| 1152 | dma_addr_t addr; |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1153 | __le32 tmp; |
| 1154 | u32 val; |
| 1155 | int ret; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1156 | |
| 1157 | raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL); |
| 1158 | if (!raq->e_raq_buf) |
| 1159 | return -ENOMEM; |
| 1160 | |
| 1161 | raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, |
| 1162 | &addr, GFP_KERNEL); |
| 1163 | if (!raq->e_raq_buf->buf) { |
| 1164 | ret = -ENOMEM; |
| 1165 | goto err_dma_alloc_raq; |
| 1166 | } |
| 1167 | raq->e_raq_buf->map = addr; |
| 1168 | |
| 1169 | /* Configure raq extended address. 48-bit, 4K aligned */
| 1170 | roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12); |
| 1171 | |
| 1172 | /* Configure raq_shift */ |
| 1173 | raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY); |
| 1174 | val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1175 | tmp = cpu_to_le32(val); |
| 1176 | roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1177 | ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift); |
| 1178 | /* |
| 1179 | * 44 = 32 + 12. When writing the address to hardware, shift right by
| 1180 | * 12 because a 4K page is used, and by a further 32 to obtain the
| 1181 | * high 32-bit value that is written to hardware.
| 1182 | */ |
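| | /*
| | * Illustration (not from the original source): for a 48-bit buffer
| | * address, "map >> 12" programmed into ROCEE_EXT_RAQ_REG above carries
| | * bits [43:12] of the 4K-aligned base, and "map >> 44" below supplies
| | * the remaining high bits [47:44] for the EXT_RAQ_BA_H field.
| | */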
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1183 | roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1184 | ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S, |
| 1185 | raq->e_raq_buf->map >> 44); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1186 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1187 | roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val); |
| 1188 | dev_dbg(dev, "Configure raq_shift 0x%x.\n", val); |
| 1189 | |
| 1190 | /* Configure raq threshold */ |
| 1191 | val = roce_read(hr_dev, ROCEE_RAQ_WL_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1192 | tmp = cpu_to_le32(val); |
| 1193 | roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1194 | ROCEE_RAQ_WL_ROCEE_RAQ_WL_S, |
| 1195 | HNS_ROCE_V1_EXT_RAQ_WF); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1196 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1197 | roce_write(hr_dev, ROCEE_RAQ_WL_REG, val); |
| 1198 | dev_dbg(dev, "Configure raq_wl 0x%x.\n", val); |
| 1199 | |
| 1200 | /* Enable extend raq */ |
| 1201 | val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1202 | tmp = cpu_to_le32(val); |
| 1203 | roce_set_field(tmp, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1204 | ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M, |
| 1205 | ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S, |
| 1206 | POL_TIME_INTERVAL_VAL); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1207 | roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1); |
| 1208 | roce_set_field(tmp, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1209 | ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M, |
| 1210 | ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S, |
| 1211 | 2); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1212 | roce_set_bit(tmp, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1213 | ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1214 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1215 | roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val); |
| 1216 | dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val); |
| 1217 | |
| 1218 | /* Enable raq drop */ |
| 1219 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1220 | tmp = cpu_to_le32(val); |
| 1221 | roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1); |
| 1222 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1223 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); |
| 1224 | dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val); |
| 1225 | |
| 1226 | return 0; |
| 1227 | |
| 1228 | err_dma_alloc_raq: |
| 1229 | kfree(raq->e_raq_buf); |
| 1230 | return ret; |
| 1231 | } |
| 1232 | |
| 1233 | static void hns_roce_raq_free(struct hns_roce_dev *hr_dev) |
| 1234 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1235 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1236 | struct hns_roce_raq_table *raq = &priv->raq_table; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1237 | struct device *dev = &hr_dev->pdev->dev; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1238 | |
| 1239 | dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf, |
| 1240 | raq->e_raq_buf->map); |
| 1241 | kfree(raq->e_raq_buf); |
| 1242 | } |
| 1243 | |
| 1244 | static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) |
| 1245 | { |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1246 | __le32 tmp; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1247 | u32 val; |
| 1248 | |
| 1249 | if (enable_flag) { |
| 1250 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); |
| 1251 | /* Open all ports */ |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1252 | tmp = cpu_to_le32(val); |
| 1253 | roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1254 | ROCEE_GLB_CFG_ROCEE_PORT_ST_S, |
| 1255 | ALL_PORT_VAL_OPEN); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1256 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1257 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); |
| 1258 | } else { |
| 1259 | val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); |
| 1260 | /* Close all ports */ |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1261 | tmp = cpu_to_le32(val); |
| 1262 | roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1263 | ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1264 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1265 | roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); |
| 1266 | } |
| 1267 | } |
| 1268 | |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1269 | static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) |
| 1270 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1271 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1272 | struct device *dev = &hr_dev->pdev->dev; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1273 | int ret; |
| 1274 | |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1275 | priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, |
| 1276 | HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, |
| 1277 | GFP_KERNEL); |
| 1278 | if (!priv->bt_table.qpc_buf.buf) |
| 1279 | return -ENOMEM; |
| 1280 | |
| 1281 | priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, |
| 1282 | HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, |
| 1283 | GFP_KERNEL); |
| 1284 | if (!priv->bt_table.mtpt_buf.buf) { |
| 1285 | ret = -ENOMEM; |
| 1286 | goto err_failed_alloc_mtpt_buf; |
| 1287 | } |
| 1288 | |
| 1289 | priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, |
| 1290 | HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, |
| 1291 | GFP_KERNEL); |
| 1292 | if (!priv->bt_table.cqc_buf.buf) { |
| 1293 | ret = -ENOMEM; |
| 1294 | goto err_failed_alloc_cqc_buf; |
| 1295 | } |
| 1296 | |
| 1297 | return 0; |
| 1298 | |
| 1299 | err_failed_alloc_cqc_buf: |
| 1300 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, |
| 1301 | priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); |
| 1302 | |
| 1303 | err_failed_alloc_mtpt_buf: |
| 1304 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, |
| 1305 | priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); |
| 1306 | |
| 1307 | return ret; |
| 1308 | } |
| 1309 | |
| 1310 | static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) |
| 1311 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1312 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1313 | struct device *dev = &hr_dev->pdev->dev; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1314 | |
| 1315 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, |
| 1316 | priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); |
| 1317 | |
| 1318 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, |
| 1319 | priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); |
| 1320 | |
| 1321 | dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, |
| 1322 | priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); |
| 1323 | } |
| 1324 | |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1325 | static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev) |
| 1326 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1327 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1328 | struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1329 | struct device *dev = &hr_dev->pdev->dev; |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1330 | |
| 1331 | /* |
| 1332 | * This buffer will be used for the CQ's tptr (tail pointer), also
| 1333 | * named ci (consumer index). Every CQ uses 2 bytes to save the
| 1334 | * cqe ci in hip06. Hardware reads this area to get the new ci
| 1335 | * when the queue is almost full.
| 1336 | */ |
| 1337 | tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, |
| 1338 | &tptr_buf->map, GFP_KERNEL); |
| 1339 | if (!tptr_buf->buf) |
| 1340 | return -ENOMEM; |
| 1341 | |
| 1342 | hr_dev->tptr_dma_addr = tptr_buf->map; |
| 1343 | hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE; |
| 1344 | |
| 1345 | return 0; |
| 1346 | } |
| 1347 | |
| 1348 | static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev) |
| 1349 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1350 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1351 | struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1352 | struct device *dev = &hr_dev->pdev->dev; |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1353 | |
| 1354 | dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, |
| 1355 | tptr_buf->buf, tptr_buf->map); |
| 1356 | } |
| 1357 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1358 | static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev) |
| 1359 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1360 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1361 | struct hns_roce_free_mr *free_mr = &priv->free_mr; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1362 | struct device *dev = &hr_dev->pdev->dev; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1363 | int ret = 0; |
| 1364 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1365 | free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr"); |
| 1366 | if (!free_mr->free_mr_wq) { |
| 1367 | dev_err(dev, "Create free mr workqueue failed!\n"); |
| 1368 | return -ENOMEM; |
| 1369 | } |
| 1370 | |
| 1371 | ret = hns_roce_v1_rsv_lp_qp(hr_dev); |
| 1372 | if (ret) { |
| 1373 | dev_err(dev, "Reserved loop qp failed(%d)!\n", ret); |
| 1374 | flush_workqueue(free_mr->free_mr_wq); |
| 1375 | destroy_workqueue(free_mr->free_mr_wq); |
| 1376 | } |
| 1377 | |
| 1378 | return ret; |
| 1379 | } |
| 1380 | |
| 1381 | static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev) |
| 1382 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1383 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1384 | struct hns_roce_free_mr *free_mr = &priv->free_mr; |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1385 | |
| 1386 | flush_workqueue(free_mr->free_mr_wq); |
| 1387 | destroy_workqueue(free_mr->free_mr_wq); |
| 1388 | |
| 1389 | hns_roce_v1_release_lp_qp(hr_dev); |
| 1390 | } |
| 1391 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1392 | /** |
| 1393 | * hns_roce_v1_reset - reset RoCE |
| 1394 | * @hr_dev: RoCE device struct pointer |
| 1395 | * @dereset: true -- drop reset, false -- reset
| 1396 | * return 0 - success, negative - fail
| 1397 | */ |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1398 | static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1399 | { |
| 1400 | struct device_node *dsaf_node; |
| 1401 | struct device *dev = &hr_dev->pdev->dev; |
| 1402 | struct device_node *np = dev->of_node; |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1403 | struct fwnode_handle *fwnode; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1404 | int ret; |
| 1405 | |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1406 | /* check if this is DT/ACPI case */ |
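| | /*
| | * The device-tree path below resolves the "dsaf-handle" phandle
| | * directly, while the ACPI path resolves the same property as a
| | * fwnode reference; both yield the fwnode handed to
| | * hns_dsaf_roce_reset().
| | */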
| 1407 | if (dev_of_node(dev)) { |
| 1408 | dsaf_node = of_parse_phandle(np, "dsaf-handle", 0); |
| 1409 | if (!dsaf_node) { |
| 1410 | dev_err(dev, "could not find dsaf-handle\n"); |
| 1411 | return -EINVAL; |
| 1412 | } |
| 1413 | fwnode = &dsaf_node->fwnode; |
| 1414 | } else if (is_acpi_device_node(dev->fwnode)) { |
Sakari Ailus | 977d5ad | 2018-07-17 17:19:11 +0300 | [diff] [blame] | 1415 | struct fwnode_reference_args args; |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1416 | |
| 1417 | ret = acpi_node_get_property_reference(dev->fwnode, |
| 1418 | "dsaf-handle", 0, &args); |
| 1419 | if (ret) { |
| 1420 | dev_err(dev, "could not find dsaf-handle\n"); |
| 1421 | return ret; |
| 1422 | } |
Sakari Ailus | 977d5ad | 2018-07-17 17:19:11 +0300 | [diff] [blame] | 1423 | fwnode = args.fwnode; |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1424 | } else { |
| 1425 | dev_err(dev, "cannot read data from DT or ACPI\n"); |
| 1426 | return -ENXIO; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1427 | } |
| 1428 | |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1429 | ret = hns_dsaf_roce_reset(fwnode, false); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1430 | if (ret) |
| 1431 | return ret; |
| 1432 | |
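| | /*
| | * Per the kernel-doc above: hns_dsaf_roce_reset() is first called with
| | * "false" to assert the reset; when dereset is requested, the reset is
| | * dropped again with "true" after an SLEEP_TIME_INTERVAL delay.
| | */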
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1433 | if (dereset) { |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1434 | msleep(SLEEP_TIME_INTERVAL); |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1435 | ret = hns_dsaf_roce_reset(fwnode, true); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1436 | } |
| 1437 | |
Salil | 528f1de | 2016-08-24 04:44:50 +0800 | [diff] [blame] | 1438 | return ret; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1439 | } |
| 1440 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1441 | static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1442 | { |
| 1443 | int i = 0; |
| 1444 | struct hns_roce_caps *caps = &hr_dev->caps; |
| 1445 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1446 | hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG); |
| 1447 | hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG); |
| 1448 | hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) | |
| 1449 | ((u64)roce_read(hr_dev, |
| 1450 | ROCEE_SYS_IMAGE_GUID_H_REG) << 32); |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1451 | hr_dev->hw_rev = HNS_ROCE_HW_VER1; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1452 | |
| 1453 | caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM; |
| 1454 | caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM; |
Wei Hu(Xavier) | 926a01d | 2017-08-30 17:23:13 +0800 | [diff] [blame] | 1455 | caps->min_wqes = HNS_ROCE_MIN_WQE_NUM; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1456 | caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM; |
Wei Hu(Xavier) | 93aa218 | 2017-08-30 17:23:12 +0800 | [diff] [blame] | 1457 | caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1458 | caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM; |
| 1459 | caps->max_sq_sg = HNS_ROCE_V1_SG_NUM; |
| 1460 | caps->max_rq_sg = HNS_ROCE_V1_SG_NUM; |
| 1461 | caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE; |
| 1462 | caps->num_uars = HNS_ROCE_V1_UAR_NUM; |
| 1463 | caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 1464 | caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM; |
| 1465 | caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM; |
| 1466 | caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1467 | caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM; |
| 1468 | caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS; |
| 1469 | caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM; |
| 1470 | caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA; |
| 1471 | caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA; |
| 1472 | caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ; |
| 1473 | caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ; |
| 1474 | caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE; |
| 1475 | caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE; |
| 1476 | caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE; |
| 1477 | caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE; |
| 1478 | caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; |
| 1479 | caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE; |
| 1480 | caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1481 | caps->reserved_lkey = 0; |
| 1482 | caps->reserved_pds = 0; |
| 1483 | caps->reserved_mrws = 1; |
| 1484 | caps->reserved_uars = 0; |
| 1485 | caps->reserved_cqs = 0; |
chenglang | 21b97f5 | 2019-06-24 19:47:46 +0800 | [diff] [blame] | 1486 | caps->reserved_qps = 12; /* 2 SQPs per port, 6 ports, 12 in total */
Wei Hu(Xavier) | 29a1fe5 | 2017-10-18 17:32:45 +0800 | [diff] [blame] | 1487 | caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1488 | |
| 1489 | for (i = 0; i < caps->num_ports; i++) |
| 1490 | caps->pkey_table_len[i] = 1; |
| 1491 | |
| 1492 | for (i = 0; i < caps->num_ports; i++) { |
| 1493 | /* Six ports share 16 GIDs in the v1 engine */
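| | /*
| | * Worked example (illustrative, assuming HNS_ROCE_V1_GID_NUM == 16
| | * and num_ports == 6): 16 % 6 = 4, so ports 0..3 each get
| | * 16 / 6 + 1 = 3 GIDs and ports 4..5 each get 2, giving
| | * 4 * 3 + 2 * 2 = 16 in total.
| | */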
| 1494 | if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports)) |
| 1495 | caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / |
| 1496 | caps->num_ports; |
| 1497 | else |
| 1498 | caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / |
| 1499 | caps->num_ports + 1; |
| 1500 | } |
| 1501 | |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 1502 | caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM; |
| 1503 | caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1504 | caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1505 | caps->max_mtu = IB_MTU_2048; |
Wei Hu(Xavier) | cfc85f3 | 2017-08-30 17:23:04 +0800 | [diff] [blame] | 1506 | |
| 1507 | return 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1508 | } |
| 1509 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1510 | static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1511 | { |
| 1512 | int ret; |
| 1513 | u32 val; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1514 | __le32 tmp; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1515 | struct device *dev = &hr_dev->pdev->dev; |
| 1516 | |
| 1517 | /* DMAE user config */ |
| 1518 | val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1519 | tmp = cpu_to_le32(val); |
| 1520 | roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1521 | ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1522 | roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1523 | ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S, |
| 1524 | 1 << PAGES_SHIFT_16); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1525 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1526 | roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val); |
| 1527 | |
| 1528 | val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1529 | tmp = cpu_to_le32(val); |
| 1530 | roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1531 | ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1532 | roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1533 | ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S, |
| 1534 | 1 << PAGES_SHIFT_16); |
| 1535 | |
| 1536 | ret = hns_roce_db_init(hr_dev); |
| 1537 | if (ret) { |
| 1538 | dev_err(dev, "doorbell init failed!\n"); |
| 1539 | return ret; |
| 1540 | } |
| 1541 | |
| 1542 | ret = hns_roce_raq_init(hr_dev); |
| 1543 | if (ret) { |
| 1544 | dev_err(dev, "raq init failed!\n"); |
| 1545 | goto error_failed_raq_init; |
| 1546 | } |
| 1547 | |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1548 | ret = hns_roce_bt_init(hr_dev); |
| 1549 | if (ret) { |
| 1550 | dev_err(dev, "bt init failed!\n"); |
| 1551 | goto error_failed_bt_init; |
| 1552 | } |
| 1553 | |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1554 | ret = hns_roce_tptr_init(hr_dev); |
| 1555 | if (ret) { |
| 1556 | dev_err(dev, "tptr init failed!\n"); |
| 1557 | goto error_failed_tptr_init; |
| 1558 | } |
| 1559 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1560 | ret = hns_roce_free_mr_init(hr_dev); |
| 1561 | if (ret) { |
| 1562 | dev_err(dev, "free mr init failed!\n"); |
| 1563 | goto error_failed_free_mr_init; |
| 1564 | } |
| 1565 | |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 1566 | hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); |
| 1567 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1568 | return 0; |
| 1569 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1570 | error_failed_free_mr_init: |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 1571 | hns_roce_tptr_free(hr_dev); |
| 1572 | |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1573 | error_failed_tptr_init: |
| 1574 | hns_roce_bt_free(hr_dev); |
| 1575 | |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1576 | error_failed_bt_init: |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1577 | hns_roce_raq_free(hr_dev); |
| 1578 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1579 | error_failed_raq_init: |
| 1580 | hns_roce_db_free(hr_dev); |
| 1581 | return ret; |
| 1582 | } |
| 1583 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1584 | static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1585 | { |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 1586 | hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1587 | hns_roce_free_mr_free(hr_dev); |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1588 | hns_roce_tptr_free(hr_dev); |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 1589 | hns_roce_bt_free(hr_dev); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1590 | hns_roce_raq_free(hr_dev); |
| 1591 | hns_roce_db_free(hr_dev); |
| 1592 | } |
| 1593 | |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1594 | static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev) |
| 1595 | { |
| 1596 | u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG); |
| 1597 | |
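| | /*
| | * The GO bit stays set while a previously posted mailbox command is
| | * still being executed, so a nonzero result here means "command
| | * pending" (see the busy-waits in hns_roce_v1_post_mbox() and
| | * hns_roce_v1_chk_mbox()).
| | */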
| 1598 | return (!!(status & (1 << HCR_GO_BIT))); |
| 1599 | } |
| 1600 | |
Colin Ian King | 281d0cc | 2017-09-29 14:16:01 +0100 | [diff] [blame] | 1601 | static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, |
| 1602 | u64 out_param, u32 in_modifier, u8 op_modifier, |
| 1603 | u16 op, u16 token, int event) |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1604 | { |
Bart Van Assche | cc4ed08 | 2017-10-11 10:49:00 -0700 | [diff] [blame] | 1605 | u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG); |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1606 | unsigned long end; |
| 1607 | u32 val = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1608 | __le32 tmp; |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1609 | |
| 1610 | end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies; |
| 1611 | while (hns_roce_v1_cmd_pending(hr_dev)) { |
| 1612 | if (time_after(jiffies, end)) { |
| 1613 | dev_err(hr_dev->dev, "jiffies=%d end=%d\n", |
| 1614 | (int)jiffies, (int)end); |
| 1615 | return -EAGAIN; |
| 1616 | } |
| 1617 | cond_resched(); |
| 1618 | } |
| 1619 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1620 | tmp = cpu_to_le32(val); |
| 1621 | roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S, |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1622 | op); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1623 | roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M, |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1624 | ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1625 | roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event); |
| 1626 | roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1); |
| 1627 | roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M, |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1628 | ROCEE_MB6_ROCEE_MB_TOKEN_S, token); |
| 1629 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1630 | val = le32_to_cpu(tmp); |
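| | /*
| | * Mailbox HCR layout as used below (derived from these accesses, not
| | * from a datasheet): in_param at 64-bit word 0, out_param at 64-bit
| | * word 1, in_modifier at 32-bit word 4, and the command/modifier/
| | * event/token word at 32-bit word 5, which is written last (after the
| | * barrier) to kick off execution.
| | */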
Andy Shevchenko | 71591d1 | 2018-02-14 20:11:17 +0200 | [diff] [blame] | 1631 | writeq(in_param, hcr + 0); |
| 1632 | writeq(out_param, hcr + 2); |
| 1633 | writel(in_modifier, hcr + 4); |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1634 | /* Memory barrier */ |
| 1635 | wmb(); |
| 1636 | |
Andy Shevchenko | 71591d1 | 2018-02-14 20:11:17 +0200 | [diff] [blame] | 1637 | writel(val, hcr + 5); |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1638 | |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1639 | return 0; |
| 1640 | } |
| 1641 | |
| 1642 | static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, |
| 1643 | unsigned long timeout) |
| 1644 | { |
| 1645 | u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG; |
| 1646 | unsigned long end = 0; |
| 1647 | u32 status = 0; |
| 1648 | |
| 1649 | end = msecs_to_jiffies(timeout) + jiffies; |
| 1650 | while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end)) |
| 1651 | cond_resched(); |
| 1652 | |
| 1653 | if (hns_roce_v1_cmd_pending(hr_dev)) { |
| 1654 | dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n"); |
| 1655 | return -ETIMEDOUT; |
| 1656 | } |
| 1657 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1658 | status = le32_to_cpu((__force __le32) |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 1659 | __raw_readl(hcr + HCR_STATUS_OFFSET)); |
| 1660 | if ((status & STATUS_MASK) != 0x1) { |
| 1661 | dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status); |
| 1662 | return -EBUSY; |
| 1663 | } |
| 1664 | |
| 1665 | return 0; |
| 1666 | } |
| 1667 | |
Wei Hu(Xavier) | b5ff0f6 | 2017-10-26 17:10:25 +0800 | [diff] [blame] | 1668 | static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, |
Parav Pandit | f4df9a7 | 2018-06-05 08:40:16 +0300 | [diff] [blame] | 1669 | int gid_index, const union ib_gid *gid, |
Wei Hu(Xavier) | b5ff0f6 | 2017-10-26 17:10:25 +0800 | [diff] [blame] | 1670 | const struct ib_gid_attr *attr) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1671 | { |
Lang Cheng | 780f339 | 2019-05-24 15:31:22 +0800 | [diff] [blame] | 1672 | unsigned long flags; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1673 | u32 *p = NULL; |
| 1674 | u8 gid_idx = 0; |
| 1675 | |
| 1676 | gid_idx = hns_get_gid_index(hr_dev, port, gid_index); |
| 1677 | |
Lang Cheng | 780f339 | 2019-05-24 15:31:22 +0800 | [diff] [blame] | 1678 | spin_lock_irqsave(&hr_dev->iboe.lock, flags); |
| 1679 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1680 | p = (u32 *)&gid->raw[0]; |
| 1681 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + |
| 1682 | (HNS_ROCE_V1_GID_NUM * gid_idx)); |
| 1683 | |
| 1684 | p = (u32 *)&gid->raw[4]; |
| 1685 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG + |
| 1686 | (HNS_ROCE_V1_GID_NUM * gid_idx)); |
| 1687 | |
| 1688 | p = (u32 *)&gid->raw[8]; |
| 1689 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG + |
| 1690 | (HNS_ROCE_V1_GID_NUM * gid_idx)); |
| 1691 | |
| 1692 | p = (u32 *)&gid->raw[0xc]; |
| 1693 | roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + |
| 1694 | (HNS_ROCE_V1_GID_NUM * gid_idx)); |
Wei Hu(Xavier) | b5ff0f6 | 2017-10-26 17:10:25 +0800 | [diff] [blame] | 1695 | |
Lang Cheng | 780f339 | 2019-05-24 15:31:22 +0800 | [diff] [blame] | 1696 | spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); |
| 1697 | |
Wei Hu(Xavier) | b5ff0f6 | 2017-10-26 17:10:25 +0800 | [diff] [blame] | 1698 | return 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1699 | } |
| 1700 | |
Wei Hu(Xavier) | a74dc41 | 2017-09-29 23:10:09 +0800 | [diff] [blame] | 1701 | static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, |
| 1702 | u8 *addr) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1703 | { |
| 1704 | u32 reg_smac_l; |
| 1705 | u16 reg_smac_h; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1706 | __le32 tmp; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1707 | u16 *p_h; |
| 1708 | u32 *p; |
| 1709 | u32 val; |
| 1710 | |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1711 | /* |
| 1712 | * When the MAC address changes, loopback may fail
| 1713 | * because the smac no longer equals the dmac.
| 1714 | * The reserved qp needs to be released and created again.
| 1715 | */ |
Wei Hu(Xavier) | a74dc41 | 2017-09-29 23:10:09 +0800 | [diff] [blame] | 1716 | if (hr_dev->hw->dereg_mr) { |
| 1717 | int ret; |
| 1718 | |
| 1719 | ret = hns_roce_v1_recreate_lp_qp(hr_dev); |
| 1720 | if (ret && ret != -ETIMEDOUT) |
| 1721 | return ret; |
| 1722 | } |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 1723 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1724 | p = (u32 *)(&addr[0]); |
| 1725 | reg_smac_l = *p; |
| 1726 | roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG + |
| 1727 | PHY_PORT_OFFSET * phy_port); |
| 1728 | |
| 1729 | val = roce_read(hr_dev, |
| 1730 | ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1731 | tmp = cpu_to_le32(val); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1732 | p_h = (u16 *)(&addr[4]); |
| 1733 | reg_smac_h = *p_h; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1734 | roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1735 | ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1736 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1737 | roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, |
| 1738 | val); |
Wei Hu(Xavier) | a74dc41 | 2017-09-29 23:10:09 +0800 | [diff] [blame] | 1739 | |
| 1740 | return 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1741 | } |
| 1742 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1743 | static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, |
| 1744 | enum ib_mtu mtu) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1745 | { |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1746 | __le32 tmp; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1747 | u32 val; |
| 1748 | |
| 1749 | val = roce_read(hr_dev, |
| 1750 | ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1751 | tmp = cpu_to_le32(val); |
| 1752 | roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1753 | ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1754 | val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1755 | roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, |
| 1756 | val); |
| 1757 | } |
| 1758 | |
Yangyang Li | 98a6151 | 2020-06-16 21:37:09 +0800 | [diff] [blame] | 1759 | static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, |
| 1760 | struct hns_roce_mr *mr, |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1761 | unsigned long mtpt_idx) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1762 | { |
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1763 | u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; |
| 1764 | struct ib_device *ibdev = &hr_dev->ib_dev; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1765 | struct hns_roce_v1_mpt_entry *mpt_entry; |
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1766 | dma_addr_t pbl_ba; |
| 1767 | int count; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1768 | int i; |
| 1769 | |
| 1770 | /* MPT filled into mailbox buf */ |
| 1771 | mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf; |
| 1772 | memset(mpt_entry, 0, sizeof(*mpt_entry)); |
| 1773 | |
| 1774 | roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M, |
| 1775 | MPT_BYTE_4_KEY_STATE_S, KEY_VALID); |
| 1776 | roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M, |
| 1777 | MPT_BYTE_4_KEY_S, mr->key); |
| 1778 | roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M, |
| 1779 | MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K); |
| 1780 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0); |
| 1781 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S, |
| 1782 | (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); |
| 1783 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0); |
| 1784 | roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M, |
| 1785 | MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type); |
| 1786 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0); |
| 1787 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S, |
| 1788 | (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); |
| 1789 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S, |
| 1790 | (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); |
| 1791 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S, |
| 1792 | (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0)); |
| 1793 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S, |
| 1794 | 0); |
| 1795 | roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0); |
| 1796 | |
| 1797 | roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, |
| 1798 | MPT_BYTE_12_PBL_ADDR_H_S, 0); |
| 1799 | roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M, |
| 1800 | MPT_BYTE_12_MW_BIND_COUNTER_S, 0); |
| 1801 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1802 | mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova); |
| 1803 | mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32)); |
| 1804 | mpt_entry->length = cpu_to_le32((u32)mr->size); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1805 | |
| 1806 | roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M, |
| 1807 | MPT_BYTE_28_PD_S, mr->pd); |
| 1808 | roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M, |
| 1809 | MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx); |
| 1810 | roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, |
| 1811 | MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); |
| 1812 | |
Stephen Boyd | ad61dd3 | 2017-05-08 15:57:50 -0700 | [diff] [blame] | 1813 | /* DMA memory registration: no PBL to fill in */
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1814 | if (mr->type == MR_TYPE_DMA) |
| 1815 | return 0; |
| 1816 | |
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1817 | count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, |
| 1818 | ARRAY_SIZE(pages), &pbl_ba); |
| 1819 | if (count < 1) { |
| 1820 | ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count); |
| 1821 | return -ENOBUFS; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1822 | } |
| 1823 | |
| 1824 | /* Register user mr */ |
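| | /*
| | * The hip06 MPT entry stores the first page addresses inline: each of
| | * PA0..PA6 is split across the low/high fields filled in by the
| | * switch below, and pages[] holds at most HNS_ROCE_MAX_INNER_MTPT_NUM
| | * entries fetched from the PBL mtr above.
| | */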
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1825 | for (i = 0; i < count; i++) { |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1826 | switch (i) { |
| 1827 | case 0: |
| 1828 | mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i])); |
| 1829 | roce_set_field(mpt_entry->mpt_byte_36, |
| 1830 | MPT_BYTE_36_PA0_H_M, |
| 1831 | MPT_BYTE_36_PA0_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1832 | (u32)(pages[i] >> PAGES_SHIFT_32)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1833 | break; |
| 1834 | case 1: |
| 1835 | roce_set_field(mpt_entry->mpt_byte_36, |
| 1836 | MPT_BYTE_36_PA1_L_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1837 | MPT_BYTE_36_PA1_L_S, (u32)(pages[i])); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1838 | roce_set_field(mpt_entry->mpt_byte_40, |
| 1839 | MPT_BYTE_40_PA1_H_M, |
| 1840 | MPT_BYTE_40_PA1_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1841 | (u32)(pages[i] >> PAGES_SHIFT_24)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1842 | break; |
| 1843 | case 2: |
| 1844 | roce_set_field(mpt_entry->mpt_byte_40, |
| 1845 | MPT_BYTE_40_PA2_L_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1846 | MPT_BYTE_40_PA2_L_S, (u32)(pages[i])); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1847 | roce_set_field(mpt_entry->mpt_byte_44, |
| 1848 | MPT_BYTE_44_PA2_H_M, |
| 1849 | MPT_BYTE_44_PA2_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1850 | (u32)(pages[i] >> PAGES_SHIFT_16)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1851 | break; |
| 1852 | case 3: |
| 1853 | roce_set_field(mpt_entry->mpt_byte_44, |
| 1854 | MPT_BYTE_44_PA3_L_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1855 | MPT_BYTE_44_PA3_L_S, (u32)(pages[i])); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1856 | roce_set_field(mpt_entry->mpt_byte_48, |
| 1857 | MPT_BYTE_48_PA3_H_M, |
| 1858 | MPT_BYTE_48_PA3_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1859 | (u32)(pages[i] >> PAGES_SHIFT_8)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1860 | break; |
| 1861 | case 4: |
| 1862 | mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i])); |
| 1863 | roce_set_field(mpt_entry->mpt_byte_56, |
| 1864 | MPT_BYTE_56_PA4_H_M, |
| 1865 | MPT_BYTE_56_PA4_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1866 | (u32)(pages[i] >> PAGES_SHIFT_32)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1867 | break; |
| 1868 | case 5: |
| 1869 | roce_set_field(mpt_entry->mpt_byte_56, |
| 1870 | MPT_BYTE_56_PA5_L_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1871 | MPT_BYTE_56_PA5_L_S, (u32)(pages[i])); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1872 | roce_set_field(mpt_entry->mpt_byte_60, |
| 1873 | MPT_BYTE_60_PA5_H_M, |
| 1874 | MPT_BYTE_60_PA5_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1875 | (u32)(pages[i] >> PAGES_SHIFT_24)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1876 | break; |
| 1877 | case 6: |
| 1878 | roce_set_field(mpt_entry->mpt_byte_60, |
| 1879 | MPT_BYTE_60_PA6_L_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1880 | MPT_BYTE_60_PA6_L_S, (u32)(pages[i])); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1881 | roce_set_field(mpt_entry->mpt_byte_64, |
| 1882 | MPT_BYTE_64_PA6_H_M, |
| 1883 | MPT_BYTE_64_PA6_H_S, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1884 | (u32)(pages[i] >> PAGES_SHIFT_16)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1885 | break; |
| 1886 | default: |
| 1887 | break; |
| 1888 | } |
| 1889 | } |
| 1890 | |
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1891 | mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1892 | roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, |
Xi Wang | 9b2cf76 | 2020-04-28 19:03:39 +0800 | [diff] [blame] | 1893 | MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1894 | |
| 1895 | return 0; |
| 1896 | } |
| 1897 | |
| 1898 | static void *get_cqe(struct hns_roce_cq *hr_cq, int n) |
| 1899 | { |
Xi Wang | 744b7bd | 2020-04-13 19:58:11 +0800 | [diff] [blame] | 1900 | return hns_roce_buf_offset(hr_cq->mtr.kmem, |
| 1901 | n * HNS_ROCE_V1_CQE_ENTRY_SIZE); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1902 | } |
| 1903 | |
| 1904 | static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) |
| 1905 | { |
| 1906 | struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); |
| 1907 | |
| 1908 | /* Get the cqe only when its owner bit is the inverse of the MSB of cons_idx */
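| | /*
| | * (n & cq_depth) extracts the wrap parity of the consumer index
| | * (assuming cq_depth is a power of two), which toggles every time the
| | * CQ buffer wraps; the cqe is returned only when its owner bit and
| | * this parity differ.
| | */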
| 1909 | return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^ |
Yixian Liu | e2b2744 | 2019-11-18 10:34:50 +0800 | [diff] [blame] | 1910 | !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1911 | } |
| 1912 | |
| 1913 | static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) |
| 1914 | { |
| 1915 | return get_sw_cqe(hr_cq, hr_cq->cons_index); |
| 1916 | } |
| 1917 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1918 | static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1919 | { |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1920 | __le32 doorbell[2]; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1921 | |
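| | /*
| | * The SW-to-HW doorbell is two 32-bit words: word 0 carries the new
| | * consumer index masked to twice the CQ depth, word 1 carries the HW
| | * sync bit, the "others" command (3) and the CQN, as assembled below.
| | */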
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 1922 | doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1)); |
Arnd Bergmann | 5b0ff9a | 2017-03-24 23:02:48 +0100 | [diff] [blame] | 1923 | doorbell[1] = 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1924 | roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); |
| 1925 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, |
| 1926 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); |
| 1927 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, |
| 1928 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0); |
| 1929 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, |
| 1930 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn); |
| 1931 | |
| 1932 | hns_roce_write64_k(doorbell, hr_cq->cq_db_l); |
| 1933 | } |
| 1934 | |
| 1935 | static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, |
| 1936 | struct hns_roce_srq *srq) |
| 1937 | { |
| 1938 | struct hns_roce_cqe *cqe, *dest; |
| 1939 | u32 prod_index; |
| 1940 | int nfreed = 0; |
| 1941 | u8 owner_bit; |
| 1942 | |
| 1943 | for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index); |
| 1944 | ++prod_index) { |
| 1945 | if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) |
| 1946 | break; |
| 1947 | } |
| 1948 | |
| 1949 | /* |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 1950 | * Now walk backwards through the CQ, removing CQ entries
| 1951 | * that match our QP by overwriting them with the entries that follow.
| 1952 | */ |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1953 | while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { |
| 1954 | cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe); |
| 1955 | if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, |
| 1956 | CQE_BYTE_16_LOCAL_QPN_S) & |
| 1957 | HNS_ROCE_CQE_QPN_MASK) == qpn) { |
| 1958 | /* SRQ is not supported in the v1 engine */
| 1959 | ++nfreed; |
| 1960 | } else if (nfreed) { |
| 1961 | dest = get_cqe(hr_cq, (prod_index + nfreed) & |
| 1962 | hr_cq->ib_cq.cqe); |
| 1963 | owner_bit = roce_get_bit(dest->cqe_byte_4, |
| 1964 | CQE_BYTE_4_OWNER_S); |
| 1965 | memcpy(dest, cqe, sizeof(*cqe)); |
| 1966 | roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S, |
| 1967 | owner_bit); |
| 1968 | } |
| 1969 | } |
| 1970 | |
| 1971 | if (nfreed) { |
| 1972 | hr_cq->cons_index += nfreed; |
| 1973 | /* |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 1974 | * Make sure update of buffer contents is done before |
| 1975 | * updating consumer index. |
| 1976 | */ |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1977 | wmb(); |
| 1978 | |
Lijun Ou | a4be892 | 2016-09-20 17:06:54 +0100 | [diff] [blame] | 1979 | hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1980 | } |
| 1981 | } |
| 1982 | |
| 1983 | static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, |
| 1984 | struct hns_roce_srq *srq) |
| 1985 | { |
| 1986 | spin_lock_irq(&hr_cq->lock); |
| 1987 | __hns_roce_v1_cq_clean(hr_cq, qpn, srq); |
| 1988 | spin_unlock_irq(&hr_cq->lock); |
| 1989 | } |
| 1990 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 1991 | static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev, |
| 1992 | struct hns_roce_cq *hr_cq, void *mb_buf, |
Yixian Liu | e2b2744 | 2019-11-18 10:34:50 +0800 | [diff] [blame] | 1993 | u64 *mtts, dma_addr_t dma_handle) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 1994 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 1995 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
| 1996 | struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; |
| 1997 | struct hns_roce_cq_context *cq_context = mb_buf; |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 1998 | dma_addr_t tptr_dma_addr; |
| 1999 | int offset; |
| 2000 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2001 | memset(cq_context, 0, sizeof(*cq_context)); |
| 2002 | |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 2003 | /* Get the tptr for this CQ. */ |
| 2004 | offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE; |
| 2005 | tptr_dma_addr = tptr_buf->map + offset; |
| 2006 | hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset); |
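| | /*
| | * Each CQ owns one HNS_ROCE_V1_TPTR_ENTRY_SIZE-byte slot in the shared
| | * tptr buffer allocated in hns_roce_tptr_init(), so CQ number N keeps
| | * its tail pointer at tptr_buf->map + N * HNS_ROCE_V1_TPTR_ENTRY_SIZE.
| | */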
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2007 | |
| 2008 | /* Register cq_context members */ |
| 2009 | roce_set_field(cq_context->cqc_byte_4, |
| 2010 | CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M, |
| 2011 | CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID); |
| 2012 | roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M, |
| 2013 | CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2014 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2015 | cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2016 | |
| 2017 | roce_set_field(cq_context->cqc_byte_12, |
| 2018 | CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M, |
| 2019 | CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S, |
| 2020 | ((u64)dma_handle >> 32)); |
| 2021 | roce_set_field(cq_context->cqc_byte_12, |
| 2022 | CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M, |
| 2023 | CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S, |
Yixian Liu | e2b2744 | 2019-11-18 10:34:50 +0800 | [diff] [blame] | 2024 | ilog2(hr_cq->cq_depth)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2025 | roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M, |
Yixian Liu | e2b2744 | 2019-11-18 10:34:50 +0800 | [diff] [blame] | 2026 | CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2027 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2028 | cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0])); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2029 | |
| 2030 | roce_set_field(cq_context->cqc_byte_20, |
| 2031 | CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2032 | CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2033 | /* Dedicated hardware, directly set 0 */ |
| 2034 | roce_set_field(cq_context->cqc_byte_20, |
| 2035 | CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M, |
| 2036 | CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0); |
| 2037 | /*
| 2038 | * 44 = 32 + 12. When writing the address to hardware, shift right by
| 2039 | * 12 because a 4K page is used, and by a further 32 to obtain the
| 2040 | * high 32-bit value that is written to hardware.
| 2041 | */ |
| 2042 | roce_set_field(cq_context->cqc_byte_20, |
| 2043 | CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M, |
| 2044 | CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S, |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 2045 | tptr_dma_addr >> 44); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2046 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2047 | cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2048 | |
| 2049 | roce_set_field(cq_context->cqc_byte_32, |
| 2050 | CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M, |
| 2051 | CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0); |
| 2052 | roce_set_bit(cq_context->cqc_byte_32, |
| 2053 | CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0); |
| 2054 | roce_set_bit(cq_context->cqc_byte_32, |
| 2055 | CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0); |
| 2056 | roce_set_bit(cq_context->cqc_byte_32, |
| 2057 | CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0); |
| 2058 | roce_set_bit(cq_context->cqc_byte_32, |
| 2059 | CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S, |
| 2060 | 0); |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 2061 | /* The initial value of cq's ci is 0 */ |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2062 | roce_set_field(cq_context->cqc_byte_32, |
| 2063 | CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M, |
| 2064 | CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2065 | } |
| 2066 | |
oulijun | b156269 | 2017-10-19 11:52:40 +0800 | [diff] [blame] | 2067 | static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) |
| 2068 | { |
| 2069 | return -EOPNOTSUPP; |
| 2070 | } |
| 2071 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 2072 | static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, |
| 2073 | enum ib_cq_notify_flags flags) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2074 | { |
| 2075 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); |
| 2076 | u32 notification_flag; |
Lang Cheng | bfe8603 | 2019-08-21 21:14:32 +0800 | [diff] [blame] | 2077 | __le32 doorbell[2] = {}; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2078 | |
| 2079 | notification_flag = (flags & IB_CQ_SOLICITED_MASK) == |
| 2080 | IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; |
| 2081 | /* |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 2082 | * flags = 0: Notification Flag = 1, next completion |
| 2083 | * flags = 1: Notification Flag = 0, solicited completion only |
| 2084 | */ |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2085 | doorbell[0] = |
| 2086 | cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1)); |
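| | /* |
| | * Illustration (assumption, not in the original source): with a CQ |
| | * depth of 256 the mask is (256 << 1) - 1 = 511, so the doorbell |
| | * carries cons_index modulo 2 * cq_depth; the extra wrap bit |
| | * presumably lets hardware distinguish a full ring from an empty one. |
| | */ |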
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2087 | roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); |
| 2088 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, |
| 2089 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); |
| 2090 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, |
| 2091 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1); |
| 2092 | roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, |
| 2093 | ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, |
| 2094 | hr_cq->cqn | notification_flag); |
| 2095 | |
| 2096 | hns_roce_write64_k(doorbell, hr_cq->cq_db_l); |
| 2097 | |
kbuild test robot | 87809f8 | 2017-07-25 13:36:25 +0800 | [diff] [blame] | 2098 | return 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2099 | } |
| 2100 | |
| 2101 | static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, |
| 2102 | struct hns_roce_qp **cur_qp, struct ib_wc *wc) |
| 2103 | { |
| 2104 | int qpn; |
| 2105 | int is_send; |
| 2106 | u16 wqe_ctr; |
| 2107 | u32 status; |
| 2108 | u32 opcode; |
| 2109 | struct hns_roce_cqe *cqe; |
| 2110 | struct hns_roce_qp *hr_qp; |
| 2111 | struct hns_roce_wq *wq; |
| 2112 | struct hns_roce_wqe_ctrl_seg *sq_wqe; |
| 2113 | struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); |
| 2114 | struct device *dev = &hr_dev->pdev->dev; |
| 2115 | |
| 2116 | /* Find the CQE according to the consumer index */ |
| 2117 | cqe = next_cqe_sw(hr_cq); |
| 2118 | if (!cqe) |
| 2119 | return -EAGAIN; |
| 2120 | |
| 2121 | ++hr_cq->cons_index; |
| 2122 | /* Memory barrier */ |
| 2123 | rmb(); |
| 2124 | /* 0->SQ, 1->RQ */ |
| 2125 | is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S)); |
| 2126 | |
| 2127 | /* The local_qpn in a UD CQE is always 1, so the real QPN must be computed */ |
| 2128 | if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, |
| 2129 | CQE_BYTE_16_LOCAL_QPN_S) <= 1) { |
| 2130 | qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M, |
| 2131 | CQE_BYTE_20_PORT_NUM_S) + |
| 2132 | roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, |
| 2133 | CQE_BYTE_16_LOCAL_QPN_S) * |
| 2134 | HNS_ROCE_MAX_PORTS; |
| 2135 | } else { |
| 2136 | qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, |
| 2137 | CQE_BYTE_16_LOCAL_QPN_S); |
| 2138 | } |
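| | /* |
| | * Worked example (illustration only): a CQE with local_qpn == 1 |
| | * reported on port 2 resolves to qpn = 2 + 1 * HNS_ROCE_MAX_PORTS, |
| | * i.e. the special QPs of different ports end up spaced |
| | * HNS_ROCE_MAX_PORTS apart in the QPN space. |
| | */ |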
| 2139 | |
| 2140 | if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) { |
| 2141 | hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); |
| 2142 | if (unlikely(!hr_qp)) { |
| 2143 | dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n", |
| 2144 | hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK)); |
| 2145 | return -EINVAL; |
| 2146 | } |
| 2147 | |
| 2148 | *cur_qp = hr_qp; |
| 2149 | } |
| 2150 | |
| 2151 | wc->qp = &(*cur_qp)->ibqp; |
| 2152 | wc->vendor_err = 0; |
| 2153 | |
| 2154 | status = roce_get_field(cqe->cqe_byte_4, |
| 2155 | CQE_BYTE_4_STATUS_OF_THE_OPERATION_M, |
| 2156 | CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) & |
| 2157 | HNS_ROCE_CQE_STATUS_MASK; |
| 2158 | switch (status) { |
| 2159 | case HNS_ROCE_CQE_SUCCESS: |
| 2160 | wc->status = IB_WC_SUCCESS; |
| 2161 | break; |
| 2162 | case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR: |
| 2163 | wc->status = IB_WC_LOC_LEN_ERR; |
| 2164 | break; |
| 2165 | case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR: |
| 2166 | wc->status = IB_WC_LOC_QP_OP_ERR; |
| 2167 | break; |
| 2168 | case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR: |
| 2169 | wc->status = IB_WC_LOC_PROT_ERR; |
| 2170 | break; |
| 2171 | case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR: |
| 2172 | wc->status = IB_WC_WR_FLUSH_ERR; |
| 2173 | break; |
| 2174 | case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR: |
| 2175 | wc->status = IB_WC_MW_BIND_ERR; |
| 2176 | break; |
| 2177 | case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR: |
| 2178 | wc->status = IB_WC_BAD_RESP_ERR; |
| 2179 | break; |
| 2180 | case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR: |
| 2181 | wc->status = IB_WC_LOC_ACCESS_ERR; |
| 2182 | break; |
| 2183 | case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: |
| 2184 | wc->status = IB_WC_REM_INV_REQ_ERR; |
| 2185 | break; |
| 2186 | case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR: |
| 2187 | wc->status = IB_WC_REM_ACCESS_ERR; |
| 2188 | break; |
| 2189 | case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR: |
| 2190 | wc->status = IB_WC_REM_OP_ERR; |
| 2191 | break; |
| 2192 | case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: |
| 2193 | wc->status = IB_WC_RETRY_EXC_ERR; |
| 2194 | break; |
| 2195 | case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR: |
| 2196 | wc->status = IB_WC_RNR_RETRY_EXC_ERR; |
| 2197 | break; |
| 2198 | default: |
| 2199 | wc->status = IB_WC_GENERAL_ERR; |
| 2200 | break; |
| 2201 | } |
| 2202 | |
| 2203 | /* The CQE reported an error status, return directly */ |
| 2204 | if (wc->status != IB_WC_SUCCESS) |
| 2205 | return 0; |
| 2206 | |
| 2207 | if (is_send) { |
| 2208 | /* The CQE corresponds to the SQ */ |
Xi Wang | 6c6e392 | 2020-03-10 19:18:00 +0800 | [diff] [blame] | 2209 | sq_wqe = hns_roce_get_send_wqe(*cur_qp, |
| 2210 | roce_get_field(cqe->cqe_byte_4, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2211 | CQE_BYTE_4_WQE_INDEX_M, |
Xi Wang | 6c6e392 | 2020-03-10 19:18:00 +0800 | [diff] [blame] | 2212 | CQE_BYTE_4_WQE_INDEX_S) & |
Salil | 1bdab40 | 2016-09-20 17:07:12 +0100 | [diff] [blame] | 2213 | ((*cur_qp)->sq.wqe_cnt-1)); |
oulijun | 8b9b8d1 | 2018-02-05 21:14:00 +0800 | [diff] [blame] | 2214 | switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) { |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2215 | case HNS_ROCE_WQE_OPCODE_SEND: |
| 2216 | wc->opcode = IB_WC_SEND; |
| 2217 | break; |
| 2218 | case HNS_ROCE_WQE_OPCODE_RDMA_READ: |
| 2219 | wc->opcode = IB_WC_RDMA_READ; |
| 2220 | wc->byte_len = le32_to_cpu(cqe->byte_cnt); |
| 2221 | break; |
| 2222 | case HNS_ROCE_WQE_OPCODE_RDMA_WRITE: |
| 2223 | wc->opcode = IB_WC_RDMA_WRITE; |
| 2224 | break; |
| 2225 | case HNS_ROCE_WQE_OPCODE_LOCAL_INV: |
| 2226 | wc->opcode = IB_WC_LOCAL_INV; |
| 2227 | break; |
| 2228 | case HNS_ROCE_WQE_OPCODE_UD_SEND: |
| 2229 | wc->opcode = IB_WC_SEND; |
| 2230 | break; |
| 2231 | default: |
| 2232 | wc->status = IB_WC_GENERAL_ERR; |
| 2233 | break; |
| 2234 | } |
oulijun | 8b9b8d1 | 2018-02-05 21:14:00 +0800 | [diff] [blame] | 2235 | wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ? |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2236 | IB_WC_WITH_IMM : 0); |
| 2237 | |
| 2238 | wq = &(*cur_qp)->sq; |
| 2239 | if ((*cur_qp)->sq_signal_bits) { |
| 2240 | /* |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 2241 | * If sq_signal_bits is set, first update the tail pointer to the |
| 2242 | * WQE that the current CQE corresponds to. |
| 2244 | */ |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2245 | wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4, |
| 2246 | CQE_BYTE_4_WQE_INDEX_M, |
| 2247 | CQE_BYTE_4_WQE_INDEX_S); |
| 2248 | wq->tail += (wqe_ctr - (u16)wq->tail) & |
| 2249 | (wq->wqe_cnt - 1); |
| 2250 | } |
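| | /* |
| | * Illustration (not part of the original code): with wqe_cnt = 16, |
| | * tail = 14 and wqe_ctr = 2, (2 - 14) & 15 = 4, so tail advances to |
| | * 18 and 18 & (16 - 1) = 2, i.e. the tail catches up with the index |
| | * of the signaled WQE without ever moving backwards. |
| | */ |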
| 2251 | wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; |
| 2252 | ++wq->tail; |
oulijun | 5f110ac4 | 2017-06-10 18:49:25 +0800 | [diff] [blame] | 2253 | } else { |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2254 | /* The CQE corresponds to the RQ */ |
| 2255 | wc->byte_len = le32_to_cpu(cqe->byte_cnt); |
| 2256 | opcode = roce_get_field(cqe->cqe_byte_4, |
| 2257 | CQE_BYTE_4_OPERATION_TYPE_M, |
| 2258 | CQE_BYTE_4_OPERATION_TYPE_S) & |
| 2259 | HNS_ROCE_CQE_OPCODE_MASK; |
| 2260 | switch (opcode) { |
| 2261 | case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE: |
| 2262 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
| 2263 | wc->wc_flags = IB_WC_WITH_IMM; |
Jason Gunthorpe | ccb8a29 | 2018-01-11 14:43:06 -0700 | [diff] [blame] | 2264 | wc->ex.imm_data = |
| 2265 | cpu_to_be32(le32_to_cpu(cqe->immediate_data)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2266 | break; |
| 2267 | case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE: |
| 2268 | if (roce_get_bit(cqe->cqe_byte_4, |
| 2269 | CQE_BYTE_4_IMM_INDICATOR_S)) { |
| 2270 | wc->opcode = IB_WC_RECV; |
| 2271 | wc->wc_flags = IB_WC_WITH_IMM; |
Jason Gunthorpe | ccb8a29 | 2018-01-11 14:43:06 -0700 | [diff] [blame] | 2272 | wc->ex.imm_data = cpu_to_be32( |
| 2273 | le32_to_cpu(cqe->immediate_data)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2274 | } else { |
| 2275 | wc->opcode = IB_WC_RECV; |
| 2276 | wc->wc_flags = 0; |
| 2277 | } |
| 2278 | break; |
| 2279 | default: |
| 2280 | wc->status = IB_WC_GENERAL_ERR; |
| 2281 | break; |
| 2282 | } |
| 2283 | |
| 2284 | /* Update tail pointer, record wr_id */ |
| 2285 | wq = &(*cur_qp)->rq; |
| 2286 | wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; |
| 2287 | ++wq->tail; |
| 2288 | wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M, |
| 2289 | CQE_BYTE_20_SL_S); |
| 2290 | wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20, |
| 2291 | CQE_BYTE_20_REMOTE_QPN_M, |
| 2292 | CQE_BYTE_20_REMOTE_QPN_S); |
| 2293 | wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20, |
| 2294 | CQE_BYTE_20_GRH_PRESENT_S) ? |
| 2295 | IB_WC_GRH : 0); |
| 2296 | wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28, |
| 2297 | CQE_BYTE_28_P_KEY_IDX_M, |
| 2298 | CQE_BYTE_28_P_KEY_IDX_S); |
| 2299 | } |
| 2300 | |
| 2301 | return 0; |
| 2302 | } |
| 2303 | |
| 2304 | int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) |
| 2305 | { |
| 2306 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); |
| 2307 | struct hns_roce_qp *cur_qp = NULL; |
| 2308 | unsigned long flags; |
| 2309 | int npolled; |
| 2310 | int ret = 0; |
| 2311 | |
| 2312 | spin_lock_irqsave(&hr_cq->lock, flags); |
| 2313 | |
| 2314 | for (npolled = 0; npolled < num_entries; ++npolled) { |
| 2315 | ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled); |
| 2316 | if (ret) |
| 2317 | break; |
| 2318 | } |
| 2319 | |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 2320 | if (npolled) { |
| 2321 | *hr_cq->tptr_addr = hr_cq->cons_index & |
| 2322 | ((hr_cq->cq_depth << 1) - 1); |
| 2323 | |
| 2324 | /* Memory barrier */ |
| 2325 | wmb(); |
Lijun Ou | a4be892 | 2016-09-20 17:06:54 +0100 | [diff] [blame] | 2326 | hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); |
Wei Hu (Xavier) | 8f3e9f3 | 2016-11-23 19:41:00 +0000 | [diff] [blame] | 2327 | } |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2328 | |
| 2329 | spin_unlock_irqrestore(&hr_cq->lock, flags); |
| 2330 | |
| 2331 | if (ret == 0 || ret == -EAGAIN) |
| 2332 | return npolled; |
| 2333 | else |
| 2334 | return ret; |
| 2335 | } |
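| | /* |
| | * Usage sketch (illustration only; the wc array size and the ULP |
| | * variable cq are assumptions): a consumer reaches this handler |
| | * through the verbs core, e.g.: |
| | * |
| | * struct ib_wc wc[8]; |
| | * int i, n; |
| | * |
| | * n = ib_poll_cq(cq, 8, wc); |
| | * for (i = 0; i < n; i++) |
| | * if (wc[i].status != IB_WC_SUCCESS) |
| | * pr_err("wc %d failed with status %d\n", i, wc[i].status); |
| | */ |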
| 2336 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 2337 | static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, |
| 2338 | struct hns_roce_hem_table *table, int obj, |
| 2339 | int step_idx) |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2340 | { |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 2341 | struct hns_roce_v1_priv *priv = hr_dev->priv; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2342 | struct device *dev = &hr_dev->pdev->dev; |
Colin Ian King | a511f82 | 2019-05-31 10:21:00 +0100 | [diff] [blame] | 2343 | long end = HW_SYNC_TIMEOUT_MSECS; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2344 | __le32 bt_cmd_val[2] = {0}; |
Weihang Li | 14ba873 | 2020-05-22 21:02:56 +0800 | [diff] [blame] | 2345 | unsigned long flags = 0; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2346 | void __iomem *bt_cmd; |
| 2347 | u64 bt_ba = 0; |
| 2348 | |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2349 | switch (table->type) { |
| 2350 | case HEM_TYPE_QPC: |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2351 | bt_ba = priv->bt_table.qpc_buf.map >> 12; |
| 2352 | break; |
| 2353 | case HEM_TYPE_MTPT: |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2354 | bt_ba = priv->bt_table.mtpt_buf.map >> 12; |
| 2355 | break; |
| 2356 | case HEM_TYPE_CQC: |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2357 | bt_ba = priv->bt_table.cqc_buf.map >> 12; |
| 2358 | break; |
| 2359 | case HEM_TYPE_SRQC: |
| 2360 | dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n"); |
| 2361 | return -EINVAL; |
| 2362 | default: |
| 2363 | return 0; |
| 2364 | } |
Lang Cheng | bfe8603 | 2019-08-21 21:14:32 +0800 | [diff] [blame] | 2365 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, |
| 2366 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2367 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, |
| 2368 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); |
| 2369 | roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); |
| 2370 | roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); |
| 2371 | |
| 2372 | spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); |
| 2373 | |
| 2374 | bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; |
| 2375 | |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2376 | while (1) { |
| 2377 | if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { |
Leon Romanovsky | da39292 | 2019-06-16 15:05:58 +0300 | [diff] [blame] | 2378 | if (!end) { |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2379 | dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n"); |
| 2380 | spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, |
| 2381 | flags); |
| 2382 | return -EBUSY; |
| 2383 | } |
| 2384 | } else { |
| 2385 | break; |
| 2386 | } |
Lang Cheng | 669cefb | 2019-05-24 15:31:23 +0800 | [diff] [blame] | 2387 | mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); |
| 2388 | end -= HW_SYNC_SLEEP_TIME_INTERVAL; |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2389 | } |
| 2390 | |
Lang Cheng | bfe8603 | 2019-08-21 21:14:32 +0800 | [diff] [blame] | 2391 | bt_cmd_val[0] = cpu_to_le32(bt_ba); |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 2392 | roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, |
| 2393 | ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); |
| 2394 | hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); |
| 2395 | |
| 2396 | spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); |
| 2397 | |
| 2398 | return 0; |
| 2399 | } |
| 2400 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2401 | static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2402 | enum hns_roce_qp_state cur_state, |
| 2403 | enum hns_roce_qp_state new_state, |
| 2404 | struct hns_roce_qp_context *context, |
| 2405 | struct hns_roce_qp *hr_qp) |
| 2406 | { |
| 2407 | static const u16 |
| 2408 | op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = { |
| 2409 | [HNS_ROCE_QP_STATE_RST] = { |
| 2410 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, |
| 2411 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, |
| 2412 | [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, |
| 2413 | }, |
| 2414 | [HNS_ROCE_QP_STATE_INIT] = { |
| 2415 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, |
| 2416 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, |
| 2417 | /* Note: In the v1 engine, HW doesn't support INIT2INIT. |
| 2418 | * We use the RST2INIT cmd instead of INIT2INIT. |
| 2419 | */ |
| 2420 | [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, |
| 2421 | [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP, |
| 2422 | }, |
| 2423 | [HNS_ROCE_QP_STATE_RTR] = { |
| 2424 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, |
| 2425 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, |
| 2426 | [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP, |
| 2427 | }, |
| 2428 | [HNS_ROCE_QP_STATE_RTS] = { |
| 2429 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, |
| 2430 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, |
| 2431 | [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP, |
| 2432 | [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP, |
| 2433 | }, |
| 2434 | [HNS_ROCE_QP_STATE_SQD] = { |
| 2435 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, |
| 2436 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, |
| 2437 | [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP, |
| 2438 | [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP, |
| 2439 | }, |
| 2440 | [HNS_ROCE_QP_STATE_ERR] = { |
| 2441 | [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, |
| 2442 | [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, |
| 2443 | } |
| 2444 | }; |
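| | /* |
| | * Example lookup (illustration only): |
| | * op[HNS_ROCE_QP_STATE_INIT][HNS_ROCE_QP_STATE_RTR] yields |
| | * HNS_ROCE_CMD_INIT2RTR_QP, while an entry left zero (e.g. |
| | * RTR -> INIT) is rejected below as an unsupported transition. |
| | */ |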
| 2445 | |
| 2446 | struct hns_roce_cmd_mailbox *mailbox; |
| 2447 | struct device *dev = &hr_dev->pdev->dev; |
| 2448 | int ret = 0; |
| 2449 | |
| 2450 | if (cur_state >= HNS_ROCE_QP_NUM_STATE || |
| 2451 | new_state >= HNS_ROCE_QP_NUM_STATE || |
| 2452 | !op[cur_state][new_state]) { |
| 2453 | dev_err(dev, "[modify_qp] unsupported state transition from %d to %d\n", |
| 2454 | cur_state, new_state); |
| 2455 | return -EINVAL; |
| 2456 | } |
| 2457 | |
| 2458 | if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP) |
| 2459 | return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, |
| 2460 | HNS_ROCE_CMD_2RST_QP, |
Wei Hu (Xavier) | 6b877c3 | 2016-11-23 19:41:05 +0000 | [diff] [blame] | 2461 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2462 | |
| 2463 | if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP) |
| 2464 | return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, |
| 2465 | HNS_ROCE_CMD_2ERR_QP, |
Wei Hu (Xavier) | 6b877c3 | 2016-11-23 19:41:05 +0000 | [diff] [blame] | 2466 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2467 | |
| 2468 | mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); |
| 2469 | if (IS_ERR(mailbox)) |
| 2470 | return PTR_ERR(mailbox); |
| 2471 | |
| 2472 | memcpy(mailbox->buf, context, sizeof(*context)); |
| 2473 | |
| 2474 | ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, |
| 2475 | op[cur_state][new_state], |
Wei Hu (Xavier) | 6b877c3 | 2016-11-23 19:41:05 +0000 | [diff] [blame] | 2476 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2477 | |
| 2478 | hns_roce_free_cmd_mailbox(hr_dev, mailbox); |
| 2479 | return ret; |
| 2480 | } |
| 2481 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2482 | static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, |
| 2483 | u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba) |
| 2484 | { |
| 2485 | struct ib_device *ibdev = &hr_dev->ib_dev; |
| 2486 | int rq_pa_start; |
| 2487 | int count; |
| 2488 | |
| 2489 | count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba); |
| 2490 | if (count < 1) { |
| 2491 | ibdev_err(ibdev, "Failed to find SQ ba\n"); |
| 2492 | return -ENOBUFS; |
| 2493 | } |
| 2494 | rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift; |
| 2495 | count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start, rq_ba, 1, |
| 2496 | NULL); |
| 2497 | if (!count) { |
| 2498 | ibdev_err(ibdev, "Failed to find RQ ba\n"); |
| 2499 | return -ENOBUFS; |
| 2500 | } |
| 2501 | |
| 2502 | return 0; |
| 2503 | } |
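| | /* |
| | * Usage sketch (illustration only): the callers below do |
| | * |
| | * if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) |
| | * goto out; |
| | * |
| | * and then program sq_ba/rq_ba as the current SQ/RQ WQE base |
| | * addresses and dma_handle as the SQ/RQ base-address-table address. |
| | */ |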
| 2504 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2505 | static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, |
| 2506 | int attr_mask, enum ib_qp_state cur_state, |
| 2507 | enum ib_qp_state new_state) |
| 2508 | { |
| 2509 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
| 2510 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
| 2511 | struct hns_roce_sqp_context *context; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2512 | dma_addr_t dma_handle = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2513 | u32 __iomem *addr; |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2514 | u64 sq_ba = 0; |
| 2515 | u64 rq_ba = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2516 | __le32 tmp; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2517 | u32 reg_val; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2518 | |
| 2519 | context = kzalloc(sizeof(*context), GFP_KERNEL); |
| 2520 | if (!context) |
| 2521 | return -ENOMEM; |
| 2522 | |
| 2523 | /* Search QP buf's MTTs */ |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2524 | if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2525 | goto out; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2526 | |
| 2527 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
| 2528 | roce_set_field(context->qp1c_bytes_4, |
| 2529 | QP1C_BYTES_4_SQ_WQE_SHIFT_M, |
| 2530 | QP1C_BYTES_4_SQ_WQE_SHIFT_S, |
| 2531 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); |
| 2532 | roce_set_field(context->qp1c_bytes_4, |
| 2533 | QP1C_BYTES_4_RQ_WQE_SHIFT_M, |
| 2534 | QP1C_BYTES_4_RQ_WQE_SHIFT_S, |
| 2535 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); |
| 2536 | roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M, |
| 2537 | QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn); |
| 2538 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2539 | context->sq_rq_bt_l = cpu_to_le32(dma_handle); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2540 | roce_set_field(context->qp1c_bytes_12, |
| 2541 | QP1C_BYTES_12_SQ_RQ_BT_H_M, |
| 2542 | QP1C_BYTES_12_SQ_RQ_BT_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2543 | upper_32_bits(dma_handle)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2544 | |
| 2545 | roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, |
| 2546 | QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); |
| 2547 | roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, |
Lijun Ou | 7716809 | 2016-09-15 23:48:10 +0100 | [diff] [blame] | 2548 | QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2549 | roce_set_bit(context->qp1c_bytes_16, |
| 2550 | QP1C_BYTES_16_SIGNALING_TYPE_S, |
Lang Cheng | bfe8603 | 2019-08-21 21:14:32 +0800 | [diff] [blame] | 2551 | hr_qp->sq_signal_bits); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2552 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, |
| 2553 | 1); |
| 2554 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, |
| 2555 | 1); |
| 2556 | roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S, |
| 2557 | 0); |
| 2558 | |
| 2559 | roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M, |
| 2560 | QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head); |
| 2561 | roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M, |
| 2562 | QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index); |
| 2563 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2564 | context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2565 | |
| 2566 | roce_set_field(context->qp1c_bytes_28, |
| 2567 | QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M, |
| 2568 | QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2569 | upper_32_bits(rq_ba)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2570 | roce_set_field(context->qp1c_bytes_28, |
| 2571 | QP1C_BYTES_28_RQ_CUR_IDX_M, |
| 2572 | QP1C_BYTES_28_RQ_CUR_IDX_S, 0); |
| 2573 | |
| 2574 | roce_set_field(context->qp1c_bytes_32, |
| 2575 | QP1C_BYTES_32_RX_CQ_NUM_M, |
| 2576 | QP1C_BYTES_32_RX_CQ_NUM_S, |
| 2577 | to_hr_cq(ibqp->recv_cq)->cqn); |
| 2578 | roce_set_field(context->qp1c_bytes_32, |
| 2579 | QP1C_BYTES_32_TX_CQ_NUM_M, |
| 2580 | QP1C_BYTES_32_TX_CQ_NUM_S, |
| 2581 | to_hr_cq(ibqp->send_cq)->cqn); |
| 2582 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2583 | context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2584 | |
| 2585 | roce_set_field(context->qp1c_bytes_40, |
| 2586 | QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M, |
| 2587 | QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2588 | upper_32_bits(sq_ba)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2589 | roce_set_field(context->qp1c_bytes_40, |
| 2590 | QP1C_BYTES_40_SQ_CUR_IDX_M, |
| 2591 | QP1C_BYTES_40_SQ_CUR_IDX_S, 0); |
| 2592 | |
| 2593 | /* Copy context to QP1C register */ |
Bart Van Assche | cc4ed08 | 2017-10-11 10:49:00 -0700 | [diff] [blame] | 2594 | addr = (u32 __iomem *)(hr_dev->reg_base + |
| 2595 | ROCEE_QP1C_CFG0_0_REG + |
| 2596 | hr_qp->phy_port * sizeof(*context)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2597 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2598 | writel(le32_to_cpu(context->qp1c_bytes_4), addr); |
| 2599 | writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1); |
| 2600 | writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2); |
| 2601 | writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3); |
| 2602 | writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4); |
| 2603 | writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5); |
| 2604 | writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6); |
| 2605 | writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7); |
| 2606 | writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8); |
| 2607 | writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9); |
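| | /* |
| | * Layout note (illustration): the ten writel() calls above push the |
| | * QP1C context as consecutive 32-bit registers starting at |
| | * ROCEE_QP1C_CFG0_0_REG + phy_port * sizeof(*context), so e.g. |
| | * phy_port 1 targets the second per-port register window. |
| | */ |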
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2608 | } |
| 2609 | |
| 2610 | /* Modify QP1C status */ |
| 2611 | reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + |
Lijun Ou | 7716809 | 2016-09-15 23:48:10 +0100 | [diff] [blame] | 2612 | hr_qp->phy_port * sizeof(*context)); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2613 | tmp = cpu_to_le32(reg_val); |
| 2614 | roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2615 | ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2616 | reg_val = le32_to_cpu(tmp); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2617 | roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + |
Lijun Ou | 7716809 | 2016-09-15 23:48:10 +0100 | [diff] [blame] | 2618 | hr_qp->phy_port * sizeof(*context), reg_val); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2619 | |
| 2620 | hr_qp->state = new_state; |
| 2621 | if (new_state == IB_QPS_RESET) { |
| 2622 | hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, |
| 2623 | ibqp->srq ? to_hr_srq(ibqp->srq) : NULL); |
| 2624 | if (ibqp->send_cq != ibqp->recv_cq) |
| 2625 | hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), |
| 2626 | hr_qp->qpn, NULL); |
| 2627 | |
| 2628 | hr_qp->rq.head = 0; |
| 2629 | hr_qp->rq.tail = 0; |
| 2630 | hr_qp->sq.head = 0; |
| 2631 | hr_qp->sq.tail = 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2632 | } |
| 2633 | |
| 2634 | kfree(context); |
| 2635 | return 0; |
| 2636 | |
| 2637 | out: |
| 2638 | kfree(context); |
| 2639 | return -EINVAL; |
| 2640 | } |
| 2641 | |
Lang Cheng | a97bf49 | 2020-04-15 16:14:35 +0800 | [diff] [blame] | 2642 | static bool check_qp_state(enum ib_qp_state cur_state, |
| 2643 | enum ib_qp_state new_state) |
| 2644 | { |
| 2645 | static const bool sm[][IB_QPS_ERR + 1] = { |
| 2646 | [IB_QPS_RESET] = { [IB_QPS_RESET] = true, |
| 2647 | [IB_QPS_INIT] = true }, |
| 2648 | [IB_QPS_INIT] = { [IB_QPS_RESET] = true, |
| 2649 | [IB_QPS_INIT] = true, |
| 2650 | [IB_QPS_RTR] = true, |
| 2651 | [IB_QPS_ERR] = true }, |
| 2652 | [IB_QPS_RTR] = { [IB_QPS_RESET] = true, |
| 2653 | [IB_QPS_RTS] = true, |
| 2654 | [IB_QPS_ERR] = true }, |
| 2655 | [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }, |
| 2656 | [IB_QPS_SQD] = {}, |
| 2657 | [IB_QPS_SQE] = {}, |
| 2658 | [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } |
| 2659 | }; |
| 2660 | |
| 2661 | return sm[cur_state][new_state]; |
| 2662 | } |
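| | /* |
| | * Example (illustration only): check_qp_state(IB_QPS_INIT, IB_QPS_RTR) |
| | * returns true, while check_qp_state(IB_QPS_RTS, IB_QPS_RTS) returns |
| | * false because RTS -> RTS is not listed in the table above. |
| | */ |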
| 2663 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2664 | static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, |
| 2665 | int attr_mask, enum ib_qp_state cur_state, |
| 2666 | enum ib_qp_state new_state) |
| 2667 | { |
| 2668 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
| 2669 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
| 2670 | struct device *dev = &hr_dev->pdev->dev; |
| 2671 | struct hns_roce_qp_context *context; |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2672 | const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2673 | dma_addr_t dma_handle_2 = 0; |
| 2674 | dma_addr_t dma_handle = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2675 | __le32 doorbell[2] = {0}; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2676 | u64 *mtts_2 = NULL; |
| 2677 | int ret = -EINVAL; |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2678 | u64 sq_ba = 0; |
| 2679 | u64 rq_ba = 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2680 | int port; |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2681 | u8 port_num; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2682 | u8 *dmac; |
| 2683 | u8 *smac; |
| 2684 | |
Lang Cheng | a97bf49 | 2020-04-15 16:14:35 +0800 | [diff] [blame] | 2685 | if (!check_qp_state(cur_state, new_state)) { |
| 2686 | ibdev_err(ibqp->device, |
| 2687 | "not support QP(%u) status from %d to %d\n", |
| 2688 | ibqp->qp_num, cur_state, new_state); |
| 2689 | return -EINVAL; |
| 2690 | } |
| 2691 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2692 | context = kzalloc(sizeof(*context), GFP_KERNEL); |
| 2693 | if (!context) |
| 2694 | return -ENOMEM; |
| 2695 | |
| 2696 | /* Search qp buf's mtts */ |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2697 | if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2698 | goto out; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2699 | |
| 2700 | /* Search IRRL's mtts */ |
Shaobo Xu | 6a93c77 | 2017-08-30 17:23:08 +0800 | [diff] [blame] | 2701 | mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, |
| 2702 | hr_qp->qpn, &dma_handle_2); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2703 | if (mtts_2 == NULL) { |
| 2704 | dev_err(dev, "qp irrl_table find failed\n"); |
| 2705 | goto out; |
| 2706 | } |
| 2707 | |
| 2708 | /* |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 2709 | * Reset to init |
| 2710 | * Mandatory param: |
| 2711 | * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS |
| 2712 | * Optional param: NA |
| 2713 | */ |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2714 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
| 2715 | roce_set_field(context->qpc_bytes_4, |
| 2716 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, |
| 2717 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, |
| 2718 | to_hr_qp_type(hr_qp->ibqp.qp_type)); |
| 2719 | |
| 2720 | roce_set_bit(context->qpc_bytes_4, |
| 2721 | QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); |
| 2722 | roce_set_bit(context->qpc_bytes_4, |
| 2723 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, |
| 2724 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); |
| 2725 | roce_set_bit(context->qpc_bytes_4, |
| 2726 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, |
| 2727 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) |
| 2728 | ); |
| 2729 | roce_set_bit(context->qpc_bytes_4, |
| 2730 | QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S, |
| 2731 | !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) |
| 2732 | ); |
| 2733 | roce_set_bit(context->qpc_bytes_4, |
| 2734 | QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); |
| 2735 | roce_set_field(context->qpc_bytes_4, |
| 2736 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, |
| 2737 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, |
| 2738 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); |
| 2739 | roce_set_field(context->qpc_bytes_4, |
| 2740 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, |
| 2741 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, |
| 2742 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); |
| 2743 | roce_set_field(context->qpc_bytes_4, |
| 2744 | QP_CONTEXT_QPC_BYTES_4_PD_M, |
| 2745 | QP_CONTEXT_QPC_BYTES_4_PD_S, |
| 2746 | to_hr_pd(ibqp->pd)->pdn); |
| 2747 | hr_qp->access_flags = attr->qp_access_flags; |
| 2748 | roce_set_field(context->qpc_bytes_8, |
| 2749 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, |
| 2750 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, |
| 2751 | to_hr_cq(ibqp->send_cq)->cqn); |
| 2752 | roce_set_field(context->qpc_bytes_8, |
| 2753 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, |
| 2754 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, |
| 2755 | to_hr_cq(ibqp->recv_cq)->cqn); |
| 2756 | |
| 2757 | if (ibqp->srq) |
| 2758 | roce_set_field(context->qpc_bytes_12, |
| 2759 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, |
| 2760 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, |
| 2761 | to_hr_srq(ibqp->srq)->srqn); |
| 2762 | |
| 2763 | roce_set_field(context->qpc_bytes_12, |
| 2764 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, |
| 2765 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, |
| 2766 | attr->pkey_index); |
| 2767 | hr_qp->pkey_index = attr->pkey_index; |
| 2768 | roce_set_field(context->qpc_bytes_16, |
| 2769 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, |
| 2770 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); |
| 2771 | |
| 2772 | } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { |
| 2773 | roce_set_field(context->qpc_bytes_4, |
| 2774 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, |
| 2775 | QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, |
| 2776 | to_hr_qp_type(hr_qp->ibqp.qp_type)); |
| 2777 | roce_set_bit(context->qpc_bytes_4, |
| 2778 | QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); |
| 2779 | if (attr_mask & IB_QP_ACCESS_FLAGS) { |
| 2780 | roce_set_bit(context->qpc_bytes_4, |
| 2781 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, |
| 2782 | !!(attr->qp_access_flags & |
| 2783 | IB_ACCESS_REMOTE_READ)); |
| 2784 | roce_set_bit(context->qpc_bytes_4, |
| 2785 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, |
| 2786 | !!(attr->qp_access_flags & |
| 2787 | IB_ACCESS_REMOTE_WRITE)); |
| 2788 | } else { |
| 2789 | roce_set_bit(context->qpc_bytes_4, |
| 2790 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, |
| 2791 | !!(hr_qp->access_flags & |
| 2792 | IB_ACCESS_REMOTE_READ)); |
| 2793 | roce_set_bit(context->qpc_bytes_4, |
| 2794 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, |
| 2795 | !!(hr_qp->access_flags & |
| 2796 | IB_ACCESS_REMOTE_WRITE)); |
| 2797 | } |
| 2798 | |
| 2799 | roce_set_bit(context->qpc_bytes_4, |
| 2800 | QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); |
| 2801 | roce_set_field(context->qpc_bytes_4, |
| 2802 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, |
| 2803 | QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, |
| 2804 | ilog2((unsigned int)hr_qp->sq.wqe_cnt)); |
| 2805 | roce_set_field(context->qpc_bytes_4, |
| 2806 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, |
| 2807 | QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, |
| 2808 | ilog2((unsigned int)hr_qp->rq.wqe_cnt)); |
| 2809 | roce_set_field(context->qpc_bytes_4, |
| 2810 | QP_CONTEXT_QPC_BYTES_4_PD_M, |
| 2811 | QP_CONTEXT_QPC_BYTES_4_PD_S, |
| 2812 | to_hr_pd(ibqp->pd)->pdn); |
| 2813 | |
| 2814 | roce_set_field(context->qpc_bytes_8, |
| 2815 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, |
| 2816 | QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, |
| 2817 | to_hr_cq(ibqp->send_cq)->cqn); |
| 2818 | roce_set_field(context->qpc_bytes_8, |
| 2819 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, |
| 2820 | QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, |
| 2821 | to_hr_cq(ibqp->recv_cq)->cqn); |
| 2822 | |
| 2823 | if (ibqp->srq) |
| 2824 | roce_set_field(context->qpc_bytes_12, |
| 2825 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, |
| 2826 | QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, |
| 2827 | to_hr_srq(ibqp->srq)->srqn); |
| 2828 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 2829 | roce_set_field(context->qpc_bytes_12, |
| 2830 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, |
| 2831 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, |
| 2832 | attr->pkey_index); |
| 2833 | else |
| 2834 | roce_set_field(context->qpc_bytes_12, |
| 2835 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, |
| 2836 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, |
| 2837 | hr_qp->pkey_index); |
| 2838 | |
| 2839 | roce_set_field(context->qpc_bytes_16, |
| 2840 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, |
| 2841 | QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); |
| 2842 | } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { |
| 2843 | if ((attr_mask & IB_QP_ALT_PATH) || |
| 2844 | (attr_mask & IB_QP_ACCESS_FLAGS) || |
| 2845 | (attr_mask & IB_QP_PKEY_INDEX) || |
| 2846 | (attr_mask & IB_QP_QKEY)) { |
| 2847 | dev_err(dev, "INIT2RTR attr_mask error\n"); |
| 2848 | goto out; |
| 2849 | } |
| 2850 | |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 2851 | dmac = (u8 *)attr->ah_attr.roce.dmac; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2852 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2853 | context->sq_rq_bt_l = cpu_to_le32(dma_handle); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2854 | roce_set_field(context->qpc_bytes_24, |
| 2855 | QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, |
| 2856 | QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2857 | upper_32_bits(dma_handle)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2858 | roce_set_bit(context->qpc_bytes_24, |
| 2859 | QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, |
| 2860 | 1); |
| 2861 | roce_set_field(context->qpc_bytes_24, |
| 2862 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, |
| 2863 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S, |
| 2864 | attr->min_rnr_timer); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 2865 | context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2866 | roce_set_field(context->qpc_bytes_32, |
| 2867 | QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M, |
| 2868 | QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S, |
| 2869 | ((u32)(dma_handle_2 >> 32)) & |
| 2870 | QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M); |
| 2871 | roce_set_field(context->qpc_bytes_32, |
| 2872 | QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M, |
| 2873 | QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0); |
| 2874 | roce_set_bit(context->qpc_bytes_32, |
| 2875 | QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S, |
| 2876 | 1); |
| 2877 | roce_set_bit(context->qpc_bytes_32, |
| 2878 | QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, |
Lang Cheng | bfe8603 | 2019-08-21 21:14:32 +0800 | [diff] [blame] | 2879 | hr_qp->sq_signal_bits); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2880 | |
Lijun Ou | 80596c6 | 2016-11-23 19:41:03 +0000 | [diff] [blame] | 2881 | port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : |
| 2882 | hr_qp->port; |
| 2883 | smac = (u8 *)hr_dev->dev_addr[port]; |
| 2884 | /* when dmac equals smac or loop_idc is 1, loopback should be used */ |
| 2885 | if (ether_addr_equal_unaligned(dmac, smac) || |
| 2886 | hr_dev->loop_idc == 0x1) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2887 | roce_set_bit(context->qpc_bytes_32, |
Lijun Ou | 80596c6 | 2016-11-23 19:41:03 +0000 | [diff] [blame] | 2888 | QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2889 | |
| 2890 | roce_set_bit(context->qpc_bytes_32, |
| 2891 | QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2892 | rdma_ah_get_ah_flags(&attr->ah_attr)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2893 | roce_set_field(context->qpc_bytes_32, |
| 2894 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, |
| 2895 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S, |
| 2896 | ilog2((unsigned int)attr->max_dest_rd_atomic)); |
| 2897 | |
Lijun Ou | 512f4f1 | 2017-09-29 23:10:10 +0800 | [diff] [blame] | 2898 | if (attr_mask & IB_QP_DEST_QPN) |
| 2899 | roce_set_field(context->qpc_bytes_36, |
| 2900 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, |
| 2901 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_S, |
| 2902 | attr->dest_qp_num); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2903 | |
| 2904 | /* Configure GID index */ |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2905 | port_num = rdma_ah_get_port_num(&attr->ah_attr); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2906 | roce_set_field(context->qpc_bytes_36, |
| 2907 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, |
| 2908 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2909 | hns_get_gid_index(hr_dev, |
| 2910 | port_num - 1, |
| 2911 | grh->sgid_index)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2912 | |
| 2913 | memcpy(&(context->dmac_l), dmac, 4); |
| 2914 | |
| 2915 | roce_set_field(context->qpc_bytes_44, |
| 2916 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, |
| 2917 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_S, |
| 2918 | *((u16 *)(&dmac[4]))); |
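| | /* |
| | * Illustration (hypothetical MAC): for dmac 00:11:22:33:44:55 the |
| | * memcpy above stores bytes 00:11:22:33 in dmac_l, and the |
| | * roce_set_field stores the remaining 44:55 (read as a u16) in the |
| | * DMAC_H field. |
| | */ |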
| 2919 | roce_set_field(context->qpc_bytes_44, |
| 2920 | QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M, |
| 2921 | QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2922 | rdma_ah_get_static_rate(&attr->ah_attr)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2923 | roce_set_field(context->qpc_bytes_44, |
| 2924 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, |
| 2925 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_S, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2926 | grh->hop_limit); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2927 | |
| 2928 | roce_set_field(context->qpc_bytes_48, |
| 2929 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, |
| 2930 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2931 | grh->flow_label); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2932 | roce_set_field(context->qpc_bytes_48, |
| 2933 | QP_CONTEXT_QPC_BYTES_48_TCLASS_M, |
| 2934 | QP_CONTEXT_QPC_BYTES_48_TCLASS_S, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2935 | grh->traffic_class); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2936 | roce_set_field(context->qpc_bytes_48, |
| 2937 | QP_CONTEXT_QPC_BYTES_48_MTU_M, |
| 2938 | QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu); |
| 2939 | |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 2940 | memcpy(context->dgid, grh->dgid.raw, |
| 2941 | sizeof(grh->dgid.raw)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2942 | |
| 2943 | dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l, |
| 2944 | roce_get_field(context->qpc_bytes_44, |
| 2945 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, |
| 2946 | QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)); |
| 2947 | |
| 2948 | roce_set_field(context->qpc_bytes_68, |
| 2949 | QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, |
Lijun Ou | 1fad5fab | 2016-09-20 17:07:09 +0100 | [diff] [blame] | 2950 | QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, |
| 2951 | hr_qp->rq.head); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2952 | roce_set_field(context->qpc_bytes_68, |
| 2953 | QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, |
| 2954 | QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); |
| 2955 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2956 | context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2957 | |
| 2958 | roce_set_field(context->qpc_bytes_76, |
| 2959 | QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, |
| 2960 | QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 2961 | upper_32_bits(rq_ba)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 2962 | roce_set_field(context->qpc_bytes_76, |
| 2963 | QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, |
| 2964 | QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); |
| 2965 | |
| 2966 | context->rx_rnr_time = 0; |
| 2967 | |
| 2968 | roce_set_field(context->qpc_bytes_84, |
| 2969 | QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M, |
| 2970 | QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S, |
| 2971 | attr->rq_psn - 1); |
| 2972 | roce_set_field(context->qpc_bytes_84, |
| 2973 | QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M, |
| 2974 | QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0); |
| 2975 | |
| 2976 | roce_set_field(context->qpc_bytes_88, |
| 2977 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, |
| 2978 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S, |
| 2979 | attr->rq_psn); |
| 2980 | roce_set_bit(context->qpc_bytes_88, |
| 2981 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0); |
| 2982 | roce_set_bit(context->qpc_bytes_88, |
| 2983 | QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0); |
| 2984 | roce_set_field(context->qpc_bytes_88, |
| 2985 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M, |
| 2986 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S, |
| 2987 | 0); |
| 2988 | roce_set_field(context->qpc_bytes_88, |
| 2989 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M, |
| 2990 | QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S, |
| 2991 | 0); |
| 2992 | |
| 2993 | context->dma_length = 0; |
| 2994 | context->r_key = 0; |
| 2995 | context->va_l = 0; |
| 2996 | context->va_h = 0; |
| 2997 | |
| 2998 | roce_set_field(context->qpc_bytes_108, |
| 2999 | QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M, |
| 3000 | QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0); |
| 3001 | roce_set_bit(context->qpc_bytes_108, |
| 3002 | QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0); |
| 3003 | roce_set_bit(context->qpc_bytes_108, |
| 3004 | QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0); |
| 3005 | |
| 3006 | roce_set_field(context->qpc_bytes_112, |
| 3007 | QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M, |
| 3008 | QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0); |
| 3009 | roce_set_field(context->qpc_bytes_112, |
| 3010 | QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M, |
| 3011 | QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0); |
| 3012 | |
| 3013 | /* For chip resp ack */ |
| 3014 | roce_set_field(context->qpc_bytes_156, |
| 3015 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, |
| 3016 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, |
Lijun Ou | 7716809 | 2016-09-15 23:48:10 +0100 | [diff] [blame] | 3017 | hr_qp->phy_port); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3018 | roce_set_field(context->qpc_bytes_156, |
| 3019 | QP_CONTEXT_QPC_BYTES_156_SL_M, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 3020 | QP_CONTEXT_QPC_BYTES_156_SL_S, |
| 3021 | rdma_ah_get_sl(&attr->ah_attr)); |
| 3022 | hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); |
Lang Cheng | a97bf49 | 2020-04-15 16:14:35 +0800 | [diff] [blame] | 3023 | } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3024 | /* If an optional param exists, return an error */ |
| 3025 | if ((attr_mask & IB_QP_ALT_PATH) || |
| 3026 | (attr_mask & IB_QP_ACCESS_FLAGS) || |
| 3027 | (attr_mask & IB_QP_QKEY) || |
| 3028 | (attr_mask & IB_QP_PATH_MIG_STATE) || |
| 3029 | (attr_mask & IB_QP_CUR_STATE) || |
| 3030 | (attr_mask & IB_QP_MIN_RNR_TIMER)) { |
| 3031 | dev_err(dev, "RTR2RTS attr_mask error\n"); |
| 3032 | goto out; |
| 3033 | } |
| 3034 | |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 3035 | context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3036 | |
| 3037 | roce_set_field(context->qpc_bytes_120, |
| 3038 | QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, |
| 3039 | QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 3040 | upper_32_bits(sq_ba)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3041 | |
| 3042 | roce_set_field(context->qpc_bytes_124, |
| 3043 | QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, |
| 3044 | QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0); |
| 3045 | roce_set_field(context->qpc_bytes_124, |
| 3046 | QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M, |
| 3047 | QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0); |
| 3048 | |
| 3049 | roce_set_field(context->qpc_bytes_128, |
| 3050 | QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M, |
| 3051 | QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S, |
| 3052 | attr->sq_psn); |
| 3053 | roce_set_bit(context->qpc_bytes_128, |
| 3054 | QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0); |
| 3055 | roce_set_field(context->qpc_bytes_128, |
| 3056 | QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M, |
| 3057 | QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S, |
| 3058 | 0); |
| 3059 | roce_set_bit(context->qpc_bytes_128, |
| 3060 | QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0); |
| 3061 | |
| 3062 | roce_set_field(context->qpc_bytes_132, |
| 3063 | QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M, |
| 3064 | QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0); |
| 3065 | roce_set_field(context->qpc_bytes_132, |
| 3066 | QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M, |
| 3067 | QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0); |
| 3068 | |
| 3069 | roce_set_field(context->qpc_bytes_136, |
| 3070 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M, |
| 3071 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S, |
| 3072 | attr->sq_psn); |
| 3073 | roce_set_field(context->qpc_bytes_136, |
| 3074 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M, |
| 3075 | QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S, |
| 3076 | attr->sq_psn); |
| 3077 | |
| 3078 | roce_set_field(context->qpc_bytes_140, |
| 3079 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M, |
| 3080 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S, |
| 3081 | (attr->sq_psn >> SQ_PSN_SHIFT)); |
| 3082 | roce_set_field(context->qpc_bytes_140, |
| 3083 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M, |
| 3084 | QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0); |
| 3085 | roce_set_bit(context->qpc_bytes_140, |
| 3086 | QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); |
| 3087 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3088 | roce_set_field(context->qpc_bytes_148, |
| 3089 | QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, |
| 3090 | QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); |
| 3091 | roce_set_field(context->qpc_bytes_148, |
| 3092 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, |
Lijun Ou | 7c7a4ea | 2016-09-20 17:07:06 +0100 | [diff] [blame] | 3093 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, |
| 3094 | attr->retry_cnt); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3095 | roce_set_field(context->qpc_bytes_148, |
| 3096 | QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, |
Lijun Ou | 7c7a4ea | 2016-09-20 17:07:06 +0100 | [diff] [blame] | 3097 | QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, |
| 3098 | attr->rnr_retry); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3099 | roce_set_field(context->qpc_bytes_148, |
| 3100 | QP_CONTEXT_QPC_BYTES_148_LSN_M, |
| 3101 | QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); |
| 3102 | |
| 3103 | context->rnr_retry = 0; |
| 3104 | |
| 3105 | roce_set_field(context->qpc_bytes_156, |
| 3106 | QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, |
| 3107 | QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, |
| 3108 | attr->retry_cnt); |
Lijun Ou | c6c3bfe | 2016-09-20 17:07:05 +0100 | [diff] [blame] | 3109 | if (attr->timeout < 0x12) { |
| 3110 | dev_info(dev, "ack timeout value(0x%x) must be at least 0x12.\n", |
| 3111 | attr->timeout); |
| 3112 | roce_set_field(context->qpc_bytes_156, |
| 3113 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, |
| 3114 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, |
| 3115 | 0x12); |
| 3116 | } else { |
| 3117 | roce_set_field(context->qpc_bytes_156, |
| 3118 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, |
| 3119 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, |
| 3120 | attr->timeout); |
| 3121 | } |
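| | /* |
| | * Worked example (illustration; the 4.096 us * 2^timeout scaling is |
| | * the usual IB local ACK timeout formula, not stated here): a |
| | * requested timeout of 0x10 is bumped to 0x12, i.e. roughly |
| | * 4.096 us * 2^18 ~= 1.07 s. |
| | */ |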
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3122 | roce_set_field(context->qpc_bytes_156, |
| 3123 | QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, |
| 3124 | QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, |
| 3125 | attr->rnr_retry); |
| 3126 | roce_set_field(context->qpc_bytes_156, |
| 3127 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, |
| 3128 | QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, |
Lijun Ou | 7716809 | 2016-09-15 23:48:10 +0100 | [diff] [blame] | 3129 | hr_qp->phy_port); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3130 | roce_set_field(context->qpc_bytes_156, |
| 3131 | QP_CONTEXT_QPC_BYTES_156_SL_M, |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 3132 | QP_CONTEXT_QPC_BYTES_156_SL_S, |
| 3133 | rdma_ah_get_sl(&attr->ah_attr)); |
| 3134 | hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3135 | roce_set_field(context->qpc_bytes_156, |
| 3136 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, |
| 3137 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S, |
| 3138 | ilog2((unsigned int)attr->max_rd_atomic)); |
| 3139 | roce_set_field(context->qpc_bytes_156, |
| 3140 | QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M, |
| 3141 | QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0); |
| 3142 | context->pkt_use_len = 0; |
| 3143 | |
| 3144 | roce_set_field(context->qpc_bytes_164, |
| 3145 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, |
| 3146 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn); |
| 3147 | roce_set_field(context->qpc_bytes_164, |
| 3148 | QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M, |
| 3149 | QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0); |
| 3150 | |
| 3151 | roce_set_field(context->qpc_bytes_168, |
| 3152 | QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M, |
| 3153 | QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S, |
| 3154 | attr->sq_psn); |
| 3155 | roce_set_field(context->qpc_bytes_168, |
| 3156 | QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M, |
| 3157 | QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0); |
| 3158 | roce_set_field(context->qpc_bytes_168, |
| 3159 | QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M, |
| 3160 | QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0); |
| 3161 | roce_set_bit(context->qpc_bytes_168, |
| 3162 | QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0); |
| 3163 | roce_set_bit(context->qpc_bytes_168, |
| 3164 | QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0); |
| 3165 | roce_set_bit(context->qpc_bytes_168, |
| 3166 | QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0); |
| 3167 | context->sge_use_len = 0; |
| 3168 | |
| 3169 | roce_set_field(context->qpc_bytes_176, |
| 3170 | QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M, |
| 3171 | QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0); |
| 3172 | roce_set_field(context->qpc_bytes_176, |
| 3173 | QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M, |
| 3174 | QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S, |
| 3175 | 0); |
| 3176 | roce_set_field(context->qpc_bytes_180, |
| 3177 | QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M, |
| 3178 | QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0); |
| 3179 | roce_set_field(context->qpc_bytes_180, |
| 3180 | QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M, |
| 3181 | QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0); |
| 3182 | |
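| | /* The current SQ WQE base address is split: low 32 bits here, upper bits in qpc_bytes_188 below */ |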
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 3183 | context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3184 | |
| 3185 | roce_set_field(context->qpc_bytes_188, |
| 3186 | QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M, |
| 3187 | QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S, |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 3188 | upper_32_bits(sq_ba)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3189 | roce_set_bit(context->qpc_bytes_188, |
| 3190 | QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0); |
| 3191 | roce_set_field(context->qpc_bytes_188, |
| 3192 | QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, |
| 3193 | QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, |
| 3194 | 0); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3195 | } |
| 3196 | |
| 3197 | /* Every state migration must update the QP state field */ |
| 3198 | roce_set_field(context->qpc_bytes_144, |
| 3199 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, |
Lijun Ou | 1dec243 | 2016-11-23 19:41:04 +0000 | [diff] [blame] | 3200 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3201 | |
| 3202 | /* SW passes the context to HW */ |
Xi Wang | d563099 | 2020-04-13 19:58:09 +0800 | [diff] [blame] | 3203 | ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state), |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3204 | to_hns_roce_state(new_state), context, |
| 3205 | hr_qp); |
| 3206 | if (ret) { |
| 3207 | dev_err(dev, "hns_roce_qp_modify failed\n"); |
| 3208 | goto out; |
| 3209 | } |
| 3210 | |
| 3211 | /* |
Salil | e84e40be | 2016-11-23 19:41:09 +0000 | [diff] [blame] | 3212 | * Use rst2init instead of init2init with the driver; |
| 3213 | * the hardware then needs to refresh the RQ head via the doorbell again. |
| 3214 | */ |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3215 | if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { |
| 3216 | /* Memory barrier: commit the QPC update before ringing the RQ doorbell */ |
| 3217 | wmb(); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3218 | |
Lijun Ou | 509bf0c | 2016-09-15 23:48:12 +0100 | [diff] [blame] | 3219 | roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, |
| 3220 | RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); |
| 3221 | roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, |
| 3222 | RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); |
| 3223 | roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, |
| 3224 | RQ_DOORBELL_U32_8_CMD_S, 1); |
| 3225 | roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3226 | |
Lijun Ou | 509bf0c | 2016-09-15 23:48:12 +0100 | [diff] [blame] | 3227 | if (ibqp->uobject) { |
| 3228 | hr_qp->rq.db_reg_l = hr_dev->reg_base + |
Wei Hu(Xavier) | 2d40788 | 2017-08-30 17:23:14 +0800 | [diff] [blame] | 3229 | hr_dev->odb_offset + |
Lijun Ou | 509bf0c | 2016-09-15 23:48:12 +0100 | [diff] [blame] | 3230 | DB_REG_OFFSET * hr_dev->priv_uar.index; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3231 | } |
Lijun Ou | 509bf0c | 2016-09-15 23:48:12 +0100 | [diff] [blame] | 3232 | |
| 3233 | hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3234 | } |
| 3235 | |
| 3236 | hr_qp->state = new_state; |
| 3237 | |
| 3238 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) |
| 3239 | hr_qp->resp_depth = attr->max_dest_rd_atomic; |
Lijun Ou | 7716809 | 2016-09-15 23:48:10 +0100 | [diff] [blame] | 3240 | if (attr_mask & IB_QP_PORT) { |
| 3241 | hr_qp->port = attr->port_num - 1; |
| 3242 | hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; |
| 3243 | } |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3244 | |
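| | /* On RESET of a kernel QP, purge its stale CQEs and rewind the SQ/RQ ring indices */ |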
| 3245 | if (new_state == IB_QPS_RESET && !ibqp->uobject) { |
| 3246 | hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, |
| 3247 | ibqp->srq ? to_hr_srq(ibqp->srq) : NULL); |
| 3248 | if (ibqp->send_cq != ibqp->recv_cq) |
| 3249 | hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), |
| 3250 | hr_qp->qpn, NULL); |
| 3251 | |
| 3252 | hr_qp->rq.head = 0; |
| 3253 | hr_qp->rq.tail = 0; |
| 3254 | hr_qp->sq.head = 0; |
| 3255 | hr_qp->sq.tail = 0; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3256 | } |
| 3257 | out: |
| 3258 | kfree(context); |
| 3259 | return ret; |
| 3260 | } |
| 3261 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 3262 | static int hns_roce_v1_modify_qp(struct ib_qp *ibqp, |
| 3263 | const struct ib_qp_attr *attr, int attr_mask, |
| 3264 | enum ib_qp_state cur_state, |
| 3265 | enum ib_qp_state new_state) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3266 | { |
| 3267 | |
| 3268 | if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) |
| 3269 | return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state, |
| 3270 | new_state); |
| 3271 | else |
| 3272 | return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state, |
| 3273 | new_state); |
| 3274 | } |
| 3275 | |
| 3276 | static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state) |
| 3277 | { |
| 3278 | switch (state) { |
| 3279 | case HNS_ROCE_QP_STATE_RST: |
| 3280 | return IB_QPS_RESET; |
| 3281 | case HNS_ROCE_QP_STATE_INIT: |
| 3282 | return IB_QPS_INIT; |
| 3283 | case HNS_ROCE_QP_STATE_RTR: |
| 3284 | return IB_QPS_RTR; |
| 3285 | case HNS_ROCE_QP_STATE_RTS: |
| 3286 | return IB_QPS_RTS; |
| 3287 | case HNS_ROCE_QP_STATE_SQD: |
| 3288 | return IB_QPS_SQD; |
| 3289 | case HNS_ROCE_QP_STATE_ERR: |
| 3290 | return IB_QPS_ERR; |
| 3291 | default: |
| 3292 | return IB_QPS_ERR; |
| 3293 | } |
| 3294 | } |
| 3295 | |
| 3296 | static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev, |
| 3297 | struct hns_roce_qp *hr_qp, |
| 3298 | struct hns_roce_qp_context *hr_context) |
| 3299 | { |
| 3300 | struct hns_roce_cmd_mailbox *mailbox; |
| 3301 | int ret; |
| 3302 | |
| 3303 | mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); |
| 3304 | if (IS_ERR(mailbox)) |
| 3305 | return PTR_ERR(mailbox); |
| 3306 | |
| 3307 | ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, |
| 3308 | HNS_ROCE_CMD_QUERY_QP, |
Wei Hu (Xavier) | 6b877c3 | 2016-11-23 19:41:05 +0000 | [diff] [blame] | 3309 | HNS_ROCE_CMD_TIMEOUT_MSECS); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3310 | if (!ret) |
| 3311 | memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); |
| 3312 | else |
| 3313 | dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n"); |
| 3314 | |
| 3315 | hns_roce_free_cmd_mailbox(hr_dev, mailbox); |
| 3316 | |
| 3317 | return ret; |
| 3318 | } |
| 3319 | |
Lijun Ou | 9eefa95 | 2016-11-23 19:40:59 +0000 | [diff] [blame] | 3320 | static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, |
| 3321 | int qp_attr_mask, |
| 3322 | struct ib_qp_init_attr *qp_init_attr) |
| 3323 | { |
| 3324 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
| 3325 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
| 3326 | struct hns_roce_sqp_context context; |
| 3327 | u32 addr; |
| 3328 | |
| 3329 | mutex_lock(&hr_qp->mutex); |
| 3330 | |
| 3331 | if (hr_qp->state == IB_QPS_RESET) { |
| 3332 | qp_attr->qp_state = IB_QPS_RESET; |
| 3333 | goto done; |
| 3334 | } |
| 3335 | |
| 3336 | addr = ROCEE_QP1C_CFG0_0_REG + |
| 3337 | hr_qp->port * sizeof(struct hns_roce_sqp_context); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3338 | context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr)); |
| 3339 | context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1)); |
| 3340 | context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2)); |
| 3341 | context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3)); |
| 3342 | context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4)); |
| 3343 | context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5)); |
| 3344 | context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6)); |
| 3345 | context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7)); |
| 3346 | context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8)); |
| 3347 | context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9)); |
Lijun Ou | 9eefa95 | 2016-11-23 19:40:59 +0000 | [diff] [blame] | 3348 | |
| 3349 | hr_qp->state = roce_get_field(context.qp1c_bytes_4, |
| 3350 | QP1C_BYTES_4_QP_STATE_M, |
| 3351 | QP1C_BYTES_4_QP_STATE_S); |
| 3352 | qp_attr->qp_state = hr_qp->state; |
| 3353 | qp_attr->path_mtu = IB_MTU_256; |
| 3354 | qp_attr->path_mig_state = IB_MIG_ARMED; |
| 3355 | qp_attr->qkey = QKEY_VAL; |
Lijun Ou | 2bf910d | 2017-09-29 23:10:11 +0800 | [diff] [blame] | 3356 | qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; |
Lijun Ou | 9eefa95 | 2016-11-23 19:40:59 +0000 | [diff] [blame] | 3357 | qp_attr->rq_psn = 0; |
| 3358 | qp_attr->sq_psn = 0; |
| 3359 | qp_attr->dest_qp_num = 1; |
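| | /* 6 == IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ */ |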
| 3360 | qp_attr->qp_access_flags = 6; |
| 3361 | |
| 3362 | qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20, |
| 3363 | QP1C_BYTES_20_PKEY_IDX_M, |
| 3364 | QP1C_BYTES_20_PKEY_IDX_S); |
| 3365 | qp_attr->port_num = hr_qp->port + 1; |
| 3366 | qp_attr->sq_draining = 0; |
| 3367 | qp_attr->max_rd_atomic = 0; |
| 3368 | qp_attr->max_dest_rd_atomic = 0; |
| 3369 | qp_attr->min_rnr_timer = 0; |
| 3370 | qp_attr->timeout = 0; |
| 3371 | qp_attr->retry_cnt = 0; |
| 3372 | qp_attr->rnr_retry = 0; |
| 3373 | qp_attr->alt_timeout = 0; |
| 3374 | |
| 3375 | done: |
| 3376 | qp_attr->cur_qp_state = qp_attr->qp_state; |
| 3377 | qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; |
| 3378 | qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; |
| 3379 | qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; |
| 3380 | qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; |
| 3381 | qp_attr->cap.max_inline_data = 0; |
| 3382 | qp_init_attr->cap = qp_attr->cap; |
| 3383 | qp_init_attr->create_flags = 0; |
| 3384 | |
| 3385 | mutex_unlock(&hr_qp->mutex); |
| 3386 | |
| 3387 | return 0; |
| 3388 | } |
| 3389 | |
| 3390 | static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, |
| 3391 | int qp_attr_mask, |
| 3392 | struct ib_qp_init_attr *qp_init_attr) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3393 | { |
| 3394 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
| 3395 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
| 3396 | struct device *dev = &hr_dev->pdev->dev; |
| 3397 | struct hns_roce_qp_context *context; |
| 3398 | int tmp_qp_state = 0; |
| 3399 | int ret = 0; |
| 3400 | int state; |
| 3401 | |
| 3402 | context = kzalloc(sizeof(*context), GFP_KERNEL); |
| 3403 | if (!context) |
| 3404 | return -ENOMEM; |
| 3405 | |
| 3406 | memset(qp_attr, 0, sizeof(*qp_attr)); |
| 3407 | memset(qp_init_attr, 0, sizeof(*qp_init_attr)); |
| 3408 | |
| 3409 | mutex_lock(&hr_qp->mutex); |
| 3410 | |
| 3411 | if (hr_qp->state == IB_QPS_RESET) { |
| 3412 | qp_attr->qp_state = IB_QPS_RESET; |
| 3413 | goto done; |
| 3414 | } |
| 3415 | |
| 3416 | ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context); |
| 3417 | if (ret) { |
| 3418 | dev_err(dev, "query qpc error\n"); |
| 3419 | ret = -EINVAL; |
| 3420 | goto out; |
| 3421 | } |
| 3422 | |
| 3423 | state = roce_get_field(context->qpc_bytes_144, |
| 3424 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, |
| 3425 | QP_CONTEXT_QPC_BYTES_144_QP_STATE_S); |
| 3426 | tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state); |
| 3427 | if (tmp_qp_state == -1) { |
| 3428 | dev_err(dev, "to_ib_qp_state error\n"); |
| 3429 | ret = -EINVAL; |
| 3430 | goto out; |
| 3431 | } |
| 3432 | hr_qp->state = (u8)tmp_qp_state; |
| 3433 | qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; |
| 3434 | qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48, |
| 3435 | QP_CONTEXT_QPC_BYTES_48_MTU_M, |
| 3436 | QP_CONTEXT_QPC_BYTES_48_MTU_S); |
| 3437 | qp_attr->path_mig_state = IB_MIG_ARMED; |
Lijun Ou | 2bf910d | 2017-09-29 23:10:11 +0800 | [diff] [blame] | 3438 | qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3439 | if (hr_qp->ibqp.qp_type == IB_QPT_UD) |
| 3440 | qp_attr->qkey = QKEY_VAL; |
| 3441 | |
| 3442 | qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88, |
| 3443 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, |
| 3444 | QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S); |
| 3445 | qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164, |
| 3446 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, |
| 3447 | QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S); |
| 3448 | qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36, |
| 3449 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, |
| 3450 | QP_CONTEXT_QPC_BYTES_36_DEST_QP_S); |
| 3451 | qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4, |
| 3452 | QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) | |
| 3453 | ((roce_get_bit(context->qpc_bytes_4, |
| 3454 | QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) | |
| 3455 | ((roce_get_bit(context->qpc_bytes_4, |
| 3456 | QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3); |
| 3457 | |
| 3458 | if (hr_qp->ibqp.qp_type == IB_QPT_RC || |
| 3459 | hr_qp->ibqp.qp_type == IB_QPT_UC) { |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 3460 | struct ib_global_route *grh = |
| 3461 | rdma_ah_retrieve_grh(&qp_attr->ah_attr); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3462 | |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 3463 | rdma_ah_set_sl(&qp_attr->ah_attr, |
| 3464 | roce_get_field(context->qpc_bytes_156, |
| 3465 | QP_CONTEXT_QPC_BYTES_156_SL_M, |
| 3466 | QP_CONTEXT_QPC_BYTES_156_SL_S)); |
| 3467 | rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); |
| 3468 | grh->flow_label = |
| 3469 | roce_get_field(context->qpc_bytes_48, |
| 3470 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, |
| 3471 | QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S); |
| 3472 | grh->sgid_index = |
| 3473 | roce_get_field(context->qpc_bytes_36, |
| 3474 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, |
| 3475 | QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S); |
| 3476 | grh->hop_limit = |
| 3477 | roce_get_field(context->qpc_bytes_44, |
| 3478 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, |
| 3479 | QP_CONTEXT_QPC_BYTES_44_HOPLMT_S); |
| 3480 | grh->traffic_class = |
| 3481 | roce_get_field(context->qpc_bytes_48, |
| 3482 | QP_CONTEXT_QPC_BYTES_48_TCLASS_M, |
| 3483 | QP_CONTEXT_QPC_BYTES_48_TCLASS_S); |
| 3484 | |
| 3485 | memcpy(grh->dgid.raw, context->dgid, |
| 3486 | sizeof(grh->dgid.raw)); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3487 | } |
| 3488 | |
| 3489 | qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12, |
| 3490 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, |
| 3491 | QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S); |
Wei Hu (Xavier) | dd783a2 | 2016-11-23 19:41:06 +0000 | [diff] [blame] | 3492 | qp_attr->port_num = hr_qp->port + 1; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3493 | qp_attr->sq_draining = 0; |
Lijun Ou | be7acd9 | 2017-09-29 23:10:07 +0800 | [diff] [blame] | 3494 | qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3495 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, |
| 3496 | QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S); |
Lijun Ou | be7acd9 | 2017-09-29 23:10:07 +0800 | [diff] [blame] | 3497 | qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3498 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, |
| 3499 | QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S); |
| 3500 | qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24, |
| 3501 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, |
| 3502 | QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)); |
| 3503 | qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156, |
| 3504 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, |
| 3505 | QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)); |
| 3506 | qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, |
| 3507 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, |
| 3508 | QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); |
Lang Cheng | bfe8603 | 2019-08-21 21:14:32 +0800 | [diff] [blame] | 3509 | qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3510 | |
| 3511 | done: |
| 3512 | qp_attr->cur_qp_state = qp_attr->qp_state; |
| 3513 | qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; |
| 3514 | qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; |
| 3515 | |
| 3516 | if (!ibqp->uobject) { |
| 3517 | qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; |
| 3518 | qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; |
| 3519 | } else { |
| 3520 | qp_attr->cap.max_send_wr = 0; |
| 3521 | qp_attr->cap.max_send_sge = 0; |
| 3522 | } |
| 3523 | |
| 3524 | qp_init_attr->cap = qp_attr->cap; |
| 3525 | |
| 3526 | out: |
| 3527 | mutex_unlock(&hr_qp->mutex); |
| 3528 | kfree(context); |
| 3529 | return ret; |
| 3530 | } |
| 3531 | |
Bart Van Assche | d61d6de | 2017-10-11 10:49:01 -0700 | [diff] [blame] | 3532 | static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, |
| 3533 | int qp_attr_mask, |
| 3534 | struct ib_qp_init_attr *qp_init_attr) |
Lijun Ou | 9eefa95 | 2016-11-23 19:40:59 +0000 | [diff] [blame] | 3535 | { |
| 3536 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
| 3537 | |
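| | /* QP0/QP1 keep their context in the QP1C registers and are queried separately */ |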
| 3538 | return hr_qp->doorbell_qpn <= 1 ? |
| 3539 | hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) : |
| 3540 | hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); |
| 3541 | } |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3542 | |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3543 | int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3544 | { |
| 3545 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); |
| 3546 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3547 | struct hns_roce_cq *send_cq, *recv_cq; |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3548 | int ret; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3549 | |
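| | /* Bring the QP down to RESET so the hardware stops using it before its resources are freed */ |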
Leon Romanovsky | 5742582 | 2019-04-04 09:56:38 +0300 | [diff] [blame] | 3550 | ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); |
| 3551 | if (ret) |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3552 | return ret; |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3553 | |
Xi Wang | 626903e | 2020-01-09 20:20:12 +0800 | [diff] [blame] | 3554 | send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; |
| 3555 | recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3556 | |
| 3557 | hns_roce_lock_cqs(send_cq, recv_cq); |
Leon Romanovsky | 5742582 | 2019-04-04 09:56:38 +0300 | [diff] [blame] | 3558 | if (!udata) { |
Xi Wang | 626903e | 2020-01-09 20:20:12 +0800 | [diff] [blame] | 3559 | if (recv_cq) |
| 3560 | __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, |
| 3561 | (hr_qp->ibqp.srq ? |
| 3562 | to_hr_srq(hr_qp->ibqp.srq) : |
| 3563 | NULL)); |
| 3564 | |
| 3565 | if (send_cq && send_cq != recv_cq) |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3566 | __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); |
| 3567 | } |
Xi Wang | e365b26 | 2020-02-24 14:37:32 +0800 | [diff] [blame] | 3568 | hns_roce_qp_remove(hr_dev, hr_qp); |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3569 | hns_roce_unlock_cqs(send_cq, recv_cq); |
| 3570 | |
Xi Wang | e365b26 | 2020-02-24 14:37:32 +0800 | [diff] [blame] | 3571 | hns_roce_qp_destroy(hr_dev, hr_qp, udata); |
Wei Hu (Xavier) | d838c48 | 2016-11-29 23:10:25 +0000 | [diff] [blame] | 3572 | |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 3573 | return 0; |
| 3574 | } |
| 3575 | |
Leon Romanovsky | a52c8e2 | 2019-05-28 14:37:28 +0300 | [diff] [blame] | 3576 | static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 3577 | { |
| 3578 | struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); |
| 3579 | struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); |
| 3580 | struct device *dev = &hr_dev->pdev->dev; |
| 3581 | u32 cqe_cnt_ori; |
| 3582 | u32 cqe_cnt_cur; |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 3583 | int wait_time = 0; |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 3584 | |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 3585 | /* |
| 3586 | * Before freeing the CQ buffer, ensure that all outstanding CQEs |
| 3587 | * have been written by checking the CQE counter. |
| 3588 | */ |
| 3589 | cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); |
| 3590 | while (1) { |
| 3591 | if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) & |
| 3592 | HNS_ROCE_CQE_WCMD_EMPTY_BIT) |
| 3593 | break; |
| 3594 | |
| 3595 | cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); |
| 3596 | if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT) |
| 3597 | break; |
| 3598 | |
| 3599 | msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS); |
| 3600 | if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) { |
| 3601 | dev_warn(dev, "Destroy cq 0x%lx timeout!\n", |
| 3602 | hr_cq->cqn); |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 3603 | break; |
| 3604 | } |
| 3605 | wait_time++; |
| 3606 | } |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 3607 | } |
| 3608 | |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3609 | static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not) |
| 3610 | { |
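| | /* Doorbell layout: masked consumer index in the low bits, request-notify flag at bit log_entries */ |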
| 3611 | roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) | |
| 3612 | (req_not << eq->log_entries), eq->doorbell); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3613 | } |
| 3614 | |
| 3615 | static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev, |
| 3616 | struct hns_roce_aeqe *aeqe, int qpn) |
| 3617 | { |
| 3618 | struct device *dev = &hr_dev->pdev->dev; |
| 3619 | |
| 3620 | dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); |
| 3621 | switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, |
| 3622 | HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { |
| 3623 | case HNS_ROCE_LWQCE_QPC_ERROR: |
| 3624 | dev_warn(dev, "QP %d, QPC error.\n", qpn); |
| 3625 | break; |
| 3626 | case HNS_ROCE_LWQCE_MTU_ERROR: |
| 3627 | dev_warn(dev, "QP %d, MTU error.\n", qpn); |
| 3628 | break; |
| 3629 | case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: |
| 3630 | dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); |
| 3631 | break; |
| 3632 | case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: |
| 3633 | dev_warn(dev, "QP %d, WQE addr error.\n", qpn); |
| 3634 | break; |
| 3635 | case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: |
| 3636 | dev_warn(dev, "QP %d, WQE shift error\n", qpn); |
| 3637 | break; |
| 3638 | case HNS_ROCE_LWQCE_SL_ERROR: |
| 3639 | dev_warn(dev, "QP %d, SL error.\n", qpn); |
| 3640 | break; |
| 3641 | case HNS_ROCE_LWQCE_PORT_ERROR: |
| 3642 | dev_warn(dev, "QP %d, port error.\n", qpn); |
| 3643 | break; |
| 3644 | default: |
| 3645 | break; |
| 3646 | } |
| 3647 | } |
| 3648 | |
| 3649 | static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, |
| 3650 | struct hns_roce_aeqe *aeqe, |
| 3651 | int qpn) |
| 3652 | { |
| 3653 | struct device *dev = &hr_dev->pdev->dev; |
| 3654 | |
| 3655 | dev_warn(dev, "Local Access Violation Work Queue Error.\n"); |
| 3656 | switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, |
| 3657 | HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { |
| 3658 | case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: |
| 3659 | dev_warn(dev, "QP %d, R_key violation.\n", qpn); |
| 3660 | break; |
| 3661 | case HNS_ROCE_LAVWQE_LENGTH_ERROR: |
| 3662 | dev_warn(dev, "QP %d, length error.\n", qpn); |
| 3663 | break; |
| 3664 | case HNS_ROCE_LAVWQE_VA_ERROR: |
| 3665 | dev_warn(dev, "QP %d, VA error.\n", qpn); |
| 3666 | break; |
| 3667 | case HNS_ROCE_LAVWQE_PD_ERROR: |
| 3668 | dev_err(dev, "QP %d, PD error.\n", qpn); |
| 3669 | break; |
| 3670 | case HNS_ROCE_LAVWQE_RW_ACC_ERROR: |
| 3671 | dev_warn(dev, "QP %d, rw acc error.\n", qpn); |
| 3672 | break; |
| 3673 | case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: |
| 3674 | dev_warn(dev, "QP %d, key state error.\n", qpn); |
| 3675 | break; |
| 3676 | case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: |
| 3677 | dev_warn(dev, "QP %d, MR operation error.\n", qpn); |
| 3678 | break; |
| 3679 | default: |
| 3680 | break; |
| 3681 | } |
| 3682 | } |
| 3683 | |
| 3684 | static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev, |
| 3685 | struct hns_roce_aeqe *aeqe, |
| 3686 | int event_type) |
| 3687 | { |
| 3688 | struct device *dev = &hr_dev->pdev->dev; |
| 3689 | int phy_port; |
| 3690 | int qpn; |
| 3691 | |
| 3692 | qpn = roce_get_field(aeqe->event.qp_event.qp, |
| 3693 | HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, |
| 3694 | HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); |
| 3695 | phy_port = roce_get_field(aeqe->event.qp_event.qp, |
| 3696 | HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, |
| 3697 | HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); |
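| | /* QP0/QP1 are per-port special QPs; rebuild the software QPN from the physical port */ |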
| 3698 | if (qpn <= 1) |
| 3699 | qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; |
| 3700 | |
| 3701 | switch (event_type) { |
| 3702 | case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: |
| 3703 | dev_warn(dev, "Invalid Req Local Work Queue Error.\n" |
| 3704 | "QP %d, phy_port %d.\n", qpn, phy_port); |
| 3705 | break; |
| 3706 | case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: |
| 3707 | hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn); |
| 3708 | break; |
| 3709 | case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: |
| 3710 | hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn); |
| 3711 | break; |
| 3712 | default: |
| 3713 | break; |
| 3714 | } |
| 3715 | |
| 3716 | hns_roce_qp_event(hr_dev, qpn, event_type); |
| 3717 | } |
| 3718 | |
| 3719 | static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev, |
| 3720 | struct hns_roce_aeqe *aeqe, |
| 3721 | int event_type) |
| 3722 | { |
| 3723 | struct device *dev = &hr_dev->pdev->dev; |
| 3724 | u32 cqn; |
| 3725 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3726 | cqn = roce_get_field(aeqe->event.cq_event.cq, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3727 | HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3728 | HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3729 | |
| 3730 | switch (event_type) { |
| 3731 | case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: |
| 3732 | dev_warn(dev, "CQ 0x%x access err.\n", cqn); |
| 3733 | break; |
| 3734 | case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: |
| 3735 | dev_warn(dev, "CQ 0x%x overflow\n", cqn); |
| 3736 | break; |
| 3737 | case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: |
| 3738 | dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); |
| 3739 | break; |
| 3740 | default: |
| 3741 | break; |
| 3742 | } |
| 3743 | |
| 3744 | hns_roce_cq_event(hr_dev, cqn, event_type); |
| 3745 | } |
| 3746 | |
| 3747 | static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev, |
| 3748 | struct hns_roce_aeqe *aeqe) |
| 3749 | { |
| 3750 | struct device *dev = &hr_dev->pdev->dev; |
| 3751 | |
| 3752 | switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, |
| 3753 | HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { |
| 3754 | case HNS_ROCE_DB_SUBTYPE_SDB_OVF: |
| 3755 | dev_warn(dev, "SDB overflow.\n"); |
| 3756 | break; |
| 3757 | case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF: |
| 3758 | dev_warn(dev, "SDB almost overflow.\n"); |
| 3759 | break; |
| 3760 | case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP: |
| 3761 | dev_warn(dev, "SDB almost empty.\n"); |
| 3762 | break; |
| 3763 | case HNS_ROCE_DB_SUBTYPE_ODB_OVF: |
| 3764 | dev_warn(dev, "ODB overflow.\n"); |
| 3765 | break; |
| 3766 | case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF: |
| 3767 | dev_warn(dev, "ODB almost overflow.\n"); |
| 3768 | break; |
| 3769 | case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP: |
| 3770 | dev_warn(dev, "SDB almost empty.\n"); |
| 3771 | break; |
| 3772 | default: |
| 3773 | break; |
| 3774 | } |
| 3775 | } |
| 3776 | |
| 3777 | static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry) |
| 3778 | { |
| 3779 | unsigned long off = (entry & (eq->entries - 1)) * |
| 3780 | HNS_ROCE_AEQ_ENTRY_SIZE; |
| 3781 | |
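| | /* The EQ is backed by BA-sized coherent chunks; pick the chunk and the offset within it */ |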
| 3782 | return (struct hns_roce_aeqe *)((u8 *) |
| 3783 | (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + |
| 3784 | off % HNS_ROCE_BA_SIZE); |
| 3785 | } |
| 3786 | |
| 3787 | static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq) |
| 3788 | { |
| 3789 | struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index); |
| 3790 | |
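| | /* An entry is valid when its owner bit differs from the wrap parity of the consumer index */ |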
| 3791 | return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^ |
| 3792 | !!(eq->cons_index & eq->entries)) ? aeqe : NULL; |
| 3793 | } |
| 3794 | |
| 3795 | static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, |
| 3796 | struct hns_roce_eq *eq) |
| 3797 | { |
| 3798 | struct device *dev = &hr_dev->pdev->dev; |
| 3799 | struct hns_roce_aeqe *aeqe; |
| 3800 | int aeqes_found = 0; |
| 3801 | int event_type; |
| 3802 | |
| 3803 | while ((aeqe = next_aeqe_sw_v1(eq))) { |
Yixian Liu | 4044a3f | 2017-12-29 19:26:18 +0800 | [diff] [blame] | 3804 | |
| 3805 | /* Make sure we read the AEQ entry after we have checked the |
| 3806 | * ownership bit |
| 3807 | */ |
| 3808 | dma_rmb(); |
| 3809 | |
Lang Cheng | fd7dd8b | 2019-06-24 19:47:50 +0800 | [diff] [blame] | 3810 | dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n", |
| 3811 | aeqe, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3812 | roce_get_field(aeqe->asyn, |
| 3813 | HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, |
| 3814 | HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3815 | event_type = roce_get_field(aeqe->asyn, |
| 3816 | HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, |
| 3817 | HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); |
| 3818 | switch (event_type) { |
| 3819 | case HNS_ROCE_EVENT_TYPE_PATH_MIG: |
| 3820 | dev_warn(dev, "PATH MIG not supported\n"); |
| 3821 | break; |
| 3822 | case HNS_ROCE_EVENT_TYPE_COMM_EST: |
| 3823 | dev_warn(dev, "COMMUNICATION established\n"); |
| 3824 | break; |
| 3825 | case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: |
| 3826 | dev_warn(dev, "SQ DRAINED not supported\n"); |
| 3827 | break; |
| 3828 | case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: |
| 3829 | dev_warn(dev, "PATH MIG failed\n"); |
| 3830 | break; |
| 3831 | case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: |
| 3832 | case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: |
| 3833 | case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: |
| 3834 | hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type); |
| 3835 | break; |
| 3836 | case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: |
| 3837 | case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: |
| 3838 | case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: |
| 3839 | dev_warn(dev, "SRQ not support!\n"); |
| 3840 | break; |
| 3841 | case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: |
| 3842 | case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: |
| 3843 | case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: |
| 3844 | hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type); |
| 3845 | break; |
| 3846 | case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: |
| 3847 | dev_warn(dev, "port change.\n"); |
| 3848 | break; |
| 3849 | case HNS_ROCE_EVENT_TYPE_MB: |
| 3850 | hns_roce_cmd_event(hr_dev, |
| 3851 | le16_to_cpu(aeqe->event.cmd.token), |
| 3852 | aeqe->event.cmd.status, |
| 3853 | le64_to_cpu(aeqe->event.cmd.out_param |
| 3854 | )); |
| 3855 | break; |
| 3856 | case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: |
| 3857 | hns_roce_v1_db_overflow_handle(hr_dev, aeqe); |
| 3858 | break; |
| 3859 | case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW: |
| 3860 | dev_warn(dev, "CEQ 0x%lx overflow.\n", |
| 3861 | roce_get_field(aeqe->event.ce_event.ceqe, |
| 3862 | HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M, |
| 3863 | HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)); |
| 3864 | break; |
| 3865 | default: |
| 3866 | dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n", |
| 3867 | event_type, eq->eqn, eq->cons_index); |
| 3868 | break; |
| 3869 | } |
| 3870 | |
| 3871 | eq->cons_index++; |
| 3872 | aeqes_found = 1; |
| 3873 | |
Wenpeng Liang | bceda6e | 2020-03-20 11:23:39 +0800 | [diff] [blame] | 3874 | if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3875 | eq->cons_index = 0; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3876 | } |
| 3877 | |
| 3878 | set_eq_cons_index_v1(eq, 0); |
| 3879 | |
| 3880 | return aeqes_found; |
| 3881 | } |
| 3882 | |
| 3883 | static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry) |
| 3884 | { |
| 3885 | unsigned long off = (entry & (eq->entries - 1)) * |
| 3886 | HNS_ROCE_CEQ_ENTRY_SIZE; |
| 3887 | |
| 3888 | return (struct hns_roce_ceqe *)((u8 *) |
| 3889 | (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + |
| 3890 | off % HNS_ROCE_BA_SIZE); |
| 3891 | } |
| 3892 | |
| 3893 | static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq) |
| 3894 | { |
| 3895 | struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index); |
| 3896 | |
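| | /* Same owner-bit convention as the AEQ above */ |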
| 3897 | return (!!(roce_get_bit(ceqe->comp, |
| 3898 | HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^ |
| 3899 | (!!(eq->cons_index & eq->entries)) ? ceqe : NULL; |
| 3900 | } |
| 3901 | |
| 3902 | static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, |
| 3903 | struct hns_roce_eq *eq) |
| 3904 | { |
| 3905 | struct hns_roce_ceqe *ceqe; |
| 3906 | int ceqes_found = 0; |
| 3907 | u32 cqn; |
| 3908 | |
| 3909 | while ((ceqe = next_ceqe_sw_v1(eq))) { |
Yixian Liu | 4044a3f | 2017-12-29 19:26:18 +0800 | [diff] [blame] | 3910 | |
| 3911 | /* Make sure we read CEQ entry after we have checked the |
| 3912 | * ownership bit |
| 3913 | */ |
| 3914 | dma_rmb(); |
| 3915 | |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3916 | cqn = roce_get_field(ceqe->comp, |
| 3917 | HNS_ROCE_CEQE_CEQE_COMP_CQN_M, |
| 3918 | HNS_ROCE_CEQE_CEQE_COMP_CQN_S); |
| 3919 | hns_roce_cq_completion(hr_dev, cqn); |
| 3920 | |
| 3921 | ++eq->cons_index; |
| 3922 | ceqes_found = 1; |
| 3923 | |
Lijun Ou | 90c559b | 2019-08-21 21:14:31 +0800 | [diff] [blame] | 3924 | if (eq->cons_index > |
Wenpeng Liang | bceda6e | 2020-03-20 11:23:39 +0800 | [diff] [blame] | 3925 | EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3926 | eq->cons_index = 0; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3927 | } |
| 3928 | |
| 3929 | set_eq_cons_index_v1(eq, 0); |
| 3930 | |
| 3931 | return ceqes_found; |
| 3932 | } |
| 3933 | |
| 3934 | static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr) |
| 3935 | { |
| 3936 | struct hns_roce_eq *eq = eq_ptr; |
| 3937 | struct hns_roce_dev *hr_dev = eq->hr_dev; |
| 3938 | int int_work = 0; |
| 3939 | |
| 3940 | if (eq->type_flag == HNS_ROCE_CEQ) |
| 3941 | /* CEQ irq routine; CEQ is a pulse irq and does not need clearing */ |
| 3942 | int_work = hns_roce_v1_ceq_int(hr_dev, eq); |
| 3943 | else |
| 3944 | /* AEQ irq routine; AEQ is a pulse irq and does not need clearing */ |
| 3945 | int_work = hns_roce_v1_aeq_int(hr_dev, eq); |
| 3946 | |
| 3947 | return IRQ_RETVAL(int_work); |
| 3948 | } |
| 3949 | |
| 3950 | static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id) |
| 3951 | { |
| 3952 | struct hns_roce_dev *hr_dev = dev_id; |
| 3953 | struct device *dev = &hr_dev->pdev->dev; |
| 3954 | int int_work = 0; |
| 3955 | u32 caepaemask_val; |
| 3956 | u32 cealmovf_val; |
| 3957 | u32 caepaest_val; |
| 3958 | u32 aeshift_val; |
| 3959 | u32 ceshift_val; |
| 3960 | u32 cemask_val; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3961 | __le32 tmp; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3962 | int i; |
| 3963 | |
| 3964 | /* |
| 3965 | * Abnormal interrupt: |
| 3966 | * AEQ overflow, ECC multi-bit error and CEQ overflow must be cleared |
| 3967 | * explicitly: mask the irq, clear the status, then cancel the mask |
| 3968 | */ |
| 3969 | aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3970 | tmp = cpu_to_le32(aeshift_val); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3971 | |
| 3972 | /* AEQE overflow */ |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3973 | if (roce_get_bit(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3974 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) { |
| 3975 | dev_warn(dev, "AEQ overflow!\n"); |
| 3976 | |
| 3977 | /* Set mask */ |
| 3978 | caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3979 | tmp = cpu_to_le32(caepaemask_val); |
| 3980 | roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3981 | HNS_ROCE_INT_MASK_ENABLE); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3982 | caepaemask_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3983 | roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); |
| 3984 | |
| 3985 | /* Clear int state(INT_WC : write 1 clear) */ |
| 3986 | caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3987 | tmp = cpu_to_le32(caepaest_val); |
| 3988 | roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1); |
| 3989 | caepaest_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3990 | roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val); |
| 3991 | |
| 3992 | /* Clear mask */ |
| 3993 | caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3994 | tmp = cpu_to_le32(caepaemask_val); |
| 3995 | roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3996 | HNS_ROCE_INT_MASK_DISABLE); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 3997 | caepaemask_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 3998 | roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); |
| 3999 | } |
| 4000 | |
| 4001 | /* CEQ almost overflow */ |
| 4002 | for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { |
| 4003 | ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG + |
| 4004 | i * CEQ_REG_OFFSET); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4005 | tmp = cpu_to_le32(ceshift_val); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4006 | |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4007 | if (roce_get_bit(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4008 | ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) { |
| 4009 | dev_warn(dev, "CEQ[%d] almost overflow!\n", i); |
| 4010 | int_work++; |
| 4011 | |
| 4012 | /* Set mask */ |
| 4013 | cemask_val = roce_read(hr_dev, |
| 4014 | ROCEE_CAEP_CE_IRQ_MASK_0_REG + |
| 4015 | i * CEQ_REG_OFFSET); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4016 | tmp = cpu_to_le32(cemask_val); |
| 4017 | roce_set_bit(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4018 | ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, |
| 4019 | HNS_ROCE_INT_MASK_ENABLE); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4020 | cemask_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4021 | roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + |
| 4022 | i * CEQ_REG_OFFSET, cemask_val); |
| 4023 | |
| 4024 | /* Clear int state(INT_WC : write 1 clear) */ |
| 4025 | cealmovf_val = roce_read(hr_dev, |
| 4026 | ROCEE_CAEP_CEQ_ALM_OVF_0_REG + |
| 4027 | i * CEQ_REG_OFFSET); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4028 | tmp = cpu_to_le32(cealmovf_val); |
| 4029 | roce_set_bit(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4030 | ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S, |
| 4031 | 1); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4032 | cealmovf_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4033 | roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG + |
| 4034 | i * CEQ_REG_OFFSET, cealmovf_val); |
| 4035 | |
| 4036 | /* Clear mask */ |
| 4037 | cemask_val = roce_read(hr_dev, |
| 4038 | ROCEE_CAEP_CE_IRQ_MASK_0_REG + |
| 4039 | i * CEQ_REG_OFFSET); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4040 | tmp = cpu_to_le32(cemask_val); |
| 4041 | roce_set_bit(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4042 | ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, |
| 4043 | HNS_ROCE_INT_MASK_DISABLE); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4044 | cemask_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4045 | roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + |
| 4046 | i * CEQ_REG_OFFSET, cemask_val); |
| 4047 | } |
| 4048 | } |
| 4049 | |
| 4050 | /* ECC multi-bit error alarm */ |
| 4051 | dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n", |
| 4052 | roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG), |
| 4053 | roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG), |
| 4054 | roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG)); |
| 4055 | |
| 4056 | dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n", |
| 4057 | roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG), |
| 4058 | roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG), |
| 4059 | roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG)); |
| 4060 | |
| 4061 | return IRQ_RETVAL(int_work); |
| 4062 | } |
| 4063 | |
| 4064 | static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev) |
| 4065 | { |
| 4066 | u32 aemask_val; |
| 4067 | int masken = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4068 | __le32 tmp; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4069 | int i; |
| 4070 | |
| 4071 | /* AEQ INT */ |
| 4072 | aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4073 | tmp = cpu_to_le32(aemask_val); |
| 4074 | roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4075 | masken); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4076 | roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken); |
| 4077 | aemask_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4078 | roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val); |
| 4079 | |
| 4080 | /* CEQ INT */ |
| 4081 | for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { |
| 4082 | /* IRQ mask */ |
| 4083 | roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + |
| 4084 | i * CEQ_REG_OFFSET, masken); |
| 4085 | } |
| 4086 | } |
| 4087 | |
| 4088 | static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev, |
| 4089 | struct hns_roce_eq *eq) |
| 4090 | { |
| 4091 | int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + |
| 4092 | HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; |
| 4093 | int i; |
| 4094 | |
| 4095 | if (!eq->buf_list) |
| 4096 | return; |
| 4097 | |
| 4098 | for (i = 0; i < npages; ++i) |
| 4099 | dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE, |
| 4100 | eq->buf_list[i].buf, eq->buf_list[i].map); |
| 4101 | |
| 4102 | kfree(eq->buf_list); |
| 4103 | } |
| 4104 | |
| 4105 | static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num, |
| 4106 | int enable_flag) |
| 4107 | { |
| 4108 | void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num]; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4109 | __le32 tmp; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4110 | u32 val; |
| 4111 | |
| 4112 | val = readl(eqc); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4113 | tmp = cpu_to_le32(val); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4114 | |
| 4115 | if (enable_flag) |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4116 | roce_set_field(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4117 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, |
| 4118 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, |
| 4119 | HNS_ROCE_EQ_STAT_VALID); |
| 4120 | else |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4121 | roce_set_field(tmp, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4122 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, |
| 4123 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, |
| 4124 | HNS_ROCE_EQ_STAT_INVALID); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4125 | |
| 4126 | val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4127 | writel(val, eqc); |
| 4128 | } |
| 4129 | |
| 4130 | static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev, |
| 4131 | struct hns_roce_eq *eq) |
| 4132 | { |
| 4133 | void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn]; |
| 4134 | struct device *dev = &hr_dev->pdev->dev; |
| 4135 | dma_addr_t tmp_dma_addr; |
| 4136 | u32 eqconsindx_val = 0; |
| 4137 | u32 eqcuridx_val = 0; |
| 4138 | u32 eqshift_val = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4139 | __le32 tmp2 = 0; |
| 4140 | __le32 tmp1 = 0; |
| 4141 | __le32 tmp = 0; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4142 | int num_bas; |
| 4143 | int ret; |
| 4144 | int i; |
| 4145 | |
| 4146 | num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) + |
| 4147 | HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; |
| 4148 | |
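| | /* A v1 EQ must fit entirely within a single BA-sized buffer */ |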
| 4149 | if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) { |
| 4150 | dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n", |
| 4151 | (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE, |
| 4152 | num_bas); |
| 4153 | return -EINVAL; |
| 4154 | } |
| 4155 | |
| 4156 | eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL); |
| 4157 | if (!eq->buf_list) |
| 4158 | return -ENOMEM; |
| 4159 | |
| 4160 | for (i = 0; i < num_bas; ++i) { |
| 4161 | eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE, |
| 4162 | &tmp_dma_addr, |
| 4163 | GFP_KERNEL); |
| 4164 | if (!eq->buf_list[i].buf) { |
| 4165 | ret = -ENOMEM; |
| 4166 | goto err_out_free_pages; |
| 4167 | } |
| 4168 | |
| 4169 | eq->buf_list[i].map = tmp_dma_addr; |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4170 | } |
| 4171 | eq->cons_index = 0; |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4172 | roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4173 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, |
| 4174 | HNS_ROCE_EQ_STAT_INVALID); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4175 | roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4176 | ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S, |
| 4177 | eq->log_entries); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4178 | eqshift_val = le32_to_cpu(tmp); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4179 | writel(eqshift_val, eqc); |
| 4180 | |
| 4181 | /* Configure eq extended address 12~44bit */ |
| 4182 | writel((u32)(eq->buf_list[0].map >> 12), eqc + 4); |
| 4183 | |
| 4184 | /* |
| 4185 | * Configure eq extended address 45~49 bit. |
| 4186 | * 44 = 32 + 12: shift the address right by 12 because 4K pages are |
| 4187 | * used, and by a further 32 to obtain the high 32-bit value that is |
| 4188 | * passed to the hardware. |
| 4189 | */ |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4190 | roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4191 | ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S, |
| 4192 | eq->buf_list[0].map >> 44); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4193 | roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4194 | ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4195 | eqcuridx_val = le32_to_cpu(tmp1); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4196 | writel(eqcuridx_val, eqc + 8); |
| 4197 | |
| 4198 | /* Configure eq consumer index */ |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4199 | roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4200 | ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0); |
oulijun | 0576cbd | 2018-07-09 17:48:06 +0800 | [diff] [blame] | 4201 | eqconsindx_val = le32_to_cpu(tmp2); |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4202 | writel(eqconsindx_val, eqc + 0xc); |
| 4203 | |
| 4204 | return 0; |
| 4205 | |
| 4206 | err_out_free_pages: |
| 4207 | for (i -= 1; i >= 0; i--) |
| 4208 | dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf, |
| 4209 | eq->buf_list[i].map); |
| 4210 | |
| 4211 | kfree(eq->buf_list); |
| 4212 | return ret; |
| 4213 | } |
| 4214 | |
| 4215 | static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev) |
| 4216 | { |
| 4217 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; |
| 4218 | struct device *dev = &hr_dev->pdev->dev; |
| 4219 | struct hns_roce_eq *eq; |
| 4220 | int irq_num; |
| 4221 | int eq_num; |
| 4222 | int ret; |
| 4223 | int i, j; |
| 4224 | |
| 4225 | eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; |
| 4226 | irq_num = eq_num + hr_dev->caps.num_other_vectors; |
| 4227 | |
| 4228 | eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); |
| 4229 | if (!eq_table->eq) |
| 4230 | return -ENOMEM; |
| 4231 | |
| 4232 | eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base), |
| 4233 | GFP_KERNEL); |
| 4234 | if (!eq_table->eqc_base) { |
| 4235 | ret = -ENOMEM; |
| 4236 | goto err_eqc_base_alloc_fail; |
| 4237 | } |
| 4238 | |
| 4239 | for (i = 0; i < eq_num; i++) { |
| 4240 | eq = &eq_table->eq[i]; |
| 4241 | eq->hr_dev = hr_dev; |
| 4242 | eq->eqn = i; |
| 4243 | eq->irq = hr_dev->irq[i]; |
| 4244 | eq->log_page_size = PAGE_SHIFT; |
| 4245 | |
| 4246 | if (i < hr_dev->caps.num_comp_vectors) { |
| 4247 | /* CEQ */ |
| 4248 | eq_table->eqc_base[i] = hr_dev->reg_base + |
| 4249 | ROCEE_CAEP_CEQC_SHIFT_0_REG + |
| 4250 | CEQ_REG_OFFSET * i; |
| 4251 | eq->type_flag = HNS_ROCE_CEQ; |
| 4252 | eq->doorbell = hr_dev->reg_base + |
| 4253 | ROCEE_CAEP_CEQC_CONS_IDX_0_REG + |
| 4254 | CEQ_REG_OFFSET * i; |
| 4255 | eq->entries = hr_dev->caps.ceqe_depth; |
| 4256 | eq->log_entries = ilog2(eq->entries); |
| 4257 | eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE; |
| 4258 | } else { |
| 4259 | /* AEQ */ |
| 4260 | eq_table->eqc_base[i] = hr_dev->reg_base + |
| 4261 | ROCEE_CAEP_AEQC_AEQE_SHIFT_REG; |
| 4262 | eq->type_flag = HNS_ROCE_AEQ; |
| 4263 | eq->doorbell = hr_dev->reg_base + |
| 4264 | ROCEE_CAEP_AEQE_CONS_IDX_REG; |
| 4265 | eq->entries = hr_dev->caps.aeqe_depth; |
| 4266 | eq->log_entries = ilog2(eq->entries); |
| 4267 | eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE; |
| 4268 | } |
| 4269 | } |
| 4270 | |
| 4271 | /* Disable irq */ |
| 4272 | hns_roce_v1_int_mask_enable(hr_dev); |
| 4273 | |
| 4274 | /* Configure ce int interval */ |
| 4275 | roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG, |
| 4276 | HNS_ROCE_CEQ_DEFAULT_INTERVAL); |
| 4277 | |
| 4278 | /* Configure ce int burst num */ |
| 4279 | roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG, |
| 4280 | HNS_ROCE_CEQ_DEFAULT_BURST_NUM); |
| 4281 | |
| 4282 | for (i = 0; i < eq_num; i++) { |
| 4283 | ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]); |
| 4284 | if (ret) { |
| 4285 | dev_err(dev, "eq create failed\n"); |
| 4286 | goto err_create_eq_fail; |
| 4287 | } |
| 4288 | } |
| 4289 | |
| 4290 | for (j = 0; j < irq_num; j++) { |
| 4291 | if (j < eq_num) |
| 4292 | ret = request_irq(hr_dev->irq[j], |
| 4293 | hns_roce_v1_msix_interrupt_eq, 0, |
| 4294 | hr_dev->irq_names[j], |
| 4295 | &eq_table->eq[j]); |
| 4296 | else |
| 4297 | ret = request_irq(hr_dev->irq[j], |
| 4298 | hns_roce_v1_msix_interrupt_abn, 0, |
| 4299 | hr_dev->irq_names[j], hr_dev); |
| 4300 | |
| 4301 | if (ret) { |
| 4302 | dev_err(dev, "request irq error!\n"); |
| 4303 | goto err_request_irq_fail; |
| 4304 | } |
| 4305 | } |
| 4306 | |
| 4307 | for (i = 0; i < eq_num; i++) |
| 4308 | hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE); |
| 4309 | |
| 4310 | return 0; |
| 4311 | |
| 4312 | err_request_irq_fail: |
| 4313 | for (j -= 1; j >= 0; j--) |
| 4314 | free_irq(hr_dev->irq[j], &eq_table->eq[j]); |
| 4315 | |
| 4316 | err_create_eq_fail: |
| 4317 | for (i -= 1; i >= 0; i--) |
| 4318 | hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); |
| 4319 | |
| 4320 | kfree(eq_table->eqc_base); |
| 4321 | |
| 4322 | err_eqc_base_alloc_fail: |
| 4323 | kfree(eq_table->eq); |
| 4324 | |
| 4325 | return ret; |
| 4326 | } |
| 4327 | |
| 4328 | static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev) |
| 4329 | { |
| 4330 | struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; |
| 4331 | int irq_num; |
| 4332 | int eq_num; |
| 4333 | int i; |
| 4334 | |
| 4335 | eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; |
| 4336 | irq_num = eq_num + hr_dev->caps.num_other_vectors; |
| 4337 | for (i = 0; i < eq_num; i++) { |
| 4338 | /* Disable EQ */ |
| 4339 | hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE); |
| 4340 | |
| 4341 | free_irq(hr_dev->irq[i], &eq_table->eq[i]); |
| 4342 | |
| 4343 | hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); |
| 4344 | } |
| 4345 | for (i = eq_num; i < irq_num; i++) |
| 4346 | free_irq(hr_dev->irq[i], hr_dev); |
| 4347 | |
| 4348 | kfree(eq_table->eqc_base); |
| 4349 | kfree(eq_table->eq); |
| 4350 | } |
| 4351 | |
Kamal Heib | 7f645a5 | 2018-12-10 21:09:35 +0200 | [diff] [blame] | 4352 | static const struct ib_device_ops hns_roce_v1_dev_ops = { |
| 4353 | .destroy_qp = hns_roce_v1_destroy_qp, |
| 4354 | .modify_cq = hns_roce_v1_modify_cq, |
| 4355 | .poll_cq = hns_roce_v1_poll_cq, |
| 4356 | .post_recv = hns_roce_v1_post_recv, |
| 4357 | .post_send = hns_roce_v1_post_send, |
| 4358 | .query_qp = hns_roce_v1_query_qp, |
| 4359 | .req_notify_cq = hns_roce_v1_req_notify_cq, |
| 4360 | }; |
| 4361 | |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4362 | static const struct hns_roce_hw hns_roce_hw_v1 = { |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 4363 | .reset = hns_roce_v1_reset, |
| 4364 | .hw_profile = hns_roce_v1_profile, |
| 4365 | .hw_init = hns_roce_v1_init, |
| 4366 | .hw_exit = hns_roce_v1_exit, |
Wei Hu(Xavier) | a680f2f | 2017-08-30 17:23:05 +0800 | [diff] [blame] | 4367 | .post_mbox = hns_roce_v1_post_mbox, |
| 4368 | .chk_mbox = hns_roce_v1_chk_mbox, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 4369 | .set_gid = hns_roce_v1_set_gid, |
| 4370 | .set_mac = hns_roce_v1_set_mac, |
| 4371 | .set_mtu = hns_roce_v1_set_mtu, |
| 4372 | .write_mtpt = hns_roce_v1_write_mtpt, |
| 4373 | .write_cqc = hns_roce_v1_write_cqc, |
oulijun | b156269 | 2017-10-19 11:52:40 +0800 | [diff] [blame] | 4374 | .modify_cq = hns_roce_v1_modify_cq, |
Wei Hu (Xavier) | 97f0e39 | 2016-09-20 17:06:59 +0100 | [diff] [blame] | 4375 | .clear_hem = hns_roce_v1_clear_hem, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 4376 | .modify_qp = hns_roce_v1_modify_qp, |
| 4377 | .query_qp = hns_roce_v1_query_qp, |
| 4378 | .destroy_qp = hns_roce_v1_destroy_qp, |
| 4379 | .post_send = hns_roce_v1_post_send, |
| 4380 | .post_recv = hns_roce_v1_post_recv, |
| 4381 | .req_notify_cq = hns_roce_v1_req_notify_cq, |
| 4382 | .poll_cq = hns_roce_v1_poll_cq, |
Shaobo Xu | bfcc681b | 2016-11-29 23:10:26 +0000 | [diff] [blame] | 4383 | .dereg_mr = hns_roce_v1_dereg_mr, |
Shaobo Xu | afb6b09 | 2016-11-29 23:10:29 +0000 | [diff] [blame] | 4384 | .destroy_cq = hns_roce_v1_destroy_cq, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4385 | .init_eq = hns_roce_v1_init_eq_table, |
| 4386 | .cleanup_eq = hns_roce_v1_cleanup_eq_table, |
Kamal Heib | 7f645a5 | 2018-12-10 21:09:35 +0200 | [diff] [blame] | 4387 | .hns_roce_dev_ops = &hns_roce_v1_dev_ops, |
oulijun | 9a44353 | 2016-07-21 19:06:38 +0800 | [diff] [blame] | 4388 | }; |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4389 | |
| 4390 | static const struct of_device_id hns_roce_of_match[] = { |
| 4391 | { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, }, |
| 4392 | {}, |
| 4393 | }; |
| 4394 | MODULE_DEVICE_TABLE(of, hns_roce_of_match); |
| 4395 | |
| 4396 | static const struct acpi_device_id hns_roce_acpi_match[] = { |
| 4397 | { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 }, |
| 4398 | {}, |
| 4399 | }; |
| 4400 | MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); |
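|      | /* |
|      |  * Illustrative DT fragment only (node name, addresses and strings below |
|      |  * are placeholders, not taken from the binding document); it shows the |
|      |  * properties hns_roce_get_cfg() consumes: "eth-handle", "node-guid" and |
|      |  * "interrupt-names", plus the usual "reg"/"interrupts" resources: |
|      |  * |
|      |  *	roce: infiniband@c4000000 { |
|      |  *		compatible = "hisilicon,hns-roce-v1"; |
|      |  *		reg = <0x0 0xc4000000 0x0 0x100000>; |
|      |  *		node-guid = [00 9a cd 00 00 01 02 03]; |
|      |  *		eth-handle = <&eth0 &eth1 &eth2 &eth3>; |
|      |  *		interrupts = <...>; |
|      |  *		interrupt-names = "roce-comp-0", ..., "roce-abnormal"; |
|      |  *	}; |
|      |  */ |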
| 4401 | |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4402 | static struct |
| 4403 | platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) |
| 4404 | { |
| 4405 | struct device *dev; |
| 4406 | |
| 4407 | /* get the 'device' corresponding to the matching 'fwnode' */ |
Suzuki K Poulose | 67843bb | 2019-07-23 23:18:34 +0100 | [diff] [blame] | 4408 | dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4409 | /* get the platform device */ |
| 4410 | return dev ? to_platform_device(dev) : NULL; |
| 4411 | } |
| 4412 | |
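|      | /** |
|      |  * hns_roce_get_cfg - parse the RoCE configuration from DT or ACPI |
|      |  * @hr_dev: pointer to the RoCE device structure |
|      |  * |
|      |  * Resolves the HW-specific ops from the match data, maps the register |
|      |  * base, reads the node GUID, binds the "eth-handle" netdevs to physical |
|      |  * ports and fetches the interrupt names and numbers. |
|      |  * |
|      |  * Return: 0 on success, a negative error code otherwise. |
|      |  */ |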
| 4413 | static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) |
| 4414 | { |
| 4415 | struct device *dev = &hr_dev->pdev->dev; |
| 4416 | struct platform_device *pdev = NULL; |
| 4417 | struct net_device *netdev = NULL; |
| 4418 | struct device_node *net_node; |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4419 | int port_cnt = 0; |
| 4420 | u8 phy_port; |
| 4421 | int ret; |
| 4422 | int i; |
| 4423 | |
| 4424 | /* check if we are compatible with the underlying SoC */ |
| 4425 | if (dev_of_node(dev)) { |
| 4426 | const struct of_device_id *of_id; |
| 4427 | |
| 4428 | of_id = of_match_node(hns_roce_of_match, dev->of_node); |
| 4429 | if (!of_id) { |
| 4430 | dev_err(dev, "device is not compatible!\n"); |
| 4431 | return -ENXIO; |
| 4432 | } |
| 4433 | hr_dev->hw = (const struct hns_roce_hw *)of_id->data; |
| 4434 | if (!hr_dev->hw) { |
| 4435 | dev_err(dev, "couldn't get H/W specific DT data!\n"); |
| 4436 | return -ENXIO; |
| 4437 | } |
| 4438 | } else if (is_acpi_device_node(dev->fwnode)) { |
| 4439 | const struct acpi_device_id *acpi_id; |
| 4440 | |
| 4441 | acpi_id = acpi_match_device(hns_roce_acpi_match, dev); |
| 4442 | if (!acpi_id) { |
| 4443 | dev_err(dev, "device is not compatible!\n"); |
| 4444 | return -ENXIO; |
| 4445 | } |
| 4446 | 		hr_dev->hw = (const struct hns_roce_hw *)acpi_id->driver_data; |
| 4447 | if (!hr_dev->hw) { |
| 4448 | dev_err(dev, "couldn't get H/W specific ACPI data!\n"); |
| 4449 | return -ENXIO; |
| 4450 | } |
| 4451 | } else { |
| 4452 | dev_err(dev, "can't read compatibility data from DT or ACPI\n"); |
| 4453 | return -ENXIO; |
| 4454 | } |
| 4455 | |
| 4456 | /* get the mapped register base address */ |
YueHaibing | 3b961b4 | 2019-09-06 22:17:27 +0800 | [diff] [blame] | 4457 | hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4458 | if (IS_ERR(hr_dev->reg_base)) |
| 4459 | return PTR_ERR(hr_dev->reg_base); |
| 4460 | |
| 4461 | 	/* read the node_guid of the IB device from DT or ACPI */ |
| 4462 | ret = device_property_read_u8_array(dev, "node-guid", |
| 4463 | (u8 *)&hr_dev->ib_dev.node_guid, |
| 4464 | GUID_LEN); |
| 4465 | if (ret) { |
| 4466 | dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); |
| 4467 | return ret; |
| 4468 | } |
| 4469 | |
| 4470 | 	/* get the Ethernet ports (netdevs) associated with the RoCE device */ |
| 4471 | for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { |
| 4472 | if (dev_of_node(dev)) { |
| 4473 | net_node = of_parse_phandle(dev->of_node, "eth-handle", |
| 4474 | i); |
| 4475 | if (!net_node) |
| 4476 | continue; |
| 4477 | pdev = of_find_device_by_node(net_node); |
| 4478 | } else if (is_acpi_device_node(dev->fwnode)) { |
Sakari Ailus | 977d5ad | 2018-07-17 17:19:11 +0300 | [diff] [blame] | 4479 | struct fwnode_reference_args args; |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4480 | |
| 4481 | ret = acpi_node_get_property_reference(dev->fwnode, |
| 4482 | "eth-handle", |
| 4483 | i, &args); |
| 4484 | if (ret) |
| 4485 | continue; |
Sakari Ailus | 977d5ad | 2018-07-17 17:19:11 +0300 | [diff] [blame] | 4486 | pdev = hns_roce_find_pdev(args.fwnode); |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4487 | } else { |
| 4488 | dev_err(dev, "cannot read data from DT or ACPI\n"); |
| 4489 | return -ENXIO; |
| 4490 | } |
| 4491 | |
| 4492 | if (pdev) { |
| 4493 | netdev = platform_get_drvdata(pdev); |
| 4494 | phy_port = (u8)i; |
| 4495 | if (netdev) { |
| 4496 | hr_dev->iboe.netdevs[port_cnt] = netdev; |
| 4497 | hr_dev->iboe.phy_port[port_cnt] = phy_port; |
| 4498 | } else { |
| 4499 | 				dev_err(dev, "no netdev found for pdev %s\n", |
| 4500 | pdev->name); |
| 4501 | return -ENODEV; |
| 4502 | } |
| 4503 | port_cnt++; |
| 4504 | } |
| 4505 | } |
| 4506 | |
| 4507 | if (port_cnt == 0) { |
| 4508 | dev_err(dev, "unable to get eth-handle for available ports!\n"); |
| 4509 | return -EINVAL; |
| 4510 | } |
| 4511 | |
| 4512 | hr_dev->caps.num_ports = port_cnt; |
| 4513 | |
| 4514 | /* cmd issue mode: 0 is poll, 1 is event */ |
| 4515 | hr_dev->cmd_mod = 1; |
| 4516 | hr_dev->loop_idc = 0; |
Wei Hu(Xavier) | 2d40788 | 2017-08-30 17:23:14 +0800 | [diff] [blame] | 4517 | hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; |
| 4518 | hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG; |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4519 | |
| 4520 | /* read the interrupt names from the DT or ACPI */ |
| 4521 | ret = device_property_read_string_array(dev, "interrupt-names", |
| 4522 | hr_dev->irq_names, |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4523 | HNS_ROCE_V1_MAX_IRQ_NUM); |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4524 | if (ret < 0) { |
| 4525 | dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n"); |
| 4526 | return ret; |
| 4527 | } |
| 4528 | |
| 4529 | /* fetch the interrupt numbers */ |
Yixian Liu | b16f818 | 2017-11-14 17:26:16 +0800 | [diff] [blame] | 4530 | for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4531 | hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); |
Stephen Boyd | cb560f5 | 2019-07-30 11:15:20 -0700 | [diff] [blame] | 4532 | if (hr_dev->irq[i] <= 0) |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4533 | return -EINVAL; |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4534 | } |
| 4535 | |
| 4536 | return 0; |
| 4537 | } |
| 4538 | |
| 4539 | /** |
| 4540 |  * hns_roce_probe - RoCE driver entry point |
| 4541 |  * @pdev: pointer to the platform device |
| 4542 |  * |
| 4543 |  * Return: 0 on success, a negative error code otherwise. |
| 4544 |  */ |
| 4545 | static int hns_roce_probe(struct platform_device *pdev) |
| 4546 | { |
| 4547 | int ret; |
| 4548 | struct hns_roce_dev *hr_dev; |
| 4549 | struct device *dev = &pdev->dev; |
| 4550 | |
Leon Romanovsky | 459cc69 | 2019-01-30 12:49:11 +0200 | [diff] [blame] | 4551 | hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4552 | if (!hr_dev) |
| 4553 | return -ENOMEM; |
| 4554 | |
Wei Hu(Xavier) | 016a0059 | 2017-08-30 17:23:00 +0800 | [diff] [blame] | 4555 | hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL); |
| 4556 | if (!hr_dev->priv) { |
| 4557 | ret = -ENOMEM; |
| 4558 | goto error_failed_kzalloc; |
| 4559 | } |
| 4560 | |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4561 | hr_dev->pdev = pdev; |
Wei Hu(Xavier) | 13ca970 | 2017-08-30 17:23:02 +0800 | [diff] [blame] | 4562 | hr_dev->dev = dev; |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4563 | platform_set_drvdata(pdev, hr_dev); |
| 4564 | |
| 4565 | 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && |
| 4566 | 	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { |
| 4567 | 		dev_err(dev, "No usable DMA addressing mode\n"); |
| 4568 | ret = -EIO; |
| 4569 | goto error_failed_get_cfg; |
| 4570 | } |
| 4571 | |
| 4572 | ret = hns_roce_get_cfg(hr_dev); |
| 4573 | if (ret) { |
| 4574 | 		dev_err(dev, "Get configuration failed!\n"); |
| 4575 | goto error_failed_get_cfg; |
| 4576 | } |
| 4577 | |
| 4578 | ret = hns_roce_init(hr_dev); |
| 4579 | if (ret) { |
| 4580 | dev_err(dev, "RoCE engine init failed!\n"); |
| 4581 | goto error_failed_get_cfg; |
| 4582 | } |
| 4583 | |
| 4584 | return 0; |
| 4585 | |
| 4586 | error_failed_get_cfg: |
Wei Hu(Xavier) | 016a0059 | 2017-08-30 17:23:00 +0800 | [diff] [blame] | 4587 | kfree(hr_dev->priv); |
| 4588 | |
| 4589 | error_failed_kzalloc: |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4590 | ib_dealloc_device(&hr_dev->ib_dev); |
| 4591 | |
| 4592 | return ret; |
| 4593 | } |
| 4594 | |
| 4595 | /** |
| 4596 |  * hns_roce_remove - remove the RoCE device |
| 4597 |  * @pdev: pointer to the platform device |
|      |  * |
|      |  * Return: always 0. |
| 4598 |  */ |
| 4599 | static int hns_roce_remove(struct platform_device *pdev) |
| 4600 | { |
| 4601 | struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev); |
| 4602 | |
| 4603 | hns_roce_exit(hr_dev); |
Wei Hu(Xavier) | 016a0059 | 2017-08-30 17:23:00 +0800 | [diff] [blame] | 4604 | kfree(hr_dev->priv); |
Wei Hu(Xavier) | 08805fd | 2017-08-30 17:22:59 +0800 | [diff] [blame] | 4605 | ib_dealloc_device(&hr_dev->ib_dev); |
| 4606 | |
| 4607 | return 0; |
| 4608 | } |
| 4609 | |
| 4610 | static struct platform_driver hns_roce_driver = { |
| 4611 | .probe = hns_roce_probe, |
| 4612 | .remove = hns_roce_remove, |
| 4613 | .driver = { |
| 4614 | .name = DRV_NAME, |
| 4615 | .of_match_table = hns_roce_of_match, |
| 4616 | .acpi_match_table = ACPI_PTR(hns_roce_acpi_match), |
| 4617 | }, |
| 4618 | }; |
| 4619 | |
| 4620 | module_platform_driver(hns_roce_driver); |
| 4621 | |
| 4622 | MODULE_LICENSE("Dual BSD/GPL"); |
| 4623 | MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); |
| 4624 | MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>"); |
| 4625 | MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); |
| 4626 | MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver"); |