/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_rdma.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

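/* Tx-done callback used on the qed_dev ("cb") LL2 path: release the DMA
 * mapping of the single Tx fragment, notify the upper-layer driver and
 * free the skb.
 */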
static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

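/* Rx-done callback used on the qed_dev LL2 path: try to allocate a
 * replacement buffer, hand the completed one up as an skb, and repost a
 * buffer (the replacement, or the original on any failure) to firmware.
 */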
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		DP_INFO(cdev, "Failed to build SKB\n");
		kfree(buffer->data);
		goto out_post1;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	} else {
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");
		kfree(buffer->data);
	}

out_post1:
	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

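/* Validate a connection handle and return the corresponding qed_ll2_info,
 * optionally under the connection mutex and/or only if the connection is
 * active; returns NULL otherwise.
 */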
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

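/* Empty the Tx active list on teardown: OOO buffers go back to the free
 * pool, anything else is completed through tx_release_cb.
 */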
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
		spin_lock_irqsave(&p_tx->lock, flags);
	}
	spin_unlock_irqrestore(&p_tx->lock, flags);
}

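/* Tx interrupt callback: complete as many packets as the firmware
 * consumer index covers, invoking tx_comp_cb with the queue lock dropped.
 */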
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);

	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
		DP_NOTICE(p_hwfn,
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);
		return -EINVAL;
	}

	if (!p_ll2_conn->cbs.slowpath_cb) {
		DP_NOTICE(p_hwfn,
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
		return -EINVAL;
	}

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    p_ll2_conn->my_id,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

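/* Complete a single Rx CQE: parse it (regular or GSI flavor) into
 * qed_ll2_comp_rx_data, recycle the descriptor and call rx_comp_cb with
 * the queue lock dropped.
 */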
static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

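/* Rx interrupt callback: walk the RCQ up to the firmware consumer index
 * and dispatch each CQE to the slowpath or fastpath handler by type.
 */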
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
						     cqe, &flags);
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

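/* Empty the Rx active list on teardown: OOO buffers go back to the free
 * pool, anything else is released through rx_release_cb.
 */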
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;
		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
		spin_lock_irqsave(&p_rx->lock, flags);
	}
	spin_unlock_irqrestore(&p_rx->lock, flags);
}

static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
				struct core_rx_slow_path_cqe *p_cqe)
{
	struct ooo_opaque *iscsi_ooo;
	u32 cid;

	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
		return false;

	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
		return false;

	/* Need to make a flush */
	cid = le32_to_cpu(iscsi_ooo->cid);
	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);

	return true;
}

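/* Rx handler for the loopback (OOO) queue: apply each CQE's out-of-order
 * opcode to the isle bookkeeping (delete, create, extend or join isles)
 * and mark buffers added to the peninsula as ready for transmission.
 */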
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
							    &cqe->rx_cqe_sp))
				continue;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

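/* Tx interrupt callback for the loopback (OOO) queue: every completed
 * single-BD packet has its buffer reposted to the Rx queue (or returned
 * to the free pool on failure), then ready buffers are resubmitted.
 */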
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

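/* Post the CORE_RAMROD_RX_QUEUE_START ramrod describing the Rx BD chain,
 * the CQE PBL, MTU, VLAN stripping and error/drop policies to firmware.
 */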
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_stripping_en =
		p_ll2_conn->input.rx_vlan_removal_en;

	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
		p_ramrod->report_outer_vlan = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;

	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
	    conn_type != QED_LL2_TYPE_IWARP) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

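/* Post the CORE_RAMROD_TX_QUEUE_START ramrod, selecting the physical
 * queue by traffic class and the firmware protocol matching the
 * connection type.
 */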
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			p_ramrod->conn_type = PROTOCOLID_ISCSI;
		else
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 desc_size;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain, NULL);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_descq) +
		     (p_ll2_info->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_descq->bds_set));

	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_mem = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

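/* For OOO connections, preallocate the coherent DMA buffers that hold
 * out-of-order segments; each buffer is sized from the MTU and rounded
 * up to the cache-line size.
 */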
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

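/* Reserve a free LL2 connection slot, copy in the caller's parameters and
 * callbacks, allocate the Rx/Tx descriptor chains (and OOO buffers when
 * needed) and register the status-block completion callbacks. The slot
 * index is returned through data->p_connection_handle.
 */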
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	switch (data->input.tx_dest) {
	case QED_LL2_TX_DEST_NW:
		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		return -EINVAL;
	}

	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
	    data->input.secondary_queue)
		p_ll2_info->main_func_queue = false;
	else
		p_ll2_info->main_func_queue = true;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

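/* Activate an acquired connection: reset the descriptor chains and lists,
 * acquire a CID, compute the queue id, producer and doorbell addresses,
 * and start the Rx/Tx queues in firmware. FCoE connections also get
 * FCoE/FIP ethertype filters in the LLH.
 */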
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_packet *p_pkt;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u32 desc_size;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_pkt) +
		     (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_pkt->bds_set));

	for (i = 0; i < capacity; i++) {
		p_pkt = p_tx->descq_mem + desc_size * i;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
	}
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
			      GTT_BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
			      qed_db_addr(p_ll2_conn->cid,
					  DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_add_protocol_filter(p_hwfn, p_ptt,
						    ETH_P_FCOE, 0,
						    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    ETH_P_FIP, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

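/* Move buffers queued for posting onto the active list and, if anything
 * changed, publish the new BD and CQE producer values through the TSTORM
 * producer address so firmware can start using the buffers.
 */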
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

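/* Hand a DMA-mapped receive buffer to the connection. The buffer is tied
 * to a free Rx descriptor and either posted to firmware immediately
 * (notify_fw set) or parked on posting_descq to be flushed with a later
 * producer update. A minimal caller sketch, mirroring the usage in
 * qed_ll2_start() below and assuming 'addr' was obtained from
 * dma_map_single() and 'buffer' is the caller's cookie:
 *
 *	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, addr, 0, buffer, 1);
 */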
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

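/* Bind a free Tx descriptor to the packet being sent and record its first
 * fragment; subsequent fragments are filled in one BD at a time.
 */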
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

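/* Build the first Tx BD on the chain: destination, RoCE flavor, VLAN or
 * loopback echo, checksum/S-tag offload flags and the first fragment's
 * address. Placeholder BDs are produced up front for any remaining
 * fragments.
 */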
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	switch (pkt->tx_dest) {
	case QED_LL2_TX_DEST_NW:
		tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		tx_dest = CORE_TX_DEST_LB;
		break;
	}

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
		start_bd->nw_vlan_or_lb_echo =
		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
	} else {
		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
			pkt->remove_stag = true;
	}

	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
		  !!(pkt->remove_stag));

	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, db_msg.spq_prod);
}

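/* Queue a packet for transmission. Only the first fragment is described
 * here; for multi-BD packets the caller supplies the remaining fragments
 * via qed_ll2_set_fragment_of_tx_packet() before the doorbell is rung.
 * A minimal single-fragment sketch, mirroring qed_ll2_start_xmit() below
 * and assuming 'mapping'/'len' describe DMA-mapped linear data and 'skb'
 * is the caller's cookie:
 *
 *	struct qed_ll2_tx_pkt_info tx_pkt;
 *
 *	memset(&tx_pkt, 0, sizeof(tx_pkt));
 *	tx_pkt.num_of_bds = 1;
 *	tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
 *	tx_pkt.first_frag = mapping;
 *	tx_pkt.first_frag_len = len;
 *	tx_pkt.cookie = skb;
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &tx_pkt, 1);
 */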
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

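/* Fill the next pending BD of the in-flight packet with one more fragment
 * and ring the doorbell once the last expected fragment has been set.
 */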
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

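/* Stop the connection's Rx/Tx queues via ramrods, flush any outstanding
 * descriptors and unregister the status-block callbacks. The connection's
 * resources stay allocated until qed_ll2_release_connection().
 */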
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_txq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_rxq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
						       ETH_P_FCOE, 0,
						       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       ETH_P_FIP, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

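/* Free everything qed_ll2_acquire_connection() allocated - descriptor
 * memory, the Rx/Tx chains and the CID - and mark the handle inactive.
 */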
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

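/* Per-hwfn lifetime of the LL2 connection array: alloc on probe, setup
 * initializes each connection's mutex, free on removal.
 */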
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;
	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

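/* The statistics helpers below read the per-port and per-queue counters
 * that firmware maintains in storm RAM and fold the 64-bit HI/LO register
 * pairs into the qed_ll2_stats structure.
 */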
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length =
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ =
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error =
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

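/* Aggregate port, TSTORM, USTORM and (when Tx stats are enabled) PSTORM
 * counters for one connection into a zeroed qed_ll2_stats snapshot.
 */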
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

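/* Populate a qed_ll2_acquire_data request from the generic LL2 parameters;
 * loopback connections (used for OOO) are steered to the LB traffic class
 * and LB destination, everything else goes to the network.
 */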
static void qed_ll2_set_conn_data(struct qed_dev *cdev,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(hwfn, &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

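/* qed_ll2_start() below is the protocol-driver entry point: it allocates
 * and posts Rx buffers, derives the connection type from the PCI
 * personality and then walks the acquire/establish sequence. A minimal
 * sketch of that lifecycle, with error handling elided and 'params'
 * assumed to be filled in by the caller:
 *
 *	struct qed_ll2_acquire_data data;
 *	u8 handle;
 *
 *	qed_ll2_set_conn_data(cdev, &data, params, QED_LL2_TYPE_TEST,
 *			      &handle, false);
 *	qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
 *	qed_ll2_establish_connection(QED_LEADING_HWFN(cdev), handle);
 *	...
 *	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), handle);
 *	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), handle);
 */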
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(cdev, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

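/* Transmit an skb over the bound LL2 connection: map the linear part,
 * translate checksum/VLAN needs into BD flags, prepare the first BD and
 * then map and attach each page fragment as an additional BD.
 */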
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is not much to do here: a partial
		 * packet has already been posted and its memory cannot be
		 * freed, so we must wait for the completion instead.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}