// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2021 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"

#define CDU_VALIDATION_DEFAULT_CFG	CDU_CONTEXT_VALIDATION_DEFAULT_CFG

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
};

/* General constants */
#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
							QM_PQ_ELEMENT_SIZE, \
							0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
								0x100) - 1 : 0)
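
/* Worked example (illustrative numbers only, assuming QM_PQ_ELEMENT_SIZE is
 * 4 bytes): a PQ with 1024 CIDs needs (1024 + 1) * 4 = 4100 bytes, so
 * QM_PQ_MEM_4KB(1024) = DIV_ROUND_UP(4100, 0x1000) = 2 pages of 4KB, while
 * QM_PQ_SIZE_256B(1024) = DIV_ROUND_UP(1024, 0x100) - 1 = 3, i.e. one less
 * than the rounded-up count of 256-unit chunks. A zero pq_size yields 0 in
 * both macros.
 */
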
#define QM_INVALID_PQ_ID		0xffff

/* Max link speed (in Mbps) */
#define QM_MAX_LINK_SPEED		100000

/* Feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1

/* Initial VOQ byte credit */
#define QM_INITIAL_VOQ_BYTE_CRD		98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF		4

/* VOQ constants */
#define MAX_NUM_VOQS			(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
#define VOQS_BIT_MASK			(BIT(MAX_NUM_VOQS) - 1)

/* WFQ constants */

/* PF WFQ increment value, 0x9000 = 4*9*1024 */
#define QM_PF_WFQ_INC_VAL(weight)	((weight) * 0x9000)

/* PF WFQ upper bound, in bytes (~62.5 MB), 10 * burst size of 1ms at 50Gbps */
#define QM_PF_WFQ_UPPER_BOUND		62500000

/* PF WFQ max increment value, 0.7 * upper bound */
#define QM_PF_WFQ_MAX_INC_VAL		((QM_PF_WFQ_UPPER_BOUND * 7) / 10)

/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
#define QM_PF_WFQ_CRD_E5_NUM_VOQS	16

/* VP WFQ increment value */
#define QM_VP_WFQ_INC_VAL(weight)	((weight) * QM_VP_WFQ_MIN_INC_VAL)

/* VP WFQ min increment value */
#define QM_VP_WFQ_MIN_INC_VAL		10800

/* VP WFQ max increment value, 2^30 */
#define QM_VP_WFQ_MAX_INC_VAL		0x40000000

/* VP WFQ bypass threshold */
#define QM_VP_WFQ_BYPASS_THRESH		(QM_VP_WFQ_MIN_INC_VAL - 100)

/* VP RL credit task cost */
#define QM_VP_RL_CRD_TASK_COST		9700

/* Bit of VOQ in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_VOQ_SHIFT		0

/* Bit of PF in VP WFQ PQ map */
#define QM_VP_WFQ_PQ_PF_SHIFT		5
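
/* Worked example (illustrative weights only): a PF WFQ weight of 1 programs
 * an increment of 0x9000 = 36864 bytes of credit, so weights up to about
 * QM_PF_WFQ_MAX_INC_VAL / 0x9000 (~1186) pass validation. A VP WFQ weight of
 * 1 programs QM_VP_WFQ_MIN_INC_VAL = 10800, bounded by
 * QM_VP_WFQ_MAX_INC_VAL / 10800 (~99420).
 */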

/* RL constants */

/* Period in us */
#define QM_RL_PERIOD			5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) ({	\
	typeof(rate) __rate = (rate); \
	max_t(u32, \
	      (u32)(((__rate ? __rate : \
		      100000) * \
		     QM_RL_PERIOD * \
		     101) / (8 * 100)), 1); })
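
/* Worked example (illustrative rate only): at rate = 25000 Mbps, one 5us RL
 * period carries 25000 * 5 = 125000 bits = 15625 bytes, and the 101/100
 * factor adds a 1% margin, so QM_RL_INC_VAL(25000) = (25000 * 5 * 101) / 800
 * = 15781 bytes of credit per period. A zero rate falls back to 100000 Mbps
 * (max link speed), and the result is clamped to at least 1.
 */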

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND		62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL		((QM_PF_RL_UPPER_BOUND * 7) / 10)

/* QCN RL upper bound, speed is in Mbps */
#define QM_GLOBAL_RL_UPPER_BOUND(speed)	((u32)max_t( \
	u32, \
	(u32)(((speed) * \
	       QM_RL_PERIOD * 101) / (8 * 100)), \
	QM_VP_RL_CRD_TASK_COST \
	+ 1000))
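
/* Worked example (illustrative speeds only): at 25000 Mbps the rate term is
 * (25000 * 5 * 101) / 800 = 15781, which wins over the floor of
 * QM_VP_RL_CRD_TASK_COST + 1000 = 10700. At 10000 Mbps the rate term is only
 * 6312, so the task-cost floor of 10700 is used instead.
 */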

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES		150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

/* Returns the VOQ line credit for the specified number of PBF command lines.
 * PBF lines are specified in 256b units.
 */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
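
/* Worked example: for the pure LB VOQ,
 * QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES) = ((150 - 4) * 2) = 292, OR'ed
 * with QM_LINE_CRD_REG_SIGN_BIT; i.e. the macro holds back 4 lines and
 * doubles the remaining count before setting the sign bit.
 */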

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS		38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR		10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO		7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)

#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
			  rl_id, ext_voq, wrr) \
	do { \
		u32 __reg = 0; \
		\
		BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
		memset(&(map), 0, sizeof(map)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
			  !!(rl_valid)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
		SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
			  (wrr)); \
		\
		STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
			     __reg); \
		(map).reg = cpu_to_le32(__reg); \
	} while (0)

#define WRITE_PQ_INFO_TO_RAM	1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	 ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
	 (((rl) >> 8) << 9))

#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
	 XSTORM_PQ_INFO_OFFSET(pq_id))
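
/* PQ info element layout, as it follows directly from the shifts above:
 * vp in bits [11:0], pf in [15:12], tc in [19:16], port in [21:20],
 * rl_valid in [22], the low byte of rl in [31:24], and the upper bits of rl
 * folded in starting at bit 9.
 */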

/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
	else
		return port_id * max_phys_tcs_per_port + tc;
}
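
/* Example mapping (illustrative, assuming NUM_OF_PHYS_TCS == 8 and
 * MAX_NUM_PORTS_BB == 2): physical TC 2 on port 1 with 4 TCs per port maps
 * to VOQ 1 * 4 + 2 = 6, while the pure LB TC on port 1 maps past all
 * physical VOQs to 8 * 2 + 1 = 17.
 */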

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = MAX_NUM_VOQS;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
			     (u32)voq_bit_mask);

		/* Write RL period */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
				     QM_PF_RL_UPPER_BOUND);
	}
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
			     QM_PF_WFQ_UPPER_BOUND);
}

/* Prepare global RL enable/disable runtime init values */
static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
		     global_rl_en ? 1 : 0);
	if (global_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
			     QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn,
				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
				     QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
	}
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
		     vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn,
			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
			     QM_VP_WFQ_BYPASS_THRESH);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
				       u8 ext_voq, u16 cmdq_lines)
{
	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
			 (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
		     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void
qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = MAX_NUM_VOQS;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = qed_get_ext_voq(p_hwfn,
						  port_id,
						  tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				qed_cmdq_lines_voq_rt_init(p_hwfn,
							   ext_voq,
							   phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
					   PBF_CMDQ_PURE_LB_LINES);
	}
}

/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
 *    e. B / C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
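/* Worked example, tracing the code below with illustrative numbers: a port
 * with 4038 BTB blocks and 2 active physical TCs first reserves 38 headroom
 * blocks, leaving 4000 usable. The factored pure LB share is
 * (4000 * 10) / (2 * 10 + 7) = 1481, so pure_lb_blocks =
 * max(38, 1481 / 10) = 148, and each physical TC then gets
 * (4000 - 148) / 2 = 1926 guaranteed blocks.
 */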
static void
qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
		       u8 max_ports_per_engine,
		       u8 max_phys_tcs_per_port,
		       struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks -
				BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use a factor to avoid
		 * floating-point arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
				  BTB_PURE_LB_RATIO);
		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) /
			      num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >>
			      tc) & 0x1) == 1) {
				ext_voq =
					qed_get_ext_voq(p_hwfn,
							port_id,
							tc,
							max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn,
					     PBF_BTB_GUARANTEED_RT_OFFSET
					     (ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = qed_get_ext_voq(p_hwfn,
					  port_id,
					  PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
			     pure_lb_blocks);
	}
}

/* Prepare runtime init values for the global RLs.
 * Each rate limiter is set to the max link speed (100Gbps).
 * Return -1 on error.
 */
| 452 | static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) |
| 453 | { |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 454 | u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 455 | (u32)QM_RL_CRD_REG_SIGN_BIT; |
| 456 | u32 inc_val; |
| 457 | u16 rl_id; |
| 458 | |
| 459 | /* Go over all global RLs */ |
| 460 | for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) { |
| 461 | inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED); |
| 462 | |
| 463 | STORE_RT_REG(p_hwfn, |
| 464 | QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, |
| 465 | (u32)QM_RL_CRD_REG_SIGN_BIT); |
| 466 | STORE_RT_REG(p_hwfn, |
| 467 | QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, |
| 468 | upper_bound); |
| 469 | STORE_RT_REG(p_hwfn, |
| 470 | QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val); |
| 471 | } |
| 472 | |
| 473 | return 0; |
| 474 | } |
| 475 | |
/* Returns the upper bound for the specified VPORT RL parameters.
 * link_speed is in Mbps.
 * Returns 0 in case of error.
 */
static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
					u32 link_speed)
{
	switch (vport_rl_type) {
	case QM_RL_TYPE_NORMAL:
		return QM_INITIAL_VOQ_BYTE_CRD;
	case QM_RL_TYPE_QCN:
		return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
	default:
		return 0;
	}
}

/* Prepare VPORT RL runtime init values.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u16 start_rl,
				u16 num_rls,
				u32 link_speed,
				struct init_qm_rl_params *rl_params)
{
	u16 i, rl_id;

	if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
		u32 upper_bound, inc_val;

		upper_bound =
			qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
						     rl_params[i].vport_rl_type,
						     link_speed);

		inc_val =
			QM_RL_INC_VAL(rl_params[i].vport_rl ?
				      rl_params[i].vport_rl : link_speed);
		if (inc_val > upper_bound) {
			DP_NOTICE(p_hwfn,
				  "Invalid RL rate - limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
			     (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
			     upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
			     inc_val);
	}

	return 0;
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_qm_pf_rt_init_params *p_params,
				 u32 base_mem_addr_4kb)
{
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	struct init_qm_vport_params *vport_params = p_params->vport_params;
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;

	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (p_params->start_pq + num_pqs - 1) /
			QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
			     (u32)(p_params->pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
		     QM_PQ_SIZE_256B(p_params->num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
		u16 *p_first_tx_pq_id, vport_id_in_pf;
		struct qm_rf_pq_map tx_pq_map;
		u8 tc_id = pq_params[i].tc_id;
		bool is_vf_pq;
		u8 ext_voq;

		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  tc_id,
					  p_params->max_phys_tcs_per_port);
		is_vf_pq = (i >= p_params->num_pf_pqs);

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
		p_first_tx_pq_id =
			&vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val =
				(ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
				(p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);

			/* Create new VP PQ */
			*p_first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn,
				     QM_REG_WFQVPMAP_RT_OFFSET +
				     *p_first_tx_pq_id,
				     map_val);
		}

		/* Prepare PQ map entry */
		QM_INIT_TX_PQ_MAP(p_hwfn,
				  tx_pq_map,
				  pq_id,
				  *p_first_tx_pq_id,
				  pq_params[i].rl_valid,
				  pq_params[i].rl_id,
				  ext_voq, pq_params[i].wrr_group);

		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry (64 bit) */
		if (p_params->is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLTX_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
						  p_params->pf_id,
						  tc_id,
						  pq_params[i].port_id,
						  pq_params[i].rl_valid,
						  pq_params[i].rl_id);
			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
			       pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id /
				      QM_PF_QUEUE_GROUP_SIZE] |=
				BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn,
				     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
				     tx_pq_vf_mask[i]);

	return 0;
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
				     u8 pf_id,
				     bool is_pf_loading,
				     u32 num_pf_cids,
				     u32 num_tids, u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, j, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
		     (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
		     QM_PQ_SIZE_256B(pq_size));

	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		/* Set PQ base address */
		STORE_RT_REG(p_hwfn,
			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
			     mem_addr_4kb);

		/* Clear PQ pointer table entry */
		if (is_pf_loading)
			for (j = 0; j < 2; j++)
				STORE_RT_REG(p_hwfn,
					     QM_REG_PTRTBLOTHER_RT_OFFSET +
					     (pq_id * 2) + j, 0);

		mem_addr_4kb += pq_mem_4kb;
	}
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      struct qed_qm_pf_rt_init_params *p_params)
{
	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
	struct init_qm_pq_params *pq_params = p_params->pq_params;
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
	if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = qed_get_ext_voq(p_hwfn,
					  pq_params[i].port_id,
					  pq_params[i].tc_id,
					  p_params->max_phys_tcs_per_port);
		crd_reg_offset =
			(p_params->pf_id < MAX_NUM_PFS_BB ?
			 QM_REG_WFQPFCRD_RT_OFFSET :
			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
			ext_voq * MAX_NUM_PFS_BB +
			(p_params->pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn,
				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
		     QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
		     inc_val);

	return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn,
		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u16 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id, wfq, i;
	u32 inc_val;
	u8 tc;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			/* Check if VPORT/TC is valid */
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id == QM_INVALID_PQ_ID)
				continue;

			/* Find WFQ weight (per VPORT or per VPORT+TC) */
			wfq = vport_params[i].wfq;
			wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
			inc_val = QM_VP_WFQ_INC_VAL(wfq);
			if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
				DP_NOTICE(p_hwfn,
					  "Invalid VPORT WFQ weight configuration\n");
				return -1;
			}

			/* Config registers */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
				     vport_pq_id,
				     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
				     vport_pq_id,
				     inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
				     vport_pq_id, inc_val);
		}
	}

	return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
	     i++) {
		udelay(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
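
/* Usage sketch (illustrative only, pq_mask is a caller-supplied placeholder):
 * the QM stop command constants above compose a two-dword command that is
 * sent through qed_send_qm_cmd(). E.g., pausing Tx PQ group 3 could look
 * like:
 *
 *	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
 *
 *	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, pq_mask);
 *	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, 3);
 *	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, 1);
 *	qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
 *			cmd_arr[0], cmd_arr[1]);
 */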

/******************** INTERFACE IMPLEMENTATION *********************/

u32 qed_qm_pf_mem_size(u32 num_pf_cids,
		       u32 num_vf_cids,
		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
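
/* Worked example (illustrative sizes only, assuming QM_PQ_ELEMENT_SIZE is
 * 4 bytes): with num_pf_cids = 2048, num_vf_cids = 512, num_tids = 1024,
 * num_pf_pqs = 8 and num_vf_pqs = 4, QM_PQ_MEM_4KB(2048) = 3,
 * QM_PQ_MEM_4KB(512) = 1 and QM_PQ_MEM_4KB(3072) = 4, so the PF memory size
 * is 3 * 8 + 1 * 4 + 4 * QM_OTHER_PQS_PER_PF = 44 pages of 4KB (176KB).
 */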
| 862 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 863 | int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, |
| 864 | struct qed_qm_common_rt_init_params *p_params) |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 865 | { |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 866 | u32 mask = 0; |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 867 | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 868 | /* Init AFullOprtnstcCrdMask */ |
| 869 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, |
| 870 | QM_OPPOR_LINE_VOQ_DEF); |
| 871 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 872 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, |
| 873 | p_params->pf_wfq_en ? 1 : 0); |
| 874 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, |
| 875 | p_params->vport_wfq_en ? 1 : 0); |
| 876 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, |
| 877 | p_params->pf_rl_en ? 1 : 0); |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 878 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 879 | p_params->global_rl_en ? 1 : 0); |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 880 | SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); |
| 881 | SET_FIELD(mask, |
| 882 | QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 883 | STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 884 | |
| 885 | /* Enable/disable PF RL */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 886 | qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 887 | |
| 888 | /* Enable/disable PF WFQ */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 889 | qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 890 | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 891 | /* Enable/disable global RL */ |
| 892 | qed_enable_global_rl(p_hwfn, p_params->global_rl_en); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 893 | |
| 894 | /* Enable/disable VPORT WFQ */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 895 | qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 896 | |
| 897 | /* Init PBF CMDQ line credit */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 898 | qed_cmdq_lines_rt_init(p_hwfn, |
| 899 | p_params->max_ports_per_engine, |
| 900 | p_params->max_phys_tcs_per_port, |
| 901 | p_params->port_params); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 902 | |
| 903 | /* Init BTB blocks in PBF */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 904 | qed_btb_blocks_rt_init(p_hwfn, |
| 905 | p_params->max_ports_per_engine, |
| 906 | p_params->max_phys_tcs_per_port, |
| 907 | p_params->port_params); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 908 | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 909 | qed_global_rl_rt_init(p_hwfn); |
| 910 | |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 911 | return 0; |
| 912 | } |
| 913 | |
| 914 | int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, |
| 915 | struct qed_ptt *p_ptt, |
| 916 | struct qed_qm_pf_rt_init_params *p_params) |
| 917 | { |
| 918 | struct init_qm_vport_params *vport_params = p_params->vport_params; |
| 919 | u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids + |
| 920 | p_params->num_tids) * |
| 921 | QM_OTHER_PQS_PER_PF; |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 922 | u16 i; |
| 923 | u8 tc; |
| 924 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 925 | /* Clear first Tx PQ ID array for each VPORT */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 926 | for (i = 0; i < p_params->num_vports; i++) |
| 927 | for (tc = 0; tc < NUM_OF_TCS; tc++) |
| 928 | vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; |
| 929 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 930 | /* Map Other PQs (if any) */ |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 931 | qed_other_pq_map_rt_init(p_hwfn, |
| 932 | p_params->pf_id, |
| 933 | p_params->is_pf_loading, p_params->num_pf_cids, |
| 934 | p_params->num_tids, 0); |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 935 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 936 | /* Map Tx PQs */ |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 937 | if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) |
| 938 | return -1; |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 939 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 940 | /* Init PF WFQ */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 941 | if (p_params->pf_wfq) |
| 942 | if (qed_pf_wfq_rt_init(p_hwfn, p_params)) |
| 943 | return -1; |
| 944 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 945 | /* Init PF RL */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 946 | if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) |
| 947 | return -1; |
| 948 | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 949 | /* Init VPORT WFQ */ |
Yuval Mintz | fc48b7a | 2016-02-15 13:22:35 -0500 | [diff] [blame] | 950 | if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 951 | return -1; |
| 952 | |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 953 | /* Set VPORT RL */ |
| 954 | if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, |
| 955 | p_params->num_rls, p_params->link_speed, |
| 956 | p_params->rl_params)) |
| 957 | return -1; |
| 958 | |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 959 | return 0; |
| 960 | } |
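| | 
| | /* Note on ordering (editorial): vport_params[i].first_tx_pq_id[] is
| |  * cleared at the top of this function, filled in by the Tx PQ mapping
| |  * stage, and only then consumed by the VPORT WFQ/RL init steps; the
| |  * runtime helpers below likewise skip TCs left at QM_INVALID_PQ_ID.
| |  */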
| 961 | |
Manish Chopra | a64b02d | 2016-04-26 10:56:10 -0400 | [diff] [blame] | 962 | int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 963 | struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) |
Manish Chopra | a64b02d | 2016-04-26 10:56:10 -0400 | [diff] [blame] | 964 | { |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 965 | u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); |
Manish Chopra | a64b02d | 2016-04-26 10:56:10 -0400 | [diff] [blame] | 966 | |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 967 | if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 968 | DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); |
Manish Chopra | a64b02d | 2016-04-26 10:56:10 -0400 | [diff] [blame] | 969 | return -1; |
| 970 | } |
| 971 | |
| 972 | qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 973 | |
Manish Chopra | a64b02d | 2016-04-26 10:56:10 -0400 | [diff] [blame] | 974 | return 0; |
| 975 | } |
| 976 | |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 977 | int qed_init_pf_rl(struct qed_hwfn *p_hwfn, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 978 | struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl) |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 979 | { |
| 980 | u32 inc_val = QM_RL_INC_VAL(pf_rl); |
| 981 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 982 | if (inc_val > QM_PF_RL_MAX_INC_VAL) { |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 983 | DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 984 | return -1; |
| 985 | } |
| 986 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 987 | qed_wr(p_hwfn, |
| 988 | p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 989 | qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); |
| 990 | |
| 991 | return 0; |
| 992 | } |
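| | 
| | /* Illustrative sketch, not part of the driver: runtime PF QoS tuning
| |  * using the two helpers above. The PF id, weight and rate (assumed to
| |  * be in Mbps, consistent with QM_MAX_LINK_SPEED) are hypothetical.
| |  */
| | static int __maybe_unused qed_example_tune_pf_qos(struct qed_hwfn *p_hwfn,
| | struct qed_ptt *p_ptt)
| | {
| | int rc;
| | 
| | /* Give PF 0 a WFQ weight of 2 */
| | rc = qed_init_pf_wfq(p_hwfn, p_ptt, 0, 2);
| | if (rc)
| | return rc;
| | 
| | /* Cap PF 0 at 10 Gbps */
| | return qed_init_pf_rl(p_hwfn, p_ptt, 0, 10000);
| | }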
| 993 | |
Manish Chopra | bcd197c | 2016-04-26 10:56:08 -0400 | [diff] [blame] | 994 | int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, |
| 995 | struct qed_ptt *p_ptt, |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 996 | u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) |
Manish Chopra | bcd197c | 2016-04-26 10:56:08 -0400 | [diff] [blame] | 997 | { |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 998 | int result = 0; |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 999 | u16 vport_pq_id; |
Manish Chopra | bcd197c | 2016-04-26 10:56:08 -0400 | [diff] [blame] | 1000 | u8 tc; |
| 1001 | |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1002 | for (tc = 0; tc < NUM_OF_TCS && !result; tc++) { |
| 1003 | vport_pq_id = first_tx_pq_id[tc]; |
| 1004 | if (vport_pq_id != QM_INVALID_PQ_ID) |
| 1005 | result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, |
| 1006 | vport_pq_id, wfq); |
| 1007 | } |
| 1008 | |
| 1009 | return result; |
| 1010 | } |
| 1011 | |
| 1012 | int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 1013 | u16 first_tx_pq_id, u16 wfq) |
| 1014 | { |
| 1015 | u32 inc_val; |
| 1016 | |
| 1017 | if (first_tx_pq_id == QM_INVALID_PQ_ID) |
| 1018 | return -1; |
| 1019 | |
| 1020 | inc_val = QM_VP_WFQ_INC_VAL(wfq); |
| 1021 | if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 1022 | DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); |
Manish Chopra | bcd197c | 2016-04-26 10:56:08 -0400 | [diff] [blame] | 1023 | return -1; |
| 1024 | } |
| 1025 | |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1026 | qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, |
| 1027 | (u32)QM_WFQ_CRD_REG_SIGN_BIT); |
| 1028 | qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, |
| 1029 | inc_val | QM_WFQ_CRD_REG_SIGN_BIT); |
| 1030 | qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, |
| 1031 | inc_val); |
Manish Chopra | bcd197c | 2016-04-26 10:56:08 -0400 | [diff] [blame] | 1032 | |
| 1033 | return 0; |
| 1034 | } |
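| | 
| | /* Illustrative sketch, not part of the driver: apply one WFQ weight to
| |  * all mapped TCs of a VPORT. qed_init_vport_wfq() above iterates the
| |  * per-TC helper and skips TCs still set to QM_INVALID_PQ_ID. The
| |  * weight value is hypothetical.
| |  */
| | static int __maybe_unused
| | qed_example_set_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
| | u16 first_tx_pq_id[NUM_OF_TCS])
| | {
| | return qed_init_vport_wfq(p_hwfn, p_ptt, first_tx_pq_id, 3);
| | }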
| 1035 | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 1036 | int qed_init_global_rl(struct qed_hwfn *p_hwfn, |
Prabhakar Kushwaha | fe40a83 | 2021-10-04 09:58:44 +0300 | [diff] [blame] | 1037 | struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, |
| 1038 | enum init_qm_rl_type vport_rl_type) |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1039 | { |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1040 | u32 inc_val, upper_bound; |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1041 | |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1042 | upper_bound = (vport_rl_type == QM_RL_TYPE_QCN) ?
| 1043 | QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
| 1044 | QM_INITIAL_VOQ_BYTE_CRD;
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 1046 | inc_val = QM_RL_INC_VAL(rate_limit); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1047 | if (inc_val > upper_bound) { |
| 1048 | DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); |
Mintz, Yuval | be086e7 | 2017-03-11 18:39:18 +0200 | [diff] [blame] | 1049 | return -1; |
| 1050 | } |
| 1051 | |
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 1052 | qed_wr(p_hwfn, p_ptt, |
| 1053 | QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1054 | qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
| 1055 | upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
Michal Kalderon | 92fae6f | 2020-01-27 15:26:09 +0200 | [diff] [blame] | 1058 | qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1059 | |
| 1060 | return 0; |
| 1061 | } |
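| | 
| | /* Illustrative sketch, not part of the driver: program global rate
| |  * limiter 5 as a QCN-type limiter capped at 25 Gbps (rates in this
| |  * file are in Mbps). The rl_id and rate are hypothetical values.
| |  */
| | static int __maybe_unused
| | qed_example_set_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
| | {
| | return qed_init_global_rl(p_hwfn, p_ptt, 5, 25000, QM_RL_TYPE_QCN);
| | }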
| 1062 | |
| 1063 | bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, |
| 1064 | struct qed_ptt *p_ptt, |
| 1065 | bool is_release_cmd, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 1066 | bool is_tx_pq, u16 start_pq, u16 num_pqs) |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1067 | { |
| 1068 | u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1069 | u32 pq_mask = 0, last_pq, pq_id; |
| 1070 | |
| 1071 | last_pq = start_pq + num_pqs - 1; |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1072 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1073 | /* Set command's PQ type */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1074 | QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); |
| 1075 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1076 | /* Go over requested PQs */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1077 | for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1078 | /* Set PQ bit in mask (stop command only) */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1079 | if (!is_release_cmd) |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1080 | pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH)); |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1081 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1082 | /* If last PQ or end of PQ mask, write command */ |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1083 | if ((pq_id == last_pq) || |
| 1084 | (pq_id % QM_STOP_PQ_MASK_WIDTH == |
| 1085 | (QM_STOP_PQ_MASK_WIDTH - 1))) { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1086 | QM_CMD_SET_FIELD(cmd_arr, |
| 1087 | QM_STOP_CMD, PAUSE_MASK, pq_mask); |
| 1088 | QM_CMD_SET_FIELD(cmd_arr, |
| 1089 | QM_STOP_CMD, |
Yuval Mintz | fe56b9e | 2015-10-26 11:02:25 +0200 | [diff] [blame] | 1090 | GROUP_ID, |
| 1091 | pq_id / QM_STOP_PQ_MASK_WIDTH); |
| 1092 | if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, |
| 1093 | cmd_arr[0], cmd_arr[1])) |
| 1094 | return false; |
| 1095 | pq_mask = 0; |
| 1096 | } |
| 1097 | } |
| 1098 | |
| 1099 | return true; |
| 1100 | } |
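| | 
| | /* Illustrative sketch, not part of the driver: pause Tx PQs 0..63, then
| |  * release them. A stop command sets pause-mask bits per PQ; a release
| |  * command writes an empty mask for the same groups. The PQ range is a
| |  * hypothetical value.
| |  */
| | static bool __maybe_unused
| | qed_example_pause_release_pqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
| | {
| | if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, 0, 64))
| | return false;
| | 
| | return qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, 0, 64);
| | }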
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1101 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1102 | #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ |
| 1103 | do { \ |
| 1104 | typeof(var) *__p_var = &(var); \ |
| 1105 | typeof(offset) __offset = offset; \ |
| 1106 | *__p_var = (*__p_var & ~BIT(__offset)) | \ |
| 1107 | ((enable) ? BIT(__offset) : 0); \ |
| 1108 | } while (0) |
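| | 
| | /* For example, SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, 3, true) sets bit 3
| |  * of reg_val, while passing false clears it; the typeof() temporaries
| |  * keep each macro argument evaluated only once.
| |  */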
Michal Kalderon | 63ddca3 | 2020-01-27 15:26:10 +0200 | [diff] [blame] | 1109 | |
| 1110 | #define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910 |
| 1111 | #define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910 |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1112 | |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1113 | #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ |
| 1114 | do { \ |
| 1115 | u32 i; \ |
| 1116 | \ |
| 1117 | for (i = 0; i < (arr_size); i++) \ |
| 1118 | qed_wr(dev, ptt, \ |
| 1119 | ((addr) + (4 * i)), \ |
| 1120 | ((u32 *)&(arr))[i]); \ |
| 1121 | } while (0) |
| 1122 | |
| 1123 | /** |
Alexander Lobakin | 71e11a3 | 2020-07-06 18:38:16 +0300 | [diff] [blame] | 1124 | * qed_dmae_to_grc() - Internal function for writing from host to |
| 1125 | * wide-bus registers (split registers are not supported yet). |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1126 | * |
Alexander Lobakin | 71e11a3 | 2020-07-06 18:38:16 +0300 | [diff] [blame] | 1127 | * @p_hwfn: HW device data. |
| 1128 | * @p_ptt: PTT window used for writing the registers. |
| 1129 | * @p_data: Pointer to source data. |
| 1130 | * @addr: Destination register address. |
| 1131 | * @len_in_dwords: Data length in dwords (u32). |
| 1132 | * |
| 1133 | * Return: Length of the written data in dwords (u32) or -1 on invalid |
| 1134 | * input. |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1135 | */ |
Alexander Lobakin | 1451e46 | 2020-07-06 18:38:17 +0300 | [diff] [blame] | 1136 | static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1137 | __le32 *p_data, u32 addr, u32 len_in_dwords) |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1138 | { |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1139 | struct qed_dmae_params params = { 0 }; |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1140 | u32 *data_cpu; |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1141 | int rc; |
| 1142 | |
| 1143 | if (!p_data) |
| 1144 | return -1; |
| 1145 | |
| 1146 | /* Set DMAE params */ |
| 1147 | SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1); |
| 1148 | |
| 1149 | /* Execute DMAE command */ |
| 1150 | rc = qed_dmae_host2grc(p_hwfn, p_ptt, |
| 1151 | (u64)(uintptr_t)(p_data), |
| 1152 | addr, len_in_dwords, ¶ms); |
| 1153 | |
| 1154 | /* If the DMAE write failed, fall back to writing via GRC */
| 1155 | if (rc) { |
| 1156 | DP_VERBOSE(p_hwfn, |
| 1157 | QED_MSG_DEBUG, |
| 1158 | "Failed writing to chip using DMAE, using GRC instead\n"); |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1159 | |
| 1160 | /* Swap to CPU byteorder and write to registers using GRC */ |
| 1161 | data_cpu = (__force u32 *)p_data; |
| 1162 | le32_to_cpu_array(data_cpu, len_in_dwords); |
| 1163 | |
| 1164 | ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords); |
| 1165 | cpu_to_le32_array(data_cpu, len_in_dwords); |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1166 | } |
| 1167 | |
| 1168 | return len_in_dwords; |
| 1169 | } |
| 1170 | |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1171 | void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 1172 | struct qed_ptt *p_ptt, u16 dest_port) |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1173 | { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1174 | /* Update PRS register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1175 | qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1176 | |
| 1177 | /* Update NIG register */ |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 1178 | qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1179 | |
| 1180 | /* Update PBF register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1181 | qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); |
| 1182 | } |
| 1183 | |
| 1184 | void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 1185 | struct qed_ptt *p_ptt, bool vxlan_enable) |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1186 | { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1187 | u32 reg_val; |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1188 | u8 shift; |
| 1189 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1190 | /* Update PRS register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1191 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1192 | SET_FIELD(reg_val, |
| 1193 | PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1194 | qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1195 | if (reg_val) { |
| 1196 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1198 | |
| 1199 | /* Update output only if tunnel blocks not included. */ |
| 1200 | if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1201 | qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1202 | (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); |
| 1203 | } |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1204 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1205 | /* Update NIG register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1206 | reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); |
| 1207 | shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1208 | SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1209 | qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); |
| 1210 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1211 | /* Update DORQ register */ |
| 1212 | qed_wr(p_hwfn, |
| 1213 | p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1214 | } |
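| | 
| | /* Illustrative sketch, not part of the driver: enable VXLAN parsing and
| |  * steer it at the IANA-assigned VXLAN UDP port (4789); any other port
| |  * would be deployment specific.
| |  */
| | static void __maybe_unused
| | qed_example_enable_vxlan(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
| | {
| | qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
| | qed_set_vxlan_enable(p_hwfn, p_ptt, true);
| | }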
| 1215 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1216 | void qed_set_gre_enable(struct qed_hwfn *p_hwfn, |
| 1217 | struct qed_ptt *p_ptt, |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1218 | bool eth_gre_enable, bool ip_gre_enable) |
| 1219 | { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1220 | u32 reg_val; |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1221 | u8 shift; |
| 1222 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1223 | /* Update PRS register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1224 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1225 | SET_FIELD(reg_val, |
| 1226 | PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, |
| 1227 | eth_gre_enable); |
| 1228 | SET_FIELD(reg_val, |
| 1229 | PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, |
| 1230 | ip_gre_enable); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1231 | qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1232 | if (reg_val) { |
| 1233 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1235 | |
| 1236 | /* Update output only if tunnel blocks not included. */ |
| 1237 | if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1238 | qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1239 | (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); |
| 1240 | } |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1241 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1242 | /* Update NIG register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1243 | reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); |
| 1244 | shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1245 | SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1246 | shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1247 | SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1248 | qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); |
| 1249 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1250 | /* Update DORQ registers */ |
| 1251 | qed_wr(p_hwfn, |
| 1252 | p_ptt, |
| 1253 | DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0); |
| 1254 | qed_wr(p_hwfn, |
| 1255 | p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1256 | } |
| 1257 | |
| 1258 | void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 1259 | struct qed_ptt *p_ptt, u16 dest_port) |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1260 | { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1261 | /* Update PRS register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1262 | qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1263 | |
| 1264 | /* Update NIG register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1265 | qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1266 | |
| 1267 | /* Update PBF register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1268 | qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); |
| 1269 | } |
| 1270 | |
| 1271 | void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, |
| 1272 | struct qed_ptt *p_ptt, |
Yuval Mintz | 351a4ded | 2016-06-02 10:23:29 +0300 | [diff] [blame] | 1273 | bool eth_geneve_enable, bool ip_geneve_enable) |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1274 | { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1275 | u32 reg_val; |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1276 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1277 | /* Update PRS register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1278 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1279 | SET_FIELD(reg_val, |
| 1280 | PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, |
| 1281 | eth_geneve_enable); |
| 1282 | SET_FIELD(reg_val, |
| 1283 | PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, |
| 1284 | ip_geneve_enable); |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1285 | qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1286 | if (reg_val) { |
| 1287 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1289 | |
| 1290 | /* Update output only if tunnel blocks not included. */ |
| 1291 | if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1292 | qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1293 | (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); |
| 1294 | } |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1295 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1296 | /* Update NIG register */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1297 | qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, |
| 1298 | eth_geneve_enable ? 1 : 0); |
| 1299 | qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); |
| 1300 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1301 | /* EDPM with geneve tunnel not supported in BB */ |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1302 | if (QED_IS_BB_B0(p_hwfn->cdev)) |
| 1303 | return; |
| 1304 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1305 | /* Update DORQ registers */ |
| 1306 | qed_wr(p_hwfn, |
| 1307 | p_ptt, |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1308 | DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1309 | eth_geneve_enable ? 1 : 0); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1310 | qed_wr(p_hwfn, |
| 1311 | p_ptt, |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1312 | DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, |
Manish Chopra | 464f664 | 2016-04-14 01:38:29 -0400 | [diff] [blame] | 1313 | ip_geneve_enable ? 1 : 0); |
| 1314 | } |
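| | 
| | /* Illustrative sketch, not part of the driver: the GENEVE path mirrors
| |  * the VXLAN one - program the IANA-assigned GENEVE UDP port (6081) and
| |  * enable both ETH-over-GENEVE and IP-over-GENEVE classification.
| |  */
| | static void __maybe_unused
| | qed_example_enable_geneve(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
| | {
| | qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
| | qed_set_geneve_enable(p_hwfn, p_ptt, true, true);
| | }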
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1315 | |
Michal Kalderon | 63ddca3 | 2020-01-27 15:26:10 +0200 | [diff] [blame] | 1316 | #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3 |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1317 | #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910 |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1318 | |
| 1319 | void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, |
| 1320 | struct qed_ptt *p_ptt, bool enable) |
| 1321 | { |
| 1322 | u32 reg_val, cfg_mask; |
| 1323 | |
| 1324 | /* read PRS config register */ |
| 1325 | reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); |
| 1326 | |
| 1327 | /* set VXLAN_NO_L2_ENABLE mask */ |
| 1328 | cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); |
| 1329 | |
| 1330 | if (enable) { |
| 1331 | /* set VXLAN_NO_L2_ENABLE flag */ |
| 1332 | reg_val |= cfg_mask; |
| 1333 | |
| 1334 | /* update PRS FIC register */ |
| 1335 | qed_wr(p_hwfn, |
| 1336 | p_ptt, |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1337 | PRS_REG_OUTPUT_FORMAT_4_0, |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1338 | (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); |
| 1339 | } else { |
| 1340 | /* clear VXLAN_NO_L2_ENABLE flag */ |
| 1341 | reg_val &= ~cfg_mask; |
| 1342 | } |
| 1343 | |
| 1344 | /* write PRS config register */ |
| 1345 | qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); |
| 1346 | } |
| 1347 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1348 | #define T_ETH_PACKET_ACTION_GFT_EVENTID 23 |
| 1349 | #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1350 | #define T_ETH_PACKET_MATCH_RFS_EVENTID 25 |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1351 | #define PARSER_ETH_CONN_CM_HDR 0 |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1352 | #define CAM_LINE_SIZE sizeof(u32) |
| 1353 | #define RAM_LINE_SIZE sizeof(u64) |
| 1354 | #define REG_SIZE sizeof(u32) |
| 1355 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1356 | void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1357 | { |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1358 | struct regpair ram_line = { 0 }; |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1359 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1360 | /* Disable gft search for PF */ |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1361 | qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1362 | |
| 1363 | /* Clean ram & cam for next gft session */ |
| 1364 | |
| 1365 | /* Zero camline */ |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1366 | qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1367 | |
| 1368 | /* Zero ramline */ |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1369 | qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1370 | PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, |
| 1371 | sizeof(ram_line) / REG_SIZE); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1372 | } |
| 1373 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1374 | void qed_gft_config(struct qed_hwfn *p_hwfn, |
| 1375 | struct qed_ptt *p_ptt, |
| 1376 | u16 pf_id, |
| 1377 | bool tcp, |
| 1378 | bool udp, |
| 1379 | bool ipv4, bool ipv6, enum gft_profile_type profile_type) |
| 1380 | { |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1381 | struct regpair ram_line; |
| 1382 | u32 search_non_ip_as_gft; |
| 1383 | u32 reg_val, cam_line; |
| 1384 | u32 lo = 0, hi = 0; |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1385 | |
| 1386 | if (!ipv6 && !ipv4) |
| 1387 | DP_NOTICE(p_hwfn, |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1388 | "gft_config: must accept at least on of - ipv4 or ipv6'\n"); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1389 | if (!tcp && !udp) |
| 1390 | DP_NOTICE(p_hwfn, |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1391 | "gft_config: must accept at least one of - udp or tcp\n");
| 1392 | if (profile_type >= MAX_GFT_PROFILE_TYPE) |
| 1393 | DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n"); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1394 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1395 | /* Set the RFS event ID with which Tstorm is awakened by the PRS */
| 1396 | reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << |
| 1397 | PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; |
| 1398 | reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; |
| 1399 | qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1400 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1401 | /* On a match, have PRS load only the CID, not the full context. */
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1402 | qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1403 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1404 | /* Do not use tenant ID exist bit for gft search */ |
| 1405 | qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1406 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1407 | /* Set Cam */ |
| 1408 | cam_line = 0; |
| 1409 | SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1); |
| 1410 | |
| 1411 | /* Filters are per PF!! */ |
| 1412 | SET_FIELD(cam_line, |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1413 | GFT_CAM_LINE_MAPPED_PF_ID_MASK, |
| 1414 | GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1415 | SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); |
| 1416 | |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1417 | if (!(tcp && udp)) { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1418 | SET_FIELD(cam_line, |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1419 | GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, |
| 1420 | GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1421 | if (tcp) |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1422 | SET_FIELD(cam_line, |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1423 | GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, |
| 1424 | GFT_PROFILE_TCP_PROTOCOL); |
| 1425 | else |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1426 | SET_FIELD(cam_line, |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1427 | GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, |
| 1428 | GFT_PROFILE_UDP_PROTOCOL); |
| 1429 | } |
| 1430 | |
| 1431 | if (!(ipv4 && ipv6)) { |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1432 | SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1433 | if (ipv4) |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1434 | SET_FIELD(cam_line, |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1435 | GFT_CAM_LINE_MAPPED_IP_VERSION, |
| 1436 | GFT_PROFILE_IPV4); |
| 1437 | else |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1438 | SET_FIELD(cam_line, |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1439 | GFT_CAM_LINE_MAPPED_IP_VERSION, |
| 1440 | GFT_PROFILE_IPV6); |
| 1441 | } |
| 1442 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1443 | /* Write characteristics to cam */ |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1444 | qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1445 | cam_line); |
| 1446 | cam_line = |
| 1447 | qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1448 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1449 | /* Write line to RAM - compare to filter 4 tuple */ |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1450 | |
Michal Kalderon | d52c89f | 2018-06-05 13:11:16 +0300 | [diff] [blame] | 1451 | /* Search no IP as GFT */ |
| 1452 | search_non_ip_as_gft = 0; |
| 1453 | |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1454 | /* Tunnel type */ |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1455 | SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); |
| 1456 | SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1457 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1458 | if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1459 | SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); |
| 1460 | SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); |
| 1461 | SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); |
| 1462 | SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); |
| 1463 | SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1); |
| 1464 | SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1465 | } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1466 | SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); |
| 1467 | SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); |
| 1468 | SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1469 | } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1470 | SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); |
| 1471 | SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1472 | } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1473 | SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); |
| 1474 | SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); |
Michal Kalderon | 50bc60c | 2018-03-28 11:42:16 +0300 | [diff] [blame] | 1475 | } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1476 | SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); |
Michal Kalderon | d52c89f | 2018-06-05 13:11:16 +0300 | [diff] [blame] | 1477 | |
| 1478 | /* Allow tunneled traffic without inner IP */ |
| 1479 | search_non_ip_as_gft = 1; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1480 | } |
| 1481 | |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1482 | ram_line.lo = cpu_to_le32(lo); |
| 1483 | ram_line.hi = cpu_to_le32(hi); |
| 1484 | |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1485 | qed_wr(p_hwfn, |
Michal Kalderon | d52c89f | 2018-06-05 13:11:16 +0300 | [diff] [blame] | 1486 | p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1487 | qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1488 | PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, |
| 1489 | sizeof(ram_line) / REG_SIZE); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1490 | |
Mintz, Yuval | 7b6859f | 2017-05-18 19:41:04 +0300 | [diff] [blame] | 1491 | /* Set default profile so that no filter match will happen */ |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1492 | ram_line.lo = cpu_to_le32(0xffffffff); |
| 1493 | ram_line.hi = cpu_to_le32(0x3ff); |
| 1494 | qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, |
Michal Kalderon | 804c570 | 2020-01-27 15:26:11 +0200 | [diff] [blame] | 1495 | PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * |
| 1496 | PRS_GFT_CAM_LINES_NO_MATCH, |
| 1497 | sizeof(ram_line) / REG_SIZE); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1498 | |
| 1499 | /* Enable gft search */ |
| 1500 | qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); |
| 1501 | } |
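| | 
| | /* Illustrative sketch, not part of the driver: a typical aRFS-style GFT
| |  * setup matching full TCP/IPv4 4-tuples for one PF. The pf_id is a
| |  * hypothetical value.
| |  */
| | static void __maybe_unused
| | qed_example_gft_tcp4(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
| | {
| | qed_gft_config(p_hwfn, p_ptt, 0, true /* tcp */, false /* udp */,
| | true /* ipv4 */, false /* ipv6 */, GFT_PROFILE_TYPE_4_TUPLE);
| | }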
| 1502 | |
| 1503 | DECLARE_CRC8_TABLE(cdu_crc8_table); |
| 1504 | |
| 1505 | /* Calculate and return CDU validation byte per connection type/region/cid */ |
| 1506 | static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) |
| 1507 | { |
| 1508 | const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG; |
| 1509 | u8 crc, validation_byte = 0; |
| 1510 | static u8 crc8_table_valid; /* automatically initialized to 0 */ |
| 1511 | u32 validation_string = 0; |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1512 | __be32 data_to_crc; |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1513 | |
| 1514 | if (!crc8_table_valid) { |
| 1515 | crc8_populate_msb(cdu_crc8_table, 0x07); |
| 1516 | crc8_table_valid = 1; |
| 1517 | } |
| 1518 | |
| 1519 | /* The CRC is calculated on the String-to-compress: |
| 1520 | * [31:8] = {CID[31:20],CID[11:0]} |
| 1521 | * [7:4] = Region |
| 1522 | * [3:0] = Type |
| 1523 | */ |
| 1524 | if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1) |
| 1525 | validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8); |
| 1526 | |
| 1527 | if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1) |
| 1528 | validation_string |= ((region & 0xF) << 4); |
| 1529 | |
| 1530 | if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1) |
| 1531 | validation_string |= (conn_type & 0xF); |
| 1532 | |
| 1533 | /* Convert to big-endian and calculate CRC8 */ |
Alexander Lobakin | 5ab9034 | 2020-07-06 18:38:19 +0300 | [diff] [blame] | 1534 | data_to_crc = cpu_to_be32(validation_string); |
| 1535 | crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), |
| 1536 | CRC8_INIT_VALUE); |
Tomer Tayar | da09091 | 2017-12-27 19:30:07 +0200 | [diff] [blame] | 1537 | |
| 1538 | /* The validation byte [7:0] is composed: |
| 1539 | * for type A validation |
| 1540 | * [7] = active configuration bit |
| 1541 | * [6:0] = crc[6:0] |
| 1542 | * |
| 1543 | * for type B validation |
| 1544 | * [7] = active configuration bit |
| 1545 | * [6:3] = connection_type[3:0] |
| 1546 | * [2:0] = crc[2:0] |
| 1547 | */ |
| 1548 | validation_byte |= |
| 1549 | ((validation_cfg >> |
| 1550 | CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7; |
| 1551 | |
| 1552 | if ((validation_cfg >> |
| 1553 | CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1) |
| 1554 | validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7); |
| 1555 | else |
| 1556 | validation_byte |= crc & 0x7F; |
| 1557 | |
| 1558 | return validation_byte; |
| 1559 | } |
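| | 
| | /* Worked example (assuming all validation_cfg USE_* bits are set): for
| |  * cid = 0x00100010, region = 3, type = 2 the string-to-compress is
| |  * (cid & 0xFFF00000) | ((cid & 0xFFF) << 8) | (3 << 4) | 2 = 0x00101032,
| |  * which is converted to big-endian before the CRC8 (poly 0x07) is taken.
| |  */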
| 1560 | |
| 1561 | /* Calculate and set validation bytes for session context */
| 1562 | void qed_calc_session_ctx_validation(void *p_ctx_mem, |
| 1563 | u16 ctx_size, u8 ctx_type, u32 cid) |
| 1564 | { |
| 1565 | u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; |
| 1566 | |
| 1567 | p_ctx = (u8 * const)p_ctx_mem; |
| 1568 | x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; |
| 1569 | t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; |
| 1570 | u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; |
| 1571 | |
| 1572 | memset(p_ctx, 0, ctx_size); |
| 1573 | |
| 1574 | *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid); |
| 1575 | *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid); |
| 1576 | *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid); |
| 1577 | } |
| 1578 | |
| 1579 | /* Calculate and set validation bytes for task context */
| 1580 | void qed_calc_task_ctx_validation(void *p_ctx_mem, |
| 1581 | u16 ctx_size, u8 ctx_type, u32 tid) |
| 1582 | { |
| 1583 | u8 *p_ctx, *region1_val_ptr; |
| 1584 | |
| 1585 | p_ctx = (u8 * const)p_ctx_mem; |
| 1586 | region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; |
| 1587 | |
| 1588 | memset(p_ctx, 0, ctx_size); |
| 1589 | |
| 1590 | *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid); |
| 1591 | } |
| 1592 | |
| 1593 | /* Memset session context to 0 while preserving validation bytes */ |
| 1594 | void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) |
| 1595 | { |
| 1596 | u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; |
| 1597 | u8 x_val, t_val, u_val; |
| 1598 | |
| 1599 | p_ctx = (u8 * const)p_ctx_mem; |
| 1600 | x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; |
| 1601 | t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; |
| 1602 | u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; |
| 1603 | |
| 1604 | x_val = *x_val_ptr; |
| 1605 | t_val = *t_val_ptr; |
| 1606 | u_val = *u_val_ptr; |
| 1607 | |
| 1608 | memset(p_ctx, 0, ctx_size); |
| 1609 | |
| 1610 | *x_val_ptr = x_val; |
| 1611 | *t_val_ptr = t_val; |
| 1612 | *u_val_ptr = u_val; |
| 1613 | } |
| 1614 | |
| 1615 | /* Memset task context to 0 while preserving validation bytes */ |
| 1616 | void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) |
| 1617 | { |
| 1618 | u8 *p_ctx, *region1_val_ptr; |
| 1619 | u8 region1_val; |
| 1620 | |
| 1621 | p_ctx = (u8 * const)p_ctx_mem; |
| 1622 | region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; |
| 1623 | |
| 1624 | region1_val = *region1_val_ptr; |
| 1625 | |
| 1626 | memset(p_ctx, 0, ctx_size); |
| 1627 | |
| 1628 | *region1_val_ptr = region1_val; |
| 1629 | } |
| 1630 | |
| 1631 | /* Enable and configure context validation */ |
| 1632 | void qed_enable_context_validation(struct qed_hwfn *p_hwfn, |
| 1633 | struct qed_ptt *p_ptt) |
| 1634 | { |
| 1635 | u32 ctx_validation; |
| 1636 | |
| 1637 | /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */ |
| 1638 | ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24; |
| 1639 | qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation); |
| 1640 | |
| 1641 | /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */ |
| 1642 | ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; |
| 1643 | qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation); |
| 1644 | |
| 1645 | /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */ |
| 1646 | ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; |
| 1647 | qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); |
Chopra, Manish | d51e4af | 2017-04-13 04:54:44 -0700 | [diff] [blame] | 1648 | } |
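| | 
| | /* Usage note: once validation is enabled, every newly built session
| |  * context should get its validation bytes via
| |  * qed_calc_session_ctx_validation(), and later wipes should go through
| |  * qed_memset_session_ctx() so those bytes survive; the task-context
| |  * helpers above play the same roles for the TCFC (region 1) bytes.
| |  */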
Michal Kalderon | d52c89f | 2018-06-05 13:11:16 +0300 | [diff] [blame] | 1649 | |
| 1650 | static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) |
| 1651 | { |
| 1652 | switch (storm_id) { |
| 1653 | case 0: |
| 1654 | return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1655 | TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); |
| 1656 | case 1: |
| 1657 | return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1658 | MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); |
| 1659 | case 2: |
| 1660 | return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1661 | USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); |
| 1662 | case 3: |
| 1663 | return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1664 | XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); |
| 1665 | case 4: |
| 1666 | return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1667 | YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); |
| 1668 | case 5: |
| 1669 | return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1670 | PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); |
| 1671 | |
| 1672 | default: |
| 1673 | return 0; |
| 1674 | } |
| 1675 | } |
| 1676 | |
| 1677 | void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, |
| 1678 | struct qed_ptt *p_ptt, |
| 1679 | u8 assert_level[NUM_STORMS]) |
| 1680 | { |
| 1681 | u8 storm_id; |
| 1682 | |
| 1683 | for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { |
| 1684 | u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); |
| 1685 | |
| 1686 | qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); |
| 1687 | } |
| 1688 | } |
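| | 
| | /* Illustrative sketch, not part of the driver: raise the RDMA assert
| |  * level uniformly across all Storms. The level value is hypothetical.
| |  */
| | static void __maybe_unused
| | qed_example_set_rdma_asserts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
| | {
| | u8 levels[NUM_STORMS];
| | 
| | memset(levels, 2, sizeof(levels));
| | qed_set_rdma_error_level(p_hwfn, p_ptt, levels);
| | }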
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1689 | |
| 1690 | #define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4) |
| 1691 | #define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4) |
| 1692 | |
| 1693 | static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) |
| 1694 | { |
| 1695 | switch (storm_id) { |
| 1696 | case 0: |
| 1697 | return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1698 | TSTORM_OVERLAY_BUF_ADDR_OFFSET; |
| 1699 | case 1: |
| 1700 | return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1701 | MSTORM_OVERLAY_BUF_ADDR_OFFSET; |
| 1702 | case 2: |
| 1703 | return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1704 | USTORM_OVERLAY_BUF_ADDR_OFFSET; |
| 1705 | case 3: |
| 1706 | return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1707 | XSTORM_OVERLAY_BUF_ADDR_OFFSET; |
| 1708 | case 4: |
| 1709 | return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1710 | YSTORM_OVERLAY_BUF_ADDR_OFFSET; |
| 1711 | case 5: |
| 1712 | return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + |
| 1713 | PSTORM_OVERLAY_BUF_ADDR_OFFSET; |
| 1714 | |
| 1715 | default: |
| 1716 | return 0; |
| 1717 | } |
| 1718 | } |
| 1719 | |
| 1720 | struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, |
| 1721 | const u32 * const |
| 1722 | fw_overlay_in_buf, |
| 1723 | u32 buf_size_in_bytes) |
| 1724 | { |
| 1725 | u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0; |
| 1726 | struct phys_mem_desc *allocated_mem; |
| 1727 | |
| 1728 | if (!buf_size) |
| 1729 | return NULL; |
| 1730 | |
| 1731 | allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc), |
| 1732 | GFP_KERNEL); |
| 1733 | if (!allocated_mem) |
| 1734 | return NULL; |
| 1735 | |
| 1738 | /* For each Storm, set physical address in RAM */ |
| 1739 | while (buf_offset < buf_size) { |
| 1740 | struct phys_mem_desc *storm_mem_desc; |
| 1741 | struct fw_overlay_buf_hdr *hdr; |
| 1742 | u32 storm_buf_size; |
| 1743 | u8 storm_id; |
| 1744 | |
| 1745 | hdr = |
| 1746 | (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset]; |
| 1747 | storm_buf_size = GET_FIELD(hdr->data, |
| 1748 | FW_OVERLAY_BUF_HDR_BUF_SIZE); |
| 1749 | storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); |
Omkar Kulkarni | b90cb53 | 2021-10-04 09:58:46 +0300 | [diff] [blame] | 1750 | if (storm_id >= NUM_STORMS) |
| 1751 | break; |
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1752 | storm_mem_desc = allocated_mem + storm_id; |
| 1753 | storm_mem_desc->size = storm_buf_size * sizeof(u32); |
| 1754 | |
| 1755 | /* Allocate physical memory for Storm's overlays buffer */ |
| 1756 | storm_mem_desc->virt_addr = |
| 1757 | dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, |
| 1758 | storm_mem_desc->size, |
| 1759 | &storm_mem_desc->phys_addr, GFP_KERNEL); |
| 1760 | if (!storm_mem_desc->virt_addr) |
| 1761 | break; |
| 1762 | |
| 1763 | /* Skip overlays buffer header */ |
| 1764 | buf_offset += OVERLAY_HDR_SIZE_DWORDS; |
| 1765 | |
| 1766 | /* Copy Storm's overlays buffer to allocated memory */ |
| 1767 | memcpy(storm_mem_desc->virt_addr, |
| 1768 | &fw_overlay_in_buf[buf_offset], storm_mem_desc->size); |
| 1769 | |
| 1770 | /* Advance to next Storm */ |
| 1771 | buf_offset += storm_buf_size; |
| 1772 | } |
| 1773 | |
| 1774 | /* If memory allocation has failed, free all allocated memory */ |
| 1775 | if (buf_offset < buf_size) { |
Prabhakar Kushwaha | fe40a83 | 2021-10-04 09:58:44 +0300 | [diff] [blame] | 1776 | qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); |
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1777 | return NULL; |
| 1778 | } |
| 1779 | |
| 1780 | return allocated_mem; |
| 1781 | } |
| 1782 | |
| 1783 | void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, |
| 1784 | struct qed_ptt *p_ptt, |
| 1785 | struct phys_mem_desc *fw_overlay_mem) |
| 1786 | { |
| 1787 | u8 storm_id; |
| 1788 | |
| 1789 | for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { |
| 1790 | struct phys_mem_desc *storm_mem_desc = |
| 1791 | (struct phys_mem_desc *)fw_overlay_mem + storm_id; |
| 1792 | u32 ram_addr, i; |
| 1793 | |
| 1794 | /* Skip Storms with no FW overlays */ |
| 1795 | if (!storm_mem_desc->virt_addr) |
| 1796 | continue; |
| 1797 | |
| 1798 | /* Calculate overlay RAM GRC address of current PF */ |
| 1799 | ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) + |
| 1800 | sizeof(dma_addr_t) * p_hwfn->rel_pf_id; |
| 1801 | |
| 1802 | /* Write Storm's overlay physical address to RAM */ |
| 1803 | for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32)) |
| 1804 | qed_wr(p_hwfn, p_ptt, ram_addr, |
| 1805 | ((u32 *)&storm_mem_desc->phys_addr)[i]); |
| 1806 | } |
| 1807 | } |
| 1808 | |
| 1809 | void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, |
Prabhakar Kushwaha | fe40a83 | 2021-10-04 09:58:44 +0300 | [diff] [blame] | 1810 | struct phys_mem_desc **fw_overlay_mem) |
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1811 | { |
| 1812 | u8 storm_id; |
| 1813 | |
Prabhakar Kushwaha | fe40a83 | 2021-10-04 09:58:44 +0300 | [diff] [blame] | 1814 | if (!fw_overlay_mem || !(*fw_overlay_mem)) |
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1815 | return; |
| 1816 | |
| 1817 | for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { |
| 1818 | struct phys_mem_desc *storm_mem_desc = |
Prabhakar Kushwaha | fe40a83 | 2021-10-04 09:58:44 +0300 | [diff] [blame] | 1819 | (struct phys_mem_desc *)*fw_overlay_mem + storm_id; |
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1820 | |
| 1821 | /* Free Storm's physical memory */ |
| 1822 | if (storm_mem_desc->virt_addr) |
| 1823 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, |
| 1824 | storm_mem_desc->size, |
| 1825 | storm_mem_desc->virt_addr, |
| 1826 | storm_mem_desc->phys_addr); |
| 1827 | } |
| 1828 | |
| 1829 | /* Free allocated virtual memory */ |
Prabhakar Kushwaha | fe40a83 | 2021-10-04 09:58:44 +0300 | [diff] [blame] | 1830 | kfree(*fw_overlay_mem); |
| 1831 | *fw_overlay_mem = NULL; |
Michal Kalderon | 30d5f85 | 2020-01-27 15:26:16 +0200 | [diff] [blame] | 1832 | } |
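| | 
| | /* Usage note on the overlay lifecycle (the names of the firmware-provided
| |  * buffer and its size below are placeholders):
| |  *
| |  * mem = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_in_buf, buf_size);
| |  * if (mem)
| |  * qed_fw_overlay_init_ram(p_hwfn, p_ptt, mem);
| |  * ...on teardown...
| |  * qed_fw_overlay_mem_free(p_hwfn, &mem);
| |  */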