| // SPDX-License-Identifier: GPL-2.0 |
| /* Copyright (c) 2019, Intel Corporation. */ |
| |
| #include "ice_common.h" |
| #include "ice_flex_pipe.h" |
| |
| /** |
| * ice_pkg_val_buf |
| * @buf: pointer to the ice buffer |
| * |
| * This helper function validates a buffer's header. |
| */ |
| static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) |
| { |
| struct ice_buf_hdr *hdr; |
| u16 section_count; |
| u16 data_end; |
| |
| hdr = (struct ice_buf_hdr *)buf->buf; |
| /* verify data */ |
| section_count = le16_to_cpu(hdr->section_count); |
| if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) |
| return NULL; |
| |
| data_end = le16_to_cpu(hdr->data_end); |
| if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) |
| return NULL; |
| |
| return hdr; |
| } |
| |
| /** |
| * ice_find_buf_table |
| * @ice_seg: pointer to the ice segment |
| * |
| * Returns the address of the buffer table within the ice segment. |
| */ |
| static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) |
| { |
| struct ice_nvm_table *nvms; |
| |
| nvms = (struct ice_nvm_table *) |
| (ice_seg->device_table + |
| le32_to_cpu(ice_seg->device_table_count)); |
| |
| return (__force struct ice_buf_table *) |
| (nvms->vers + le32_to_cpu(nvms->table_count)); |
| } |
| |
| /** |
| * ice_pkg_enum_buf |
| * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) |
| * @state: pointer to the enum state |
| * |
| * This function will enumerate all the buffers in the ice segment. The first |
| * call is made with the ice_seg parameter non-NULL; on subsequent calls, |
| * ice_seg is set to NULL which continues the enumeration. When the function |
| * returns a NULL pointer, then the end of the buffers has been reached, or an |
| * unexpected value has been detected (for example an invalid section count or |
| * an invalid buffer end value). |
| */ |
| static struct ice_buf_hdr * |
| ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) |
| { |
| if (ice_seg) { |
| state->buf_table = ice_find_buf_table(ice_seg); |
| if (!state->buf_table) |
| return NULL; |
| |
| state->buf_idx = 0; |
| return ice_pkg_val_buf(state->buf_table->buf_array); |
| } |
| |
| if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count)) |
| return ice_pkg_val_buf(state->buf_table->buf_array + |
| state->buf_idx); |
| else |
| return NULL; |
| } |
| |
| /** |
| * ice_pkg_advance_sect |
| * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) |
| * @state: pointer to the enum state |
| * |
| * This helper function will advance the section within the ice segment, |
| * also advancing the buffer if needed. |
| */ |
| static bool |
| ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) |
| { |
| if (!ice_seg && !state->buf) |
| return false; |
| |
| if (!ice_seg && state->buf) |
| if (++state->sect_idx < le16_to_cpu(state->buf->section_count)) |
| return true; |
| |
| state->buf = ice_pkg_enum_buf(ice_seg, state); |
| if (!state->buf) |
| return false; |
| |
| /* start of new buffer, reset section index */ |
| state->sect_idx = 0; |
| return true; |
| } |
| |
| /** |
| * ice_pkg_enum_section |
| * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) |
| * @state: pointer to the enum state |
| * @sect_type: section type to enumerate |
| * |
| * This function will enumerate all the sections of a particular type in the |
| * ice segment. The first call is made with the ice_seg parameter non-NULL; |
| * on subsequent calls, ice_seg is set to NULL which continues the enumeration. |
| * When the function returns a NULL pointer, then the end of the matching |
| * sections has been reached. |
| */ |
| static void * |
| ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, |
| u32 sect_type) |
| { |
| u16 offset, size; |
| |
| if (ice_seg) |
| state->type = sect_type; |
| |
| if (!ice_pkg_advance_sect(ice_seg, state)) |
| return NULL; |
| |
| /* scan for next matching section */ |
| while (state->buf->section_entry[state->sect_idx].type != |
| cpu_to_le32(state->type)) |
| if (!ice_pkg_advance_sect(NULL, state)) |
| return NULL; |
| |
| /* validate section */ |
| offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); |
| if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) |
| return NULL; |
| |
| size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size); |
| if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) |
| return NULL; |
| |
| /* make sure the section fits in the buffer */ |
| if (offset + size > ICE_PKG_BUF_SIZE) |
| return NULL; |
| |
| state->sect_type = |
| le32_to_cpu(state->buf->section_entry[state->sect_idx].type); |
| |
| /* calc pointer to this section */ |
| state->sect = ((u8 *)state->buf) + |
| le16_to_cpu(state->buf->section_entry[state->sect_idx].offset); |
| |
| return state->sect; |
| } |
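| |
| /* Usage sketch for the enumeration idiom above (illustrative only; hw and |
| * sid stand for a caller's variables). ice_fill_tbl() later in this file |
| * follows the same pattern. |
| * |
| * struct ice_pkg_enum state; |
| * void *sect; |
| * |
| * memset(&state, 0, sizeof(state)); |
| * sect = ice_pkg_enum_section(hw->seg, &state, sid); |
| * while (sect) { |
| * ...consume the section data... |
| * sect = ice_pkg_enum_section(NULL, &state, sid); |
| * } |
| */ |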
| |
| /** |
| * ice_acquire_global_cfg_lock |
| * @hw: pointer to the HW structure |
| * @access: access type (read or write) |
| * |
| * This function will request ownership of the global config lock for reading |
| * or writing of the package. When attempting to obtain write access, the |
| * caller must check for the following two return values: |
| * |
| * 0 (success) - Means the caller has acquired the global config lock |
| * and can perform writing of the package. |
| * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the |
| * package or has found that no update was necessary; in |
| * this case, the caller can just skip performing any |
| * update of the package. |
| */ |
| static enum ice_status |
| ice_acquire_global_cfg_lock(struct ice_hw *hw, |
| enum ice_aq_res_access_type access) |
| { |
| enum ice_status status; |
| |
| status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, |
| ICE_GLOBAL_CFG_LOCK_TIMEOUT); |
| |
| if (!status) |
| mutex_lock(&ice_global_cfg_lock_sw); |
| else if (status == ICE_ERR_AQ_NO_WORK) |
| ice_debug(hw, ICE_DBG_PKG, |
| "Global config lock: No work to do\n"); |
| |
| return status; |
| } |
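| |
| /* Usage sketch (illustrative only): when requesting write access, the caller |
| * must treat ICE_ERR_AQ_NO_WORK as "another PF already handled the package", |
| * which is exactly how ice_dwnld_cfg_bufs() below reacts. |
| * |
| * status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); |
| * if (!status) { |
| * ...download the package... |
| * ice_release_global_cfg_lock(hw); |
| * } else if (status == ICE_ERR_AQ_NO_WORK) { |
| * ...skip the update, package already present... |
| * } |
| */ |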
| |
| /** |
| * ice_release_global_cfg_lock |
| * @hw: pointer to the HW structure |
| * |
| * This function will release the global config lock. |
| */ |
| static void ice_release_global_cfg_lock(struct ice_hw *hw) |
| { |
| mutex_unlock(&ice_global_cfg_lock_sw); |
| ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); |
| } |
| |
| /** |
| * ice_aq_download_pkg |
| * @hw: pointer to the hardware structure |
| * @pkg_buf: the package buffer to transfer |
| * @buf_size: the size of the package buffer |
| * @last_buf: last buffer indicator |
| * @error_offset: returns error offset |
| * @error_info: returns error information |
| * @cd: pointer to command details structure or NULL |
| * |
| * Download Package (0x0C40) |
| */ |
| static enum ice_status |
| ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, |
| u16 buf_size, bool last_buf, u32 *error_offset, |
| u32 *error_info, struct ice_sq_cd *cd) |
| { |
| struct ice_aqc_download_pkg *cmd; |
| struct ice_aq_desc desc; |
| enum ice_status status; |
| |
| if (error_offset) |
| *error_offset = 0; |
| if (error_info) |
| *error_info = 0; |
| |
| cmd = &desc.params.download_pkg; |
| ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); |
| desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
| |
| if (last_buf) |
| cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; |
| |
| status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); |
| if (status == ICE_ERR_AQ_ERROR) { |
| /* Read error from buffer only when the FW returned an error */ |
| struct ice_aqc_download_pkg_resp *resp; |
| |
| resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; |
| if (error_offset) |
| *error_offset = le32_to_cpu(resp->error_offset); |
| if (error_info) |
| *error_info = le32_to_cpu(resp->error_info); |
| } |
| |
| return status; |
| } |
| |
| /** |
| * ice_find_seg_in_pkg |
| * @hw: pointer to the hardware structure |
| * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK) |
| * @pkg_hdr: pointer to the package header to be searched |
| * |
| * This function searches a package file for a particular segment type. On |
| * success it returns a pointer to the segment header, otherwise it will |
| * return NULL. |
| */ |
| static struct ice_generic_seg_hdr * |
| ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, |
| struct ice_pkg_hdr *pkg_hdr) |
| { |
| u32 i; |
| |
| ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", |
| pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor, |
| pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft); |
| |
| /* Search all package segments for the requested segment type */ |
| for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { |
| struct ice_generic_seg_hdr *seg; |
| |
| seg = (struct ice_generic_seg_hdr *) |
| ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i])); |
| |
| if (le32_to_cpu(seg->seg_type) == seg_type) |
| return seg; |
| } |
| |
| return NULL; |
| } |
| |
| /** |
| * ice_dwnld_cfg_bufs |
| * @hw: pointer to the hardware structure |
| * @bufs: pointer to an array of buffers |
| * @count: the number of buffers in the array |
| * |
| * Obtains global config lock and downloads the package configuration buffers |
| * to the firmware. Metadata buffers are skipped, and the first metadata buffer |
| * found indicates that the rest of the buffers are all metadata buffers. |
| */ |
| static enum ice_status |
| ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) |
| { |
| enum ice_status status; |
| struct ice_buf_hdr *bh; |
| u32 offset, info, i; |
| |
| if (!bufs || !count) |
| return ICE_ERR_PARAM; |
| |
| /* If the first buffer's first section has its metadata bit set |
| * then there are no buffers to be downloaded, and the operation is |
| * considered a success. |
| */ |
| bh = (struct ice_buf_hdr *)bufs; |
| if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) |
| return 0; |
| |
| /* reset pkg_dwnld_status in case this function is called in the |
| * reset/rebuild flow |
| */ |
| hw->pkg_dwnld_status = ICE_AQ_RC_OK; |
| |
| status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); |
| if (status) { |
| if (status == ICE_ERR_AQ_NO_WORK) |
| hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; |
| else |
| hw->pkg_dwnld_status = hw->adminq.sq_last_status; |
| return status; |
| } |
| |
| for (i = 0; i < count; i++) { |
| bool last = ((i + 1) == count); |
| |
| if (!last) { |
| /* check next buffer for metadata flag */ |
| bh = (struct ice_buf_hdr *)(bufs + i + 1); |
| |
| /* A set metadata flag in the next buffer will signal |
| * that the current buffer will be the last buffer |
| * downloaded |
| */ |
| if (le16_to_cpu(bh->section_count)) |
| if (le32_to_cpu(bh->section_entry[0].type) & |
| ICE_METADATA_BUF) |
| last = true; |
| } |
| |
| bh = (struct ice_buf_hdr *)(bufs + i); |
| |
| status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, |
| &offset, &info, NULL); |
| |
| /* Save AQ status from download package */ |
| hw->pkg_dwnld_status = hw->adminq.sq_last_status; |
| if (status) { |
| ice_debug(hw, ICE_DBG_PKG, |
| "Pkg download failed: err %d off %d inf %d\n", |
| status, offset, info); |
| |
| break; |
| } |
| |
| if (last) |
| break; |
| } |
| |
| ice_release_global_cfg_lock(hw); |
| |
| return status; |
| } |
| |
| /** |
| * ice_aq_get_pkg_info_list |
| * @hw: pointer to the hardware structure |
| * @pkg_info: the buffer which will receive the information list |
| * @buf_size: the size of the pkg_info information buffer |
| * @cd: pointer to command details structure or NULL |
| * |
| * Get Package Info List (0x0C43) |
| */ |
| static enum ice_status |
| ice_aq_get_pkg_info_list(struct ice_hw *hw, |
| struct ice_aqc_get_pkg_info_resp *pkg_info, |
| u16 buf_size, struct ice_sq_cd *cd) |
| { |
| struct ice_aq_desc desc; |
| |
| ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); |
| |
| return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); |
| } |
| |
| /** |
| * ice_download_pkg |
| * @hw: pointer to the hardware structure |
| * @ice_seg: pointer to the segment of the package to be downloaded |
| * |
| * Handles the download of a complete package. |
| */ |
| static enum ice_status |
| ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) |
| { |
| struct ice_buf_table *ice_buf_tbl; |
| |
| ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n", |
| ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor, |
| ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft); |
| |
| ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", |
| le32_to_cpu(ice_seg->hdr.seg_type), |
| le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name); |
| |
| ice_buf_tbl = ice_find_buf_table(ice_seg); |
| |
| ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", |
| le32_to_cpu(ice_buf_tbl->buf_count)); |
| |
| return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, |
| le32_to_cpu(ice_buf_tbl->buf_count)); |
| } |
| |
| /** |
| * ice_init_pkg_info |
| * @hw: pointer to the hardware structure |
| * @pkg_hdr: pointer to the driver's package hdr |
| * |
| * Saves off the package details into the HW structure. |
| */ |
| static enum ice_status |
| ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) |
| { |
| struct ice_global_metadata_seg *meta_seg; |
| struct ice_generic_seg_hdr *seg_hdr; |
| |
| if (!pkg_hdr) |
| return ICE_ERR_PARAM; |
| |
| meta_seg = (struct ice_global_metadata_seg *) |
| ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr); |
| if (meta_seg) { |
| hw->pkg_ver = meta_seg->pkg_ver; |
| memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name)); |
| |
| ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", |
| meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor, |
| meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, |
| meta_seg->pkg_name); |
| } else { |
| ice_debug(hw, ICE_DBG_INIT, |
| "Did not find metadata segment in driver package\n"); |
| return ICE_ERR_CFG; |
| } |
| |
| seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); |
| if (seg_hdr) { |
| hw->ice_pkg_ver = seg_hdr->seg_ver; |
| memcpy(hw->ice_pkg_name, seg_hdr->seg_name, |
| sizeof(hw->ice_pkg_name)); |
| |
| ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n", |
| seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor, |
| seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft, |
| seg_hdr->seg_name); |
| } else { |
| ice_debug(hw, ICE_DBG_INIT, |
| "Did not find ice segment in driver package\n"); |
| return ICE_ERR_CFG; |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * ice_get_pkg_info |
| * @hw: pointer to the hardware structure |
| * |
| * Store details of the package currently loaded in HW into the HW structure. |
| */ |
| static enum ice_status ice_get_pkg_info(struct ice_hw *hw) |
| { |
| struct ice_aqc_get_pkg_info_resp *pkg_info; |
| enum ice_status status; |
| u16 size; |
| u32 i; |
| |
| size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) * |
| (ICE_PKG_CNT - 1)); |
| pkg_info = kzalloc(size, GFP_KERNEL); |
| if (!pkg_info) |
| return ICE_ERR_NO_MEMORY; |
| |
| status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); |
| if (status) |
| goto init_pkg_free_alloc; |
| |
| for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { |
| #define ICE_PKG_FLAG_COUNT 4 |
| char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; |
| u8 place = 0; |
| |
| if (pkg_info->pkg_info[i].is_active) { |
| flags[place++] = 'A'; |
| hw->active_pkg_ver = pkg_info->pkg_info[i].ver; |
| memcpy(hw->active_pkg_name, |
| pkg_info->pkg_info[i].name, |
| sizeof(hw->active_pkg_name)); |
| hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; |
| } |
| if (pkg_info->pkg_info[i].is_active_at_boot) |
| flags[place++] = 'B'; |
| if (pkg_info->pkg_info[i].is_modified) |
| flags[place++] = 'M'; |
| if (pkg_info->pkg_info[i].is_in_nvm) |
| flags[place++] = 'N'; |
| |
| ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", |
| i, pkg_info->pkg_info[i].ver.major, |
| pkg_info->pkg_info[i].ver.minor, |
| pkg_info->pkg_info[i].ver.update, |
| pkg_info->pkg_info[i].ver.draft, |
| pkg_info->pkg_info[i].name, flags); |
| } |
| |
| init_pkg_free_alloc: |
| kfree(pkg_info); |
| |
| return status; |
| } |
| |
| /** |
| * ice_verify_pkg - verify package |
| * @pkg: pointer to the package buffer |
| * @len: size of the package buffer |
| * |
| * Verifies various attributes of the package file, including length, format |
| * version, and the requirement of at least one segment. |
| */ |
| static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) |
| { |
| u32 seg_count; |
| u32 i; |
| |
| if (len < sizeof(*pkg)) |
| return ICE_ERR_BUF_TOO_SHORT; |
| |
| if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ || |
| pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR || |
| pkg->format_ver.update != ICE_PKG_FMT_VER_UPD || |
| pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT) |
| return ICE_ERR_CFG; |
| |
| /* pkg must have at least one segment */ |
| seg_count = le32_to_cpu(pkg->seg_count); |
| if (seg_count < 1) |
| return ICE_ERR_CFG; |
| |
| /* make sure segment array fits in package length */ |
| if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset))) |
| return ICE_ERR_BUF_TOO_SHORT; |
| |
| /* all segments must fit within length */ |
| for (i = 0; i < seg_count; i++) { |
| u32 off = le32_to_cpu(pkg->seg_offset[i]); |
| struct ice_generic_seg_hdr *seg; |
| |
| /* segment header must fit */ |
| if (len < off + sizeof(*seg)) |
| return ICE_ERR_BUF_TOO_SHORT; |
| |
| seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); |
| |
| /* segment body must fit */ |
| if (len < off + le32_to_cpu(seg->seg_size)) |
| return ICE_ERR_BUF_TOO_SHORT; |
| } |
| |
| return 0; |
| } |
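| |
| /* Layout being validated above (informational sketch): the package starts |
| * with struct ice_pkg_hdr (format_ver, seg_count, seg_offset[seg_count]); |
| * segment i begins at seg_offset[i] with a struct ice_generic_seg_hdr and |
| * occupies seg_size bytes in total. Every seg_offset[i] + seg_size must lie |
| * within len, which is what the loop above enforces. |
| */ |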
| |
| /** |
| * ice_free_seg - free package segment pointer |
| * @hw: pointer to the hardware structure |
| * |
| * Frees the package segment pointer in the proper manner, depending on whether |
| * the segment was allocated or just the passed-in pointer was stored. |
| */ |
| void ice_free_seg(struct ice_hw *hw) |
| { |
| if (hw->pkg_copy) { |
| devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy); |
| hw->pkg_copy = NULL; |
| hw->pkg_size = 0; |
| } |
| hw->seg = NULL; |
| } |
| |
| /** |
| * ice_init_pkg_regs - initialize additional package registers |
| * @hw: pointer to the hardware structure |
| */ |
| static void ice_init_pkg_regs(struct ice_hw *hw) |
| { |
| #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF |
| #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF |
| #define ICE_SW_BLK_IDX 0 |
| |
| /* setup Switch block input mask, which is 48-bits in two parts */ |
| wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); |
| wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); |
| } |
| |
| /** |
| * ice_chk_pkg_version - check package version for compatibility with driver |
| * @pkg_ver: pointer to a version structure to check |
| * |
| * Check to make sure that the package about to be downloaded is compatible with |
| * the driver. To be compatible, the major and minor components of the package |
| * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR |
| * definitions. |
| */ |
| static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) |
| { |
| if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || |
| pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) |
| return ICE_ERR_NOT_SUPPORTED; |
| |
| return 0; |
| } |
| |
| /** |
| * ice_init_pkg - initialize/download package |
| * @hw: pointer to the hardware structure |
| * @buf: pointer to the package buffer |
| * @len: size of the package buffer |
| * |
| * This function initializes a package. The package contains HW tables |
| * required to do packet processing. First, the function extracts package |
| * information such as version. Then it finds the ice configuration segment |
| * within the package; this function then saves a copy of the segment pointer |
| * within the supplied package buffer. Next, the function will cache any hints |
| * from the package, followed by downloading the package itself. Note that if |
| * a previous PF driver has already downloaded the package successfully, then |
| * the current driver will not have to download the package again. |
| * |
| * The local package contents will be used to query default behavior and to |
| * update specific sections of the HW's version of the package (e.g. to update |
| * the parse graph to understand new protocols). |
| * |
| * This function stores a pointer to the package buffer memory, and it is |
| * expected that the supplied buffer will not be freed immediately. If the |
| * package buffer needs to be freed, such as when read from a file, use |
| * ice_copy_and_init_pkg() instead of calling ice_init_pkg() directly. |
| */ |
| enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) |
| { |
| struct ice_pkg_hdr *pkg; |
| enum ice_status status; |
| struct ice_seg *seg; |
| |
| if (!buf || !len) |
| return ICE_ERR_PARAM; |
| |
| pkg = (struct ice_pkg_hdr *)buf; |
| status = ice_verify_pkg(pkg, len); |
| if (status) { |
| ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", |
| status); |
| return status; |
| } |
| |
| /* initialize package info */ |
| status = ice_init_pkg_info(hw, pkg); |
| if (status) |
| return status; |
| |
| /* before downloading the package, check package version for |
| * compatibility with driver |
| */ |
| status = ice_chk_pkg_version(&hw->pkg_ver); |
| if (status) |
| return status; |
| |
| /* find segment in given package */ |
| seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); |
| if (!seg) { |
| ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); |
| return ICE_ERR_CFG; |
| } |
| |
| /* download package */ |
| status = ice_download_pkg(hw, seg); |
| if (status == ICE_ERR_AQ_NO_WORK) { |
| ice_debug(hw, ICE_DBG_INIT, |
| "package previously loaded - no work.\n"); |
| status = 0; |
| } |
| |
| /* Get information on the package currently loaded in HW, then make sure |
| * the driver is compatible with this version. |
| */ |
| if (!status) { |
| status = ice_get_pkg_info(hw); |
| if (!status) |
| status = ice_chk_pkg_version(&hw->active_pkg_ver); |
| } |
| |
| if (!status) { |
| hw->seg = seg; |
| /* on successful package download update other required |
| * registers to support the package and fill HW tables |
| * with package content. |
| */ |
| ice_init_pkg_regs(hw); |
| ice_fill_blk_tbls(hw); |
| } else { |
| ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", |
| status); |
| } |
| |
| return status; |
| } |
| |
| /** |
| * ice_copy_and_init_pkg - initialize/download a copy of the package |
| * @hw: pointer to the hardware structure |
| * @buf: pointer to the package buffer |
| * @len: size of the package buffer |
| * |
| * This function copies the package buffer, and then calls ice_init_pkg() to |
| * initialize the copied package contents. |
| * |
| * The copying is necessary if the package buffer supplied is constant, or if |
| * the memory may disappear shortly after calling this function. |
| * |
| * If the package buffer resides in the data segment and can be modified, the |
| * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). |
| * |
| * However, if the package buffer needs to be copied first, such as when being |
| * read from a file, the caller should use ice_copy_and_init_pkg(). The caller |
| * is then free to immediately destroy the original package buffer, as the new |
| * copy is managed by this function and related routines. |
| */ |
| enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) |
| { |
| enum ice_status status; |
| u8 *buf_copy; |
| |
| if (!buf || !len) |
| return ICE_ERR_PARAM; |
| |
| buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); |
| |
| status = ice_init_pkg(hw, buf_copy, len); |
| if (status) { |
| /* Free the copy, since we failed to initialize the package */ |
| devm_kfree(ice_hw_to_dev(hw), buf_copy); |
| } else { |
| /* Track the copied pkg so we can free it later */ |
| hw->pkg_copy = buf_copy; |
| hw->pkg_size = len; |
| } |
| |
| return status; |
| } |
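| |
| /* Usage sketch (illustrative only): a typical caller obtains the DDP image |
| * via request_firmware() and hands in the const data, which is why the |
| * buffer must be copied. The file name below is an assumption for the |
| * example, not a requirement of this API. |
| * |
| * const struct firmware *fw; |
| * |
| * if (!request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev)) { |
| * ice_copy_and_init_pkg(hw, fw->data, fw->size); |
| * release_firmware(fw); |
| * } |
| */ |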
| |
| /* PTG Management */ |
| |
| /** |
| * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @ptype: the ptype to search for |
| * @ptg: pointer to variable that receives the PTG |
| * |
| * This function will search the PTGs for a particular ptype, returning the |
| * PTG ID that contains it through the PTG parameter, with the value of |
| * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG. |
| */ |
| static enum ice_status |
| ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) |
| { |
| if (ptype >= ICE_XLT1_CNT || !ptg) |
| return ICE_ERR_PARAM; |
| |
| *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; |
| return 0; |
| } |
| |
| /** |
| * ice_ptg_alloc_val - Allocates a new packet type group ID by value |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @ptg: the PTG to allocate |
| * |
| * This function allocates a given packet type group ID specified by the PTG |
| * parameter. |
| */ |
| static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) |
| { |
| hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; |
| } |
| |
| /** |
| * ice_ptg_remove_ptype - Removes ptype from a particular packet type group |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @ptype: the ptype to remove |
| * @ptg: the PTG to remove the ptype from |
| * |
| * This function will remove the ptype from the specific PTG, and move it to |
| * the default PTG (ICE_DEFAULT_PTG). |
| */ |
| static enum ice_status |
| ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) |
| { |
| struct ice_ptg_ptype **ch; |
| struct ice_ptg_ptype *p; |
| |
| if (ptype > ICE_XLT1_CNT - 1) |
| return ICE_ERR_PARAM; |
| |
| if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) |
| return ICE_ERR_DOES_NOT_EXIST; |
| |
| /* Should not happen if .in_use is set, bad config */ |
| if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) |
| return ICE_ERR_CFG; |
| |
| /* find the ptype within this PTG and unlink it from the list */ |
| p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; |
| ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; |
| while (p) { |
| if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { |
| *ch = p->next_ptype; |
| break; |
| } |
| |
| ch = &p->next_ptype; |
| p = p->next_ptype; |
| } |
| |
| hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; |
| hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; |
| |
| return 0; |
| } |
| |
| /** |
| * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @ptype: the ptype to add or move |
| * @ptg: the PTG to add or move the ptype to |
| * |
| * This function will either add or move a ptype to a particular PTG depending |
| * on whether the ptype is already part of another group. Note that using a |
| * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the |
| * default PTG. |
| */ |
| static enum ice_status |
| ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) |
| { |
| enum ice_status status; |
| u8 original_ptg; |
| |
| if (ptype > ICE_XLT1_CNT - 1) |
| return ICE_ERR_PARAM; |
| |
| if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) |
| return ICE_ERR_DOES_NOT_EXIST; |
| |
| status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); |
| if (status) |
| return status; |
| |
| /* Is ptype already in the correct PTG? */ |
| if (original_ptg == ptg) |
| return 0; |
| |
| /* Remove from original PTG and move back to the default PTG */ |
| if (original_ptg != ICE_DEFAULT_PTG) |
| ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); |
| |
| /* Moving to default PTG? Then we're done with this request */ |
| if (ptg == ICE_DEFAULT_PTG) |
| return 0; |
| |
| /* Add ptype to PTG at beginning of list */ |
| hw->blk[blk].xlt1.ptypes[ptype].next_ptype = |
| hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; |
| hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = |
| &hw->blk[blk].xlt1.ptypes[ptype]; |
| |
| hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; |
| hw->blk[blk].xlt1.t[ptype] = ptg; |
| |
| return 0; |
| } |
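| |
| /* Usage sketch (illustrative only; ptype and new_ptg are a caller's |
| * variables): look up where a ptype currently lives, then allocate and move |
| * it, mirroring what ice_init_sw_xlt1_db() does below. |
| * |
| * u8 ptg; |
| * |
| * if (!ice_ptg_find_ptype(hw, ICE_BLK_RSS, ptype, &ptg) && |
| * ptg == ICE_DEFAULT_PTG) { |
| * ice_ptg_alloc_val(hw, ICE_BLK_RSS, new_ptg); |
| * ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, ptype, new_ptg); |
| * } |
| */ |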
| |
| /* Block / table size info */ |
| struct ice_blk_size_details { |
| u16 xlt1; /* # XLT1 entries */ |
| u16 xlt2; /* # XLT2 entries */ |
| u16 prof_tcam; /* # profile ID TCAM entries */ |
| u16 prof_id; /* # profile IDs */ |
| u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ |
| u16 prof_redir; /* # profile redirection entries */ |
| u16 es; /* # extraction sequence entries */ |
| u16 fvw; /* # field vector words */ |
| u8 overwrite; /* overwrite existing entries allowed */ |
| u8 reverse; /* reverse FV order */ |
| }; |
| |
| static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { |
| /* |
| * Table Definitions |
| * XLT1 - Number of entries in XLT1 table |
| * XLT2 - Number of entries in XLT2 table |
| * TCAM - Number of entries in the Profile ID TCAM table |
| * PID - Number of Profile IDs |
| * CDID - Control Domain ID of the hardware block |
| * PRED - Number of entries in the Profile Redirection Table |
| * FV - Number of entries in the Field Vector |
| * FVW - Width (in WORDs) of the Field Vector |
| * OVR - Overwrite existing table entries |
| * REV - Reverse FV |
| */ |
| /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ |
| /* Overwrite , Reverse FV */ |
| /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, |
| false, false }, |
| /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, |
| false, false }, |
| /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, |
| false, true }, |
| /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, |
| true, true }, |
| /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, |
| false, false }, |
| }; |
| |
| enum ice_sid_all { |
| ICE_SID_XLT1_OFF = 0, |
| ICE_SID_XLT2_OFF, |
| ICE_SID_PR_OFF, |
| ICE_SID_PR_REDIR_OFF, |
| ICE_SID_ES_OFF, |
| ICE_SID_OFF_COUNT, |
| }; |
| |
| /* VSIG Management */ |
| |
| /** |
| * ice_vsig_find_vsi - find a VSIG that contains a specified VSI |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @vsi: VSI of interest |
| * @vsig: pointer to receive the VSI group |
| * |
| * This function will look up the VSI entry in the XLT2 list and return |
| * the VSI group it is associated with. |
| */ |
| static enum ice_status |
| ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) |
| { |
| if (!vsig || vsi >= ICE_MAX_VSI) |
| return ICE_ERR_PARAM; |
| |
| /* As long as there's a default or valid VSIG associated with the input |
| * VSI, the function returns success. Any handling of the VSIG will be |
| * done by the following add, update or remove functions. |
| */ |
| *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; |
| |
| return 0; |
| } |
| |
| /** |
| * ice_vsig_alloc_val - allocate a new VSIG by value |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @vsig: the VSIG to allocate |
| * |
| * This function will allocate a given VSIG specified by the VSIG parameter. |
| */ |
| static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) |
| { |
| u16 idx = vsig & ICE_VSIG_IDX_M; |
| |
| if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { |
| INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); |
| hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; |
| } |
| |
| return ICE_VSIG_VALUE(idx, hw->pf_id); |
| } |
| |
| /** |
| * ice_vsig_remove_vsi - remove VSI from VSIG |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @vsi: VSI to remove |
| * @vsig: VSI group to remove from |
| * |
| * The function will remove the input VSI from its VSI group and move it |
| * to the DEFAULT_VSIG. |
| */ |
| static enum ice_status |
| ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) |
| { |
| struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; |
| u16 idx; |
| |
| idx = vsig & ICE_VSIG_IDX_M; |
| |
| if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) |
| return ICE_ERR_PARAM; |
| |
| if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) |
| return ICE_ERR_DOES_NOT_EXIST; |
| |
| /* entry already in default VSIG, don't have to remove */ |
| if (idx == ICE_DEFAULT_VSIG) |
| return 0; |
| |
| vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; |
| if (!(*vsi_head)) |
| return ICE_ERR_CFG; |
| |
| vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; |
| vsi_cur = (*vsi_head); |
| |
| /* iterate the VSI list, skip over the entry to be removed */ |
| while (vsi_cur) { |
| if (vsi_tgt == vsi_cur) { |
| (*vsi_head) = vsi_cur->next_vsi; |
| break; |
| } |
| vsi_head = &vsi_cur->next_vsi; |
| vsi_cur = vsi_cur->next_vsi; |
| } |
| |
| /* verify if VSI was removed from group list */ |
| if (!vsi_cur) |
| return ICE_ERR_DOES_NOT_EXIST; |
| |
| vsi_cur->vsig = ICE_DEFAULT_VSIG; |
| vsi_cur->changed = 1; |
| vsi_cur->next_vsi = NULL; |
| |
| return 0; |
| } |
| |
| /** |
| * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group |
| * @hw: pointer to the hardware structure |
| * @blk: HW block |
| * @vsi: VSI to move |
| * @vsig: destination VSI group |
| * |
| * This function will move or add the input VSI to the target VSIG. |
| * The function will find the original VSIG the VSI belongs to and |
| * move the entry to the DEFAULT_VSIG, update the original VSIG and |
| * then move the entry to the new VSIG. |
| */ |
| static enum ice_status |
| ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) |
| { |
| struct ice_vsig_vsi *tmp; |
| enum ice_status status; |
| u16 orig_vsig, idx; |
| |
| idx = vsig & ICE_VSIG_IDX_M; |
| |
| if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) |
| return ICE_ERR_PARAM; |
| |
| /* if the VSIG is not in use and is not the default VSIG, then this VSIG |
| * doesn't exist. |
| */ |
| if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && |
| vsig != ICE_DEFAULT_VSIG) |
| return ICE_ERR_DOES_NOT_EXIST; |
| |
| status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); |
| if (status) |
| return status; |
| |
| /* no update required if vsigs match */ |
| if (orig_vsig == vsig) |
| return 0; |
| |
| if (orig_vsig != ICE_DEFAULT_VSIG) { |
| /* remove entry from orig_vsig and add to default VSIG */ |
| status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); |
| if (status) |
| return status; |
| } |
| |
| if (idx == ICE_DEFAULT_VSIG) |
| return 0; |
| |
| /* Create VSI entry and add VSIG and prop_mask values */ |
| hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; |
| hw->blk[blk].xlt2.vsis[vsi].changed = 1; |
| |
| /* Add new entry to the head of the VSIG list */ |
| tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; |
| hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = |
| &hw->blk[blk].xlt2.vsis[vsi]; |
| hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; |
| hw->blk[blk].xlt2.t[vsi] = vsig; |
| |
| return 0; |
| } |
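| |
| /* Usage sketch (illustrative only; vsi and new_vsig are a caller's |
| * variables): place a VSI into a VSIG, allocating the VSIG slot first, as |
| * ice_init_sw_xlt2_db() does below. |
| * |
| * u16 vsig; |
| * |
| * if (!ice_vsig_find_vsi(hw, ICE_BLK_RSS, vsi, &vsig) && |
| * vsig == ICE_DEFAULT_VSIG) { |
| * vsig = ice_vsig_alloc_val(hw, ICE_BLK_RSS, new_vsig); |
| * ice_vsig_add_mv_vsi(hw, ICE_BLK_RSS, vsi, vsig); |
| * } |
| */ |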
| |
| /* Block / table section IDs */ |
| static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { |
| /* SWITCH */ |
| { ICE_SID_XLT1_SW, |
| ICE_SID_XLT2_SW, |
| ICE_SID_PROFID_TCAM_SW, |
| ICE_SID_PROFID_REDIR_SW, |
| ICE_SID_FLD_VEC_SW |
| }, |
| |
| /* ACL */ |
| { ICE_SID_XLT1_ACL, |
| ICE_SID_XLT2_ACL, |
| ICE_SID_PROFID_TCAM_ACL, |
| ICE_SID_PROFID_REDIR_ACL, |
| ICE_SID_FLD_VEC_ACL |
| }, |
| |
| /* FD */ |
| { ICE_SID_XLT1_FD, |
| ICE_SID_XLT2_FD, |
| ICE_SID_PROFID_TCAM_FD, |
| ICE_SID_PROFID_REDIR_FD, |
| ICE_SID_FLD_VEC_FD |
| }, |
| |
| /* RSS */ |
| { ICE_SID_XLT1_RSS, |
| ICE_SID_XLT2_RSS, |
| ICE_SID_PROFID_TCAM_RSS, |
| ICE_SID_PROFID_REDIR_RSS, |
| ICE_SID_FLD_VEC_RSS |
| }, |
| |
| /* PE */ |
| { ICE_SID_XLT1_PE, |
| ICE_SID_XLT2_PE, |
| ICE_SID_PROFID_TCAM_PE, |
| ICE_SID_PROFID_REDIR_PE, |
| ICE_SID_FLD_VEC_PE |
| } |
| }; |
| |
| /** |
| * ice_init_sw_xlt1_db - init software XLT1 database from HW tables |
| * @hw: pointer to the hardware structure |
| * @blk: the HW block to initialize |
| */ |
| static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) |
| { |
| u16 pt; |
| |
| for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { |
| u8 ptg; |
| |
| ptg = hw->blk[blk].xlt1.t[pt]; |
| if (ptg != ICE_DEFAULT_PTG) { |
| ice_ptg_alloc_val(hw, blk, ptg); |
| ice_ptg_add_mv_ptype(hw, blk, pt, ptg); |
| } |
| } |
| } |
| |
| /** |
| * ice_init_sw_xlt2_db - init software XLT2 database from HW tables |
| * @hw: pointer to the hardware structure |
| * @blk: the HW block to initialize |
| */ |
| static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) |
| { |
| u16 vsi; |
| |
| for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { |
| u16 vsig; |
| |
| vsig = hw->blk[blk].xlt2.t[vsi]; |
| if (vsig) { |
| ice_vsig_alloc_val(hw, blk, vsig); |
| ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); |
| /* no changes at this time, since this has been |
| * initialized from the original package |
| */ |
| hw->blk[blk].xlt2.vsis[vsi].changed = 0; |
| } |
| } |
| } |
| |
| /** |
| * ice_init_sw_db - init software database from HW tables |
| * @hw: pointer to the hardware structure |
| */ |
| static void ice_init_sw_db(struct ice_hw *hw) |
| { |
| u16 i; |
| |
| for (i = 0; i < ICE_BLK_COUNT; i++) { |
| ice_init_sw_xlt1_db(hw, (enum ice_block)i); |
| ice_init_sw_xlt2_db(hw, (enum ice_block)i); |
| } |
| } |
| |
| /** |
| * ice_fill_tbl - Reads content of a single table type into database |
| * @hw: pointer to the hardware structure |
| * @block_id: Block ID of the table to copy |
| * @sid: Section ID of the table to copy |
| * |
| * Will attempt to read the entire content of a given table of a single block |
| * into the driver database. We assume that the buffer will always |
| * be as large or larger than the data contained in the package. If |
| * this condition is not met, there is most likely an error in the package |
| * contents. |
| */ |
| static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) |
| { |
| u32 dst_len, sect_len, offset = 0; |
| struct ice_prof_redir_section *pr; |
| struct ice_prof_id_section *pid; |
| struct ice_xlt1_section *xlt1; |
| struct ice_xlt2_section *xlt2; |
| struct ice_sw_fv_section *es; |
| struct ice_pkg_enum state; |
| u8 *src, *dst; |
| void *sect; |
| |
| /* if the HW segment pointer is null then the first iteration of |
| * ice_pkg_enum_section() will fail. In this case the HW tables will |
| * not be filled and the function simply returns. |
| */ |
| if (!hw->seg) { |
| ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); |
| return; |
| } |
| |
| memset(&state, 0, sizeof(state)); |
| |
| sect = ice_pkg_enum_section(hw->seg, &state, sid); |
| |
| while (sect) { |
| switch (sid) { |
| case ICE_SID_XLT1_SW: |
| case ICE_SID_XLT1_FD: |
| case ICE_SID_XLT1_RSS: |
| case ICE_SID_XLT1_ACL: |
| case ICE_SID_XLT1_PE: |
| xlt1 = (struct ice_xlt1_section *)sect; |
| src = xlt1->value; |
| sect_len = le16_to_cpu(xlt1->count) * |
| sizeof(*hw->blk[block_id].xlt1.t); |
| dst = hw->blk[block_id].xlt1.t; |
| dst_len = hw->blk[block_id].xlt1.count * |
| sizeof(*hw->blk[block_id].xlt1.t); |
| break; |
| case ICE_SID_XLT2_SW: |
| case ICE_SID_XLT2_FD: |
| case ICE_SID_XLT2_RSS: |
| case ICE_SID_XLT2_ACL: |
| case ICE_SID_XLT2_PE: |
| xlt2 = (struct ice_xlt2_section *)sect; |
| src = (__force u8 *)xlt2->value; |
| sect_len = le16_to_cpu(xlt2->count) * |
| sizeof(*hw->blk[block_id].xlt2.t); |
| dst = (u8 *)hw->blk[block_id].xlt2.t; |
| dst_len = hw->blk[block_id].xlt2.count * |
| sizeof(*hw->blk[block_id].xlt2.t); |
| break; |
| case ICE_SID_PROFID_TCAM_SW: |
| case ICE_SID_PROFID_TCAM_FD: |
| case ICE_SID_PROFID_TCAM_RSS: |
| case ICE_SID_PROFID_TCAM_ACL: |
| case ICE_SID_PROFID_TCAM_PE: |
| pid = (struct ice_prof_id_section *)sect; |
| src = (u8 *)pid->entry; |
| sect_len = le16_to_cpu(pid->count) * |
| sizeof(*hw->blk[block_id].prof.t); |
| dst = (u8 *)hw->blk[block_id].prof.t; |
| dst_len = hw->blk[block_id].prof.count * |
| sizeof(*hw->blk[block_id].prof.t); |
| break; |
| case ICE_SID_PROFID_REDIR_SW: |
| case ICE_SID_PROFID_REDIR_FD: |
| case ICE_SID_PROFID_REDIR_RSS: |
| case ICE_SID_PROFID_REDIR_ACL: |
| case ICE_SID_PROFID_REDIR_PE: |
| pr = (struct ice_prof_redir_section *)sect; |
| src = pr->redir_value; |
| sect_len = le16_to_cpu(pr->count) * |
| sizeof(*hw->blk[block_id].prof_redir.t); |
| dst = hw->blk[block_id].prof_redir.t; |
| dst_len = hw->blk[block_id].prof_redir.count * |
| sizeof(*hw->blk[block_id].prof_redir.t); |
| break; |
| case ICE_SID_FLD_VEC_SW: |
| case ICE_SID_FLD_VEC_FD: |
| case ICE_SID_FLD_VEC_RSS: |
| case ICE_SID_FLD_VEC_ACL: |
| case ICE_SID_FLD_VEC_PE: |
| es = (struct ice_sw_fv_section *)sect; |
| src = (u8 *)es->fv; |
| sect_len = (u32)(le16_to_cpu(es->count) * |
| hw->blk[block_id].es.fvw) * |
| sizeof(*hw->blk[block_id].es.t); |
| dst = (u8 *)hw->blk[block_id].es.t; |
| dst_len = (u32)(hw->blk[block_id].es.count * |
| hw->blk[block_id].es.fvw) * |
| sizeof(*hw->blk[block_id].es.t); |
| break; |
| default: |
| return; |
| } |
| |
| /* if the section offset exceeds destination length, terminate |
| * table fill. |
| */ |
| if (offset > dst_len) |
| return; |
| |
| /* if the sum of section size and offset exceeds the destination size |
| * then we are out of bounds of the HW table size for that PF. |
| * Change the section length to fill only the remaining table space |
| * of that PF. |
| */ |
| if ((offset + sect_len) > dst_len) |
| sect_len = dst_len - offset; |
| |
| memcpy(dst + offset, src, sect_len); |
| offset += sect_len; |
| sect = ice_pkg_enum_section(NULL, &state, sid); |
| } |
| } |
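| |
| /* Worked example of the clamping above (numbers are illustrative only): with |
| * dst_len = 1024, offset = 896 and sect_len = 256, offset + sect_len = 1152 |
| * exceeds dst_len, so sect_len is reduced to dst_len - offset = 128 and only |
| * that much of the section is copied for this PF. |
| */ |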
| |
| /** |
| * ice_fill_blk_tbls - Read package context for tables |
| * @hw: pointer to the hardware structure |
| * |
| * Reads the current package contents and populates the driver |
| * database with the data iteratively for all advanced feature |
| * blocks. Assumes that the HW tables have already been allocated. |
| */ |
| void ice_fill_blk_tbls(struct ice_hw *hw) |
| { |
| u8 i; |
| |
| for (i = 0; i < ICE_BLK_COUNT; i++) { |
| enum ice_block blk_id = (enum ice_block)i; |
| |
| ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); |
| ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); |
| ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); |
| ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); |
| ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); |
| } |
| |
| ice_init_sw_db(hw); |
| } |
| |
| /** |
| * ice_free_hw_tbls - free hardware table memory |
| * @hw: pointer to the hardware structure |
| */ |
| void ice_free_hw_tbls(struct ice_hw *hw) |
| { |
| u8 i; |
| |
| for (i = 0; i < ICE_BLK_COUNT; i++) { |
| hw->blk[i].is_list_init = false; |
| |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count); |
| devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written); |
| } |
| |
| memset(hw->blk, 0, sizeof(hw->blk)); |
| } |
| |
| /** |
| * ice_clear_hw_tbls - clear HW tables and flow profiles |
| * @hw: pointer to the hardware structure |
| */ |
| void ice_clear_hw_tbls(struct ice_hw *hw) |
| { |
| u8 i; |
| |
| for (i = 0; i < ICE_BLK_COUNT; i++) { |
| struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; |
| struct ice_prof_tcam *prof = &hw->blk[i].prof; |
| struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; |
| struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; |
| struct ice_es *es = &hw->blk[i].es; |
| |
| memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); |
| memset(xlt1->ptg_tbl, 0, |
| ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); |
| memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); |
| |
| memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); |
| memset(xlt2->vsig_tbl, 0, |
| xlt2->count * sizeof(*xlt2->vsig_tbl)); |
| memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); |
| |
| memset(prof->t, 0, prof->count * sizeof(*prof->t)); |
| memset(prof_redir->t, 0, |
| prof_redir->count * sizeof(*prof_redir->t)); |
| |
| memset(es->t, 0, es->count * sizeof(*es->t)); |
| memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); |
| memset(es->written, 0, es->count * sizeof(*es->written)); |
| } |
| } |
| |
| /** |
| * ice_init_hw_tbls - init hardware table memory |
| * @hw: pointer to the hardware structure |
| */ |
| enum ice_status ice_init_hw_tbls(struct ice_hw *hw) |
| { |
| u8 i; |
| |
| for (i = 0; i < ICE_BLK_COUNT; i++) { |
| struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; |
| struct ice_prof_tcam *prof = &hw->blk[i].prof; |
| struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; |
| struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; |
| struct ice_es *es = &hw->blk[i].es; |
| u16 j; |
| |
| if (hw->blk[i].is_list_init) |
| continue; |
| |
| hw->blk[i].is_list_init = true; |
| |
| hw->blk[i].overwrite = blk_sizes[i].overwrite; |
| es->reverse = blk_sizes[i].reverse; |
| |
| xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; |
| xlt1->count = blk_sizes[i].xlt1; |
| |
| xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, |
| sizeof(*xlt1->ptypes), GFP_KERNEL); |
| |
| if (!xlt1->ptypes) |
| goto err; |
| |
| xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS, |
| sizeof(*xlt1->ptg_tbl), |
| GFP_KERNEL); |
| |
| if (!xlt1->ptg_tbl) |
| goto err; |
| |
| xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count, |
| sizeof(*xlt1->t), GFP_KERNEL); |
| if (!xlt1->t) |
| goto err; |
| |
| xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; |
| xlt2->count = blk_sizes[i].xlt2; |
| |
| xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, |
| sizeof(*xlt2->vsis), GFP_KERNEL); |
| |
| if (!xlt2->vsis) |
| goto err; |
| |
| xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, |
| sizeof(*xlt2->vsig_tbl), |
| GFP_KERNEL); |
| if (!xlt2->vsig_tbl) |
| goto err; |
| |
| for (j = 0; j < xlt2->count; j++) |
| INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); |
| |
| xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count, |
| sizeof(*xlt2->t), GFP_KERNEL); |
| if (!xlt2->t) |
| goto err; |
| |
| prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; |
| prof->count = blk_sizes[i].prof_tcam; |
| prof->max_prof_id = blk_sizes[i].prof_id; |
| prof->cdid_bits = blk_sizes[i].prof_cdid_bits; |
| prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count, |
| sizeof(*prof->t), GFP_KERNEL); |
| |
| if (!prof->t) |
| goto err; |
| |
| prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; |
| prof_redir->count = blk_sizes[i].prof_redir; |
| prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw), |
| prof_redir->count, |
| sizeof(*prof_redir->t), |
| GFP_KERNEL); |
| |
| if (!prof_redir->t) |
| goto err; |
| |
| es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; |
| es->count = blk_sizes[i].es; |
| es->fvw = blk_sizes[i].fvw; |
| es->t = devm_kcalloc(ice_hw_to_dev(hw), |
| (u32)(es->count * es->fvw), |
| sizeof(*es->t), GFP_KERNEL); |
| if (!es->t) |
| goto err; |
| |
| es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, |
| sizeof(*es->ref_count), |
| GFP_KERNEL); |
| |
| es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, |
| sizeof(*es->written), GFP_KERNEL); |
| if (!es->ref_count || !es->written) |
| goto err; |
| } |
| return 0; |
| |
| err: |
| ice_free_hw_tbls(hw); |
| return ICE_ERR_NO_MEMORY; |
| } |