// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/mlx5.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
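
/* Report which IPsec offload modes the device supports. Returns a bitmap
 * of MLX5_IPSEC_CAP_* flags, or 0 when the required general, DEK, flow
 * table or AES-GCM-128 capabilities are missing.
 */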
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = 0;
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return 0;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return 0;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
if (!caps)
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_IPSEC_CAP_ESN;
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bits in flow table metadata
* to hold the IPsec Object unique handle.
*/
WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
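
/* Fill the ASO context of the IPsec object for packet offload: arm the
 * ESN event when ESN is used (with replay protection and window size on
 * the RX side), select SN increment mode on the TX side, and program the
 * hard/soft packet lifetime limits when they are finite.
 */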
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
if (attrs->esn_trigger) {
MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
MLX5_SET(ipsec_aso, aso_ctx, window_sz,
attrs->replay_window / 64);
MLX5_SET(ipsec_aso, aso_ctx, mode,
MLX5_IPSEC_ASO_REPLAY_PROTECTION);
}
}
/* ASO context */
MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
MLX5_SET(ipsec_obj, obj, full_offload, 1);
MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow
 * steering matches against. Please be aware that this
 * register was chosen arbitrarily and can't be used for
 * anything else while IPsec packet offload is active.
 */
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
if (attrs->hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
lower_32_bits(attrs->hard_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->soft_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
lower_32_bits(attrs->soft_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
}
}
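
/* Issue CREATE_GENERAL_OBJECT for an IPSEC object: program the AES-GCM
 * salt, implicit IV, ICV length, ESN state and DEK index, plus the ASO
 * context for packet offload. On success the firmware-assigned object id
 * is stored in the SA entry.
 */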
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
struct mlx5e_hw_objs *res;
int err;
obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
/* salt and seq_iv */
salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
if (attrs->esn_trigger) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
}
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
sa_entry->ipsec_obj_id =
MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return err;
}
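
/* Tear down the firmware IPSEC object created by mlx5_create_ipsec_obj(). */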
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
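
/* Create the HW context of an SA: a DEK holding the AES-GCM key, followed
 * by the IPSEC object referencing it. The key is destroyed again if the
 * object creation fails.
 */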
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
int err;
/* key */
err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
aes_gcm->key_len / BITS_PER_BYTE,
MLX5_ACCEL_OBJ_IPSEC_KEY,
&sa_entry->enc_key_id);
if (err) {
mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
return err;
}
err = mlx5_create_ipsec_obj(sa_entry);
if (err) {
mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
goto err_enc_key;
}
return 0;
err_enc_key:
mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
return err;
}
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_destroy_ipsec_obj(sa_entry);
mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}
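
/* Update the ESN state of an existing IPsec object. The object is queried
 * first to verify that the firmware allows modifying the esn_msb and
 * esn_overlap fields (via modify_field_select) before MODIFY is issued.
 */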
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
u64 modify_field_select = 0;
u64 general_obj_types;
void *obj;
int err;
if (!attrs->esn_trigger)
return 0;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return -EINVAL;
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
sa_entry->ipsec_obj_id, err);
return err;
}
obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
/* esn */
if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
return -EOPNOTSUPP;
obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
MLX5_SET64(ipsec_obj, obj, modify_field_select,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
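
/* Push updated xfrm attributes to HW; the cached copy in the SA entry is
 * refreshed only when the modification succeeded.
 */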
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
int err;
err = mlx5_modify_ipsec_obj(sa_entry, attrs);
if (err)
return;
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
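
/* Re-arm the ESN event in the ASO context: post an unconditional
 * (always-true condition) bitwise 64-bit ASO write. Bit 54 of the word at
 * MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET is presumed to map onto the
 * esn_event_arm bit of the context.
 */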
static void
mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_wqe_aso_ctrl_seg data = {};
data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
<< 4;
data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
data.bitwise_data = cpu_to_be64(BIT_ULL(54));
data.data_mask = data.bitwise_data;
mlx5e_ipsec_aso_query(sa_entry, &data);
}
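
/* Advance the SW ESN state based on the sequence number reported by the
 * ASO: once the reported value falls back below the middle of the 32-bit
 * range, the high (ESN) bits are incremented and the overlap flag is
 * cleared; otherwise the window overlaps the wrap point. The new state is
 * then pushed to HW and the ESN event is re-armed.
 */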
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
u32 mode_param)
{
struct mlx5_accel_esp_xfrm_attrs attrs = {};
if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
sa_entry->esn_state.esn++;
sa_entry->esn_state.overlap = 0;
} else {
sa_entry->esn_state.overlap = 1;
}
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
}
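
/* Workqueue handler for IPsec object change events: query the ASO context
 * from HW once and mark it cached for the duration of the handler, update
 * the ESN state if its event was disarmed, and kick xfrm expiry handling
 * when one of the lifetime arms was cleared.
 */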
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
struct mlx5e_ipsec_work *work =
container_of(_work, struct mlx5e_ipsec_work, work);
struct mlx5_accel_esp_xfrm_attrs *attrs;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec_aso *aso;
struct mlx5e_ipsec *ipsec;
int ret;
sa_entry = xa_load(&work->ipsec->sadb, work->id);
if (!sa_entry)
goto out;
ipsec = sa_entry->ipsec;
aso = ipsec->aso;
attrs = &sa_entry->attrs;
spin_lock(&sa_entry->x->lock);
ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
if (ret)
goto unlock;
aso->use_cache = true;
if (attrs->esn_trigger &&
!MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
}
if (attrs->soft_packet_limit != XFRM_INF)
if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
!MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
!MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
xfrm_state_check_expire(sa_entry->x);
aso->use_cache = false;
unlock:
spin_unlock(&sa_entry->x->lock);
out:
kfree(work);
}
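
/* Atomic notifier for firmware events: filter object change events of the
 * IPSEC object type and defer the handling, which may sleep, to the IPsec
 * workqueue.
 */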
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
void *data)
{
struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
struct mlx5_eqe_obj_change *object;
struct mlx5e_ipsec_work *work;
struct mlx5_eqe *eqe = data;
u16 type;
if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
return NOTIFY_DONE;
object = &eqe->data.obj_change;
type = be16_to_cpu(object->obj_type);
if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
return NOTIFY_DONE;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
work->ipsec = ipsec;
work->id = be32_to_cpu(object->obj_id);
queue_work(ipsec->wq, &work->work);
return NOTIFY_OK;
}
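
/* Set up the single ASO instance shared by all SAs: DMA-map the response
 * buffer for ASO reads, create the ASO queue and register the event
 * notifier.
 */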
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5e_ipsec_aso *aso;
struct mlx5e_hw_objs *res;
struct device *pdev;
int err;
aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
if (!aso)
return -ENOMEM;
res = &mdev->mlx5e_res.hw_objs;
pdev = mlx5_core_dma_dev(mdev);
aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
err = dma_mapping_error(pdev, aso->dma_addr);
if (err)
goto err_dma;
aso->aso = mlx5_aso_create(mdev, res->pdn);
if (IS_ERR(aso->aso)) {
err = PTR_ERR(aso->aso);
goto err_aso_create;
}
ipsec->nb.notifier_call = mlx5e_ipsec_event;
mlx5_notifier_register(mdev, &ipsec->nb);
ipsec->aso = aso;
return 0;
err_aso_create:
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
err_dma:
kfree(aso);
return err;
}
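
/* Reverse of mlx5e_ipsec_aso_init(): unregister the notifier, destroy the
 * ASO queue and unmap the response buffer.
 */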
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5e_ipsec_aso *aso;
struct device *pdev;
aso = ipsec->aso;
pdev = mlx5_core_dma_dev(mdev);
mlx5_notifier_unregister(mdev, &ipsec->nb);
mlx5_aso_destroy(aso->aso);
dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
DMA_BIDIRECTIONAL);
kfree(aso);
}
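
/* Overlay the caller-supplied ASO control fields (mode, conditions,
 * offsets and bitwise data) on the WQE built by mlx5e_ipsec_aso_query().
 */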
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
struct mlx5_wqe_aso_ctrl_seg *data)
{
if (!data)
return;
ctrl->data_mask_mode = data->data_mask_mode;
ctrl->condition_1_0_operand = data->condition_1_0_operand;
ctrl->condition_1_0_offset = data->condition_1_0_offset;
ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
ctrl->condition_0_data = data->condition_0_data;
ctrl->condition_0_mask = data->condition_0_mask;
ctrl->condition_1_data = data->condition_1_data;
ctrl->condition_1_mask = data->condition_1_mask;
ctrl->bitwise_data = data->bitwise_data;
ctrl->data_mask = data->data_mask;
}
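
/* Post an ASO WQE that reads the HW ASO context of the SA into the
 * DMA-mapped buffer (aso->ctx) and poll for its completion. An optional
 * @data segment lets callers combine the read with an ASO operation such
 * as re-arming an event. Must be called with the xfrm state lock held;
 * returns immediately when a cached context is in use.
 */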
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_wqe_aso_ctrl_seg *ctrl;
struct mlx5e_hw_objs *res;
struct mlx5_aso_wqe *wqe;
u8 ds_cnt;
lockdep_assert_held(&sa_entry->x->lock);
if (aso->use_cache)
return 0;
res = &mdev->mlx5e_res.hw_objs;
memset(aso->ctx, 0, sizeof(aso->ctx));
wqe = mlx5_aso_get_wqe(aso->aso);
ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
ctrl = &wqe->aso_ctrl;
ctrl->va_l =
cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
ctrl->l_key = cpu_to_be32(res->mkey);
mlx5e_ipsec_aso_copy(ctrl, data);
mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
return mlx5_aso_poll_cq(aso->aso, false);
}
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
u64 hard_cnt;
hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
/* HW decreases the limit until it reaches zero to fire an event.
 * We need to flip the calculation, so the returned count is the
 * total number of packets that have passed and not how many are left.
 */
*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}