/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
 * Copyright 2008 - 2016 Freescale Semiconductor Inc.
 */

#ifndef __DPAA_H
#define __DPAA_H

#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
#include "fman.h"
#include "mac.h"
#include "dpaa_eth_trace.h"
/* Number of prioritised traffic classes */
#define DPAA_TC_NUM 4

/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1,	/* Rx Default FQs */
	FQ_TYPE_RX_ERROR,	/* Rx Error FQs */
	FQ_TYPE_RX_PCD,		/* Rx Parse Classify Distribute FQs */
	FQ_TYPE_TX,		/* "Real" Tx FQs */
	FQ_TYPE_TX_CONFIRM,	/* Tx default Conf FQ (actually an Rx FQ) */
	FQ_TYPE_TX_CONF_MQ,	/* Tx conf FQs (one for each Tx FQ) */
	FQ_TYPE_TX_ERROR,	/* Tx Error FQs (these are actually Rx FQs) */
};
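
/* Illustrative sketch (not part of the original header): the FQ type is
 * one input to the "fine-grained WQ assignments" mentioned above. The
 * helper below is hypothetical and uses made-up priorities; the real
 * selection also factors in the traffic class.
 */
static inline u8 dpaa_fq_type_to_wq(enum dpaa_fq_type type)
{
	switch (type) {
	case FQ_TYPE_TX_ERROR:
	case FQ_TYPE_TX_CONFIRM:
		return 1;	/* service errors/confirmations quickly */
	default:
		return 3;	/* bulk Rx/Tx traffic */
	}
}
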
struct dpaa_fq {
	struct qman_fq fq_base;
	struct list_head list;
	struct net_device *net_dev;
	bool init;
	u32 fqid;
	u32 flags;
	u16 channel;
	u8 wq;
	enum dpaa_fq_type fq_type;
	struct xdp_rxq_info xdp_rxq;
};
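
/* Illustrative sketch (not part of the original header): fq_base is the
 * first member, so QMan callbacks that hand back a struct qman_fq * can
 * recover the enclosing dpaa_fq with container_of(). Hypothetical helper:
 */
static inline struct dpaa_fq *dpaa_fq_from(struct qman_fq *fq)
{
	return container_of(fq, struct dpaa_fq, fq_base);
}
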
struct dpaa_fq_cbs {
	struct qman_fq rx_defq;
	struct qman_fq tx_defq;
	struct qman_fq rx_errq;
	struct qman_fq tx_errq;
	struct qman_fq egress_ern;
};
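
/* Illustrative sketch: the qman_fq objects above act only as callback
 * templates, one per FQ role. When a dpaa_fq is set up, the matching
 * template is copied into its fq_base so the right handlers run.
 * Hypothetical helper, shown for the Rx default case only:
 */
static inline void dpaa_fq_copy_cbs(struct dpaa_fq *dpaa_fq,
				    const struct dpaa_fq_cbs *fq_cbs)
{
	dpaa_fq->fq_base = fq_cbs->rx_defq;	/* struct copy, cbs included */
}
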
struct dpaa_priv;

struct dpaa_bp {
	/* used in the DMA mapping operations */
	struct dpaa_priv *priv;
	/* current number of buffers in the buffer pool allotted to each CPU */
	int __percpu *percpu_count;
	/* all buffers allocated for this pool have this raw size */
	size_t raw_size;
	/* all buffers in this pool have this same usable size */
	size_t size;
	/* the buffer pools are initialized with config_count buffers for each
	 * CPU; at runtime the number of buffers per CPU is constantly brought
	 * back to this level
	 */
	u16 config_count;
	u8 bpid;
	struct bman_pool *pool;
	/* the buffer pool can be seeded before use through this callback */
	int (*seed_cb)(struct dpaa_bp *);
	/* the buffer pool can be emptied before freeing through this callback */
	void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
	refcount_t refs;
};
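
/* Illustrative sketch: emptying the pool goes through the BMan API
 * (bman_acquire() is declared in soc/fsl/bman.h), with free_buf_cb()
 * unmapping and freeing each buffer. Hypothetical helper:
 */
static inline void dpaa_bp_drain_one(struct dpaa_bp *bp)
{
	struct bm_buffer buf;

	/* bman_acquire() returns the number of buffers pulled out */
	if (bman_acquire(bp->pool, &buf, 1) > 0 && bp->free_buf_cb)
		bp->free_buf_cb(bp, &buf);
}
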
struct dpaa_rx_errors {
	u64 dme;		/* DMA Error */
	u64 fpe;		/* Frame Physical Error */
	u64 fse;		/* Frame Size Error */
	u64 phe;		/* Header Error */
};

/* Counters for QMan ERN frames - one counter per rejection code */
struct dpaa_ern_cnt {
	u64 cg_tdrop;		/* Congestion group taildrop */
	u64 wred;		/* WRED congestion */
	u64 err_cond;		/* Error condition */
	u64 early_window;	/* Order restoration, frame too early */
	u64 late_window;	/* Order restoration, frame too late */
	u64 fq_tdrop;		/* FQ taildrop */
	u64 fq_retired;		/* FQ is retired */
	u64 orp_zero;		/* ORP disabled */
};
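
/* Illustrative sketch: an egress_ern handler would bump one of these
 * counters based on the rejection code in the ERN message. The QM_MR_RC_*
 * codes are assumed to come from soc/fsl/qman.h; the helper itself is
 * hypothetical and only a subset of the codes is shown:
 */
static inline void dpaa_ern_count(struct dpaa_ern_cnt *cnt, u8 rc)
{
	switch (rc & QM_MR_RC_MASK) {
	case QM_MR_RC_CGR_TAILDROP:
		cnt->cg_tdrop++;
		break;
	case QM_MR_RC_WRED:
		cnt->wred++;
		break;
	case QM_MR_RC_FQ_TAILDROP:
		cnt->fq_tdrop++;
		break;
	}
}
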
struct dpaa_napi_portal {
	struct napi_struct napi;
	struct qman_portal *p;
	bool down;
	int xdp_act;
};
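
/* Illustrative sketch: xdp_act accumulates the XDP verdicts seen during
 * one poll cycle, so redirects can be flushed once per NAPI run instead
 * of once per frame. Hypothetical helper, assuming linux/filter.h for
 * xdp_do_flush() and XDP_REDIRECT:
 */
static inline void dpaa_napi_xdp_flush(struct dpaa_napi_portal *np)
{
	if (np->xdp_act & XDP_REDIRECT)
		xdp_do_flush();
}
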
struct dpaa_percpu_priv {
	struct net_device *net_dev;
	struct dpaa_napi_portal np;
	u64 in_interrupt;
	u64 tx_confirm;
	/* fragmented (non-linear) skbuffs received from the stack */
	u64 tx_frag_skbuffs;
	struct rtnl_link_stats64 stats;
	struct dpaa_rx_errors rx_errors;
	struct dpaa_ern_cnt ern_cnt;
};
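
/* Illustrative sketch: the statistics above are kept per CPU, so a
 * global view (e.g. for ethtool) must sum the instances over all
 * possible CPUs. Hypothetical helper for a single field:
 */
static inline u64 dpaa_sum_tx_confirm(struct dpaa_percpu_priv __percpu *pcp)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(pcp, cpu)->tx_confirm;
	return total;
}
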
struct dpaa_buffer_layout {
	u16 priv_data_size;
};

/* Information to be used on the Tx confirmation path. Stored just
 * before the start of the transmit buffer. Maximum size allowed
 * is DPAA_TX_PRIV_DATA_SIZE bytes.
 */
struct dpaa_eth_swbp {
	struct sk_buff *skb;
	struct xdp_frame *xdpf;
};
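
/* Illustrative sketch: on Tx, this back-pointer block is written into
 * the headroom reserved in front of the frame and read back on Tx
 * confirmation to free the right object. Hypothetical helper, assuming
 * buf_start points at the frame start minus tx_headroom:
 */
static inline void dpaa_swbp_set_skb(void *buf_start, struct sk_buff *skb)
{
	struct dpaa_eth_swbp *swbp = buf_start;

	swbp->skb = skb;
	swbp->xdpf = NULL;	/* not an XDP frame */
}
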
struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
	u16 tx_headroom;
	struct net_device *net_dev;
	struct mac_device *mac_dev;
	struct device *rx_dma_dev;
	struct device *tx_dma_dev;
	struct qman_fq **egress_fqs;
	struct qman_fq **conf_fqs;
	u16 channel;
	struct list_head dpaa_fq_list;
	u8 num_tc;
	bool keygen_in_use;
	u32 msg_enable;		/* net_device message level */
	struct {
		/* All egress queues to a given net device belong to one
		 * (and the same) congestion group.
		 */
		struct qman_cgr cgr;
		/* If congested, when it began. Used for performance stats. */
		u32 congestion_start_jiffies;
		/* Number of jiffies the Tx port was congested. */
		u32 congested_jiffies;
		/* Counter for the number of times the CGR
		 * entered congestion state
		 */
		u32 cgr_congested_count;
	} cgr_data;
	/* Use a per-port CGR for ingress traffic. */
	bool use_ingress_cgr;
	struct qman_cgr ingress_cgr;
	struct dpaa_buffer_layout buf_layout[2];
	u16 rx_headroom;
	bool tx_tstamp;		/* Tx timestamping enabled */
	bool rx_tstamp;		/* Rx timestamping enabled */
	struct bpf_prog *xdp_prog;
};
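
/* Illustrative sketch: cgr_data tracks Tx congestion in jiffies; a stats
 * path could report it in milliseconds. Hypothetical helper, assuming
 * linux/jiffies.h for jiffies_to_msecs():
 */
static inline unsigned int dpaa_congested_ms(const struct dpaa_priv *priv)
{
	return jiffies_to_msecs(priv->cgr_data.congested_jiffies);
}
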
/* from dpaa_ethtool.c */
extern const struct ethtool_ops dpaa_ethtool_ops;

/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);

static inline size_t dpaa_num_txqs_per_tc(void)
{
	return num_possible_cpus();
}

/* Total number of Tx queues */
static inline size_t dpaa_max_num_txqs(void)
{
	return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
}
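
/* Illustrative usage (hypothetical): a probe path would size the
 * net_device's Tx queue array from the helpers above, e.g. with
 * alloc_etherdev_mq() from linux/etherdevice.h:
 *
 *	net_dev = alloc_etherdev_mq(sizeof(struct dpaa_priv),
 *				    dpaa_max_num_txqs());
 */
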
#endif /* __DPAA_H */