direction == TLS_OFFLOAD_CTX_DIR_TX);
}
-static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
- u32 seq, u8 *rcd_sn_data)
+static void mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
+ u32 seq, u8 *rcd_sn_data,
+ enum tls_offload_ctx_dir direction)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_rx *rx_ctx;
	u64 rcd_sn = *(u64 *)rcd_sn_data;

+	if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
+		return;
rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
	netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq,
		    be64_to_cpu(rcd_sn));
static const struct tlsdev_ops mlx5e_tls_ops = {
.tls_dev_add = mlx5e_tls_add,
.tls_dev_del = mlx5e_tls_del,
- .tls_dev_resync_rx = mlx5e_tls_resync_rx,
+ .tls_dev_resync = mlx5e_tls_resync,
};
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
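Both drivers converted here offload RX resync only, so their generalized callbacks simply reject the TX direction with WARN_ON_ONCE(). The reason for threading the direction argument through at all is that a driver offloading both sides can then serve them from a single op. A minimal sketch of that shape, assuming hypothetical foo_* helpers that are not part of this patch:

/* Hypothetical driver serving both directions from the unified op;
 * all foo_* names are illustrative only.
 */
static void foo_tls_resync(struct net_device *netdev, struct sock *sk,
			   u32 seq, u8 *rcd_sn,
			   enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_RX)
		foo_tls_resync_rx(netdev, sk, seq, rcd_sn);
	else
		foo_tls_resync_tx(netdev, sk, seq, rcd_sn);
}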
}
static void
-nfp_net_tls_resync_rx(struct net_device *netdev, struct sock *sk, u32 seq,
- u8 *rcd_sn)
+nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
struct nfp_crypto_req_update *req;
	struct sk_buff *skb;

+	if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
+		return;
+
skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_ATOMIC);
if (!skb)
return;
static const struct tlsdev_ops nfp_net_tls_ops = {
.tls_dev_add = nfp_net_tls_add,
.tls_dev_del = nfp_net_tls_del,
- .tls_dev_resync_rx = nfp_net_tls_resync_rx,
+ .tls_dev_resync = nfp_net_tls_resync,
};
static int nfp_net_tls_reset(struct nfp_net *nn)
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
- void (*tls_dev_resync_rx)(struct net_device *netdev,
- struct sock *sk, u32 seq, u8 *rcd_sn);
+ void (*tls_dev_resync)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u8 *rcd_sn,
+ enum tls_offload_ctx_dir direction);
};
enum tls_offload_sync_type {
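For reference, the parameters of the new op as the callers below use them; the annotations are editorial, not kernel-doc from include/net/tls.h:

/* Annotated copy of the new op; comments are mine, not from tls.h:
 * @netdev:    device the TLS connection is offloaded to
 * @sk:        the offloaded socket
 * @seq:       TCP sequence number at which the record starts
 * @rcd_sn:    8-byte TLS record sequence number, in big-endian byte order
 * @direction: TLS_OFFLOAD_CTX_DIR_RX or TLS_OFFLOAD_CTX_DIR_TX
 */
void (*tls_dev_resync)(struct net_device *netdev,
		       struct sock *sk, u32 seq, u8 *rcd_sn,
		       enum tls_offload_ctx_dir direction);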
return;
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
- netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_RX);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
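The clear_bit_unlock() above pairs with a test_and_set_bit() at the (elided) top of this function: the TLS_RX_SYNC_RUNNING bit keeps the netdev from being torn down under an in-flight resync call. Roughly, paraphrasing net/tls/tls_device.c rather than quoting it:

/* Guard at the top of the function above (paraphrase, not verbatim): */
if (test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags))
	return;		/* a resync is already in flight */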
case NETDEV_REGISTER:
case NETDEV_FEAT_CHANGE:
if ((dev->features & NETIF_F_HW_TLS_RX) &&
- !dev->tlsdev_ops->tls_dev_resync_rx)
+ !dev->tlsdev_ops->tls_dev_resync)
return NOTIFY_BAD;
if (dev->tlsdev_ops &&
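The NETDEV_REGISTER / NETDEV_FEAT_CHANGE check above is what the rename means for driver authors: advertising NETIF_F_HW_TLS_RX without populating the unified resync op now fails with NOTIFY_BAD. A minimal wiring sketch, mirroring the mlx5e/nfp pattern above with hypothetical foo_* names:

/* Hypothetical registration; foo_* names are illustrative only. */
static const struct tlsdev_ops foo_tls_ops = {
	.tls_dev_add	= foo_tls_add,
	.tls_dev_del	= foo_tls_del,
	.tls_dev_resync	= foo_tls_resync,	/* required with NETIF_F_HW_TLS_RX */
};

static void foo_tls_build_netdev(struct net_device *netdev)
{
	netdev->tlsdev_ops = &foo_tls_ops;
	netdev->hw_features |= NETIF_F_HW_TLS_RX;
	netdev->features |= NETIF_F_HW_TLS_RX;
}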