net: macsec: add MAC offload support

Add a secy pointer to the macsec_offload context.
  Apply link changes before the mdo_upd_secy callback.
  Update the SCI when the MAC address changes.
  Invoke the mdo_upd_secy callback when the MAC address changes,
  letting the offload engine reconfigure the HW for the new SecY
  MAC address.
  Allow multiple MACsec devices with offload enabled.
  Improve HW offload support in handle_not_macsec().
  Add retrieval of offloaded statistics.
  Report real_dev features when HW offloading is enabled (#11).
  Pass all packets up the macsec device in promiscuous mode.

Change-Id: Ib1b842628a0c70cac4f8336ecea47a722c408e73
Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Co-authored-by: Mark Starovoytov <mstarovoitov@marvell.com>
Git-commit: f3e9bda3e968883bd70fe1719e6215226b328779
Git-repo: https://github.com/aquantia/linux-4.14-atlantic-forwarding
Signed-off-by: Jinesh K. Jayakumar <jineshk@codeaurora.org>
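
For driver authors, the practical effect of the context change is that the
core now fills ctx->secy before every mdo_* callback. A minimal sketch of a
callback relying on it, with hypothetical driver names (my_hw,
my_hw_from_netdev, my_hw_enable_secy):

static int my_mdo_dev_open(struct macsec_context *ctx)
{
	struct my_hw *hw = my_hw_from_netdev(ctx->secy->netdev); /* hypothetical */

	if (ctx->prepare)	/* two-phase commit: first pass only validates */
		return 0;

	/* Program the secure channel keyed by the SCI, which is derived
	 * from the current MAC address (see dev_to_sci() in the diff below).
	 */
	return my_hw_enable_secy(hw, ctx->secy->sci);
}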

@@ -78,17 +78,6 @@ struct gcm_iv {
__be32 pn;
};
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
__u64 OutPktsTooLong;
__u64 InPktsNoTag;
__u64 InPktsBadTag;
__u64 InPktsUnknownSCI;
__u64 InPktsNoSCI;
__u64 InPktsOverrun;
};
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
struct pcpu_secy_stats {
@@ -410,6 +399,11 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
return (struct macsec_eth_header *)skb_mac_header(skb);
}
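/* Context: make_sci(), defined earlier in this file, packs the 48-bit MAC
 * address followed by the 16-bit port number into the 64-bit SCI. That is
 * why the SCI has to be regenerated whenever the MAC address changes.
 */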
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
return make_sci(dev->dev_addr, port);
}
static void __macsec_pn_wrapped(struct macsec_secy *secy,
struct macsec_tx_sa *tx_sa)
{
@@ -924,22 +918,58 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
struct ethhdr *hdr = eth_hdr(skb);
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
/* 10.6 If the management control validateFrames is not
* Strict, frames without a SecTAG are received, counted, and
* delivered to the Controlled Port
*/
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
struct net_device *ndev = macsec->secy.netdev;
if (!macsec_get_ops(macsec, NULL) &&
macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
/* When HW offload is enabled, HW decodes frames and strips the
* SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_get_ops(macsec, NULL) && netif_running(ndev)) {
if (ndev->flags & IFF_PROMISC) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
break;
nskb->dev = ndev;
netif_rx(nskb);
} else if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* HW offload enabled, divert skb */
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
goto out;
} else if (is_multicast_ether_addr_64bits(hdr->h_dest)) {
/* multicast frame, deliver on this port as well */
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
break;
nskb->dev = ndev;
if (ether_addr_equal_64bits(hdr->h_dest, ndev->broadcast))
nskb->pkt_type = PACKET_BROADCAST;
else
nskb->pkt_type = PACKET_MULTICAST;
netif_rx(nskb);
}
continue;
}
/* 10.6 If the management control validateFrames is not
* Strict, frames without a SecTAG are received, counted, and
* delivered to the Controlled Port
*/
if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -958,12 +988,6 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
if (netif_running(macsec->secy.netdev) &&
macsec_get_ops(macsec, NULL)) {
ret = RX_HANDLER_EXACT;
goto out;
}
}
out:
@@ -1665,6 +1689,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (ops) {
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
@@ -1704,6 +1729,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct net_device *dev;
sci_t sci = MACSEC_UNDEF_SCI;
struct nlattr **attrs = info->attrs;
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
const struct macsec_ops *ops;
@@ -1727,6 +1753,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
}
secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
rx_sc = create_rx_sc(dev, sci);
@@ -1742,6 +1769,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.rx_sc = rx_sc;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
if (ret) {
@@ -1862,6 +1890,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (ops) {
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
@@ -1928,6 +1957,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
if (ops) {
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
if (ret) {
@@ -1985,6 +2015,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.rx_sc = rx_sc;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
if (ret) {
rtnl_unlock();
@@ -2035,6 +2066,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
if (ops) {
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_txsa, &ctx);
if (ret) {
@@ -2124,6 +2156,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
if (ops) {
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
if (ret) {
@@ -2195,6 +2228,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
if (ops) {
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
if (ret) {
@@ -2255,6 +2289,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.rx_sc = rx_sc;
ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
if (ret) {
@@ -2270,206 +2305,289 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
return 0;
}
static int copy_tx_sa_stats(struct sk_buff *skb,
struct macsec_tx_sa_stats __percpu *pstats)
static void get_tx_sa_stats(struct net_device *dev, int an,
struct macsec_tx_sa *tx_sa,
struct macsec_tx_sa_stats *sum)
{
struct macsec_tx_sa_stats sum = {0, };
const struct macsec_ops *ops;
struct macsec_context ctx;
int err = -EOPNOTSUPP;
int cpu;
for_each_possible_cpu(cpu) {
const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
sum.OutPktsProtected += stats->OutPktsProtected;
sum.OutPktsEncrypted += stats->OutPktsEncrypted;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.sa.assoc_num = an;
ctx.sa.tx_sa = tx_sa;
ctx.stats.tx_sa_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
err = macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
}
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
if (err == -EOPNOTSUPP) {
for_each_possible_cpu(cpu) {
const struct macsec_tx_sa_stats *stats =
per_cpu_ptr(tx_sa->stats, cpu);
sum->OutPktsProtected += stats->OutPktsProtected;
sum->OutPktsEncrypted += stats->OutPktsEncrypted;
}
}
}
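/* The pattern above -- try the mdo_get_* offload callback first and fall
 * back to summing the per-CPU software counters on -EOPNOTSUPP -- repeats
 * below for the RX SA, RX SC, TX SC and SecY statistics.
 */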
static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum->OutPktsProtected) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum->OutPktsEncrypted))
return -EMSGSIZE;
return 0;
}
static void get_rx_sa_stats(struct net_device *dev,
struct macsec_rx_sc *rx_sc, int an,
struct macsec_rx_sa *rx_sa,
struct macsec_rx_sa_stats *sum)
{
const struct macsec_ops *ops;
struct macsec_context ctx;
int err = -EOPNOTSUPP;
int cpu;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.sa.assoc_num = an;
ctx.sa.rx_sa = rx_sa;
ctx.stats.rx_sa_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
ctx.rx_sc = rx_sc;
err = macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
}
if (err == -EOPNOTSUPP) {
for_each_possible_cpu(cpu) {
const struct macsec_rx_sa_stats *stats =
per_cpu_ptr(rx_sa->stats, cpu);
sum->InPktsOK += stats->InPktsOK;
sum->InPktsInvalid += stats->InPktsInvalid;
sum->InPktsNotValid += stats->InPktsNotValid;
sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
sum->InPktsUnusedSA += stats->InPktsUnusedSA;
}
}
}
static int copy_rx_sa_stats(struct sk_buff *skb,
struct macsec_rx_sa_stats __percpu *pstats)
struct macsec_rx_sa_stats *sum)
{
struct macsec_rx_sa_stats sum = {0, };
int cpu;
for_each_possible_cpu(cpu) {
const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
sum.InPktsOK += stats->InPktsOK;
sum.InPktsInvalid += stats->InPktsInvalid;
sum.InPktsNotValid += stats->InPktsNotValid;
sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
sum.InPktsUnusedSA += stats->InPktsUnusedSA;
}
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum->InPktsInvalid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum->InPktsNotValid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum->InPktsNotUsingSA) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum->InPktsUnusedSA))
return -EMSGSIZE;
return 0;
}
static int copy_rx_sc_stats(struct sk_buff *skb,
struct pcpu_rx_sc_stats __percpu *pstats)
static void get_rx_sc_stats(struct net_device *dev,
struct macsec_rx_sc *rx_sc,
struct macsec_rx_sc_stats *sum)
{
struct macsec_rx_sc_stats sum = {0, };
const struct macsec_ops *ops;
struct macsec_context ctx;
int err = -EOPNOTSUPP;
int cpu;
for_each_possible_cpu(cpu) {
const struct pcpu_rx_sc_stats *stats;
struct macsec_rx_sc_stats tmp;
unsigned int start;
stats = per_cpu_ptr(pstats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.InOctetsValidated += tmp.InOctetsValidated;
sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
sum.InPktsUnchecked += tmp.InPktsUnchecked;
sum.InPktsDelayed += tmp.InPktsDelayed;
sum.InPktsOK += tmp.InPktsOK;
sum.InPktsInvalid += tmp.InPktsInvalid;
sum.InPktsLate += tmp.InPktsLate;
sum.InPktsNotValid += tmp.InPktsNotValid;
sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.stats.rx_sc_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
ctx.rx_sc = rx_sc;
err = macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
}
if (err == -EOPNOTSUPP) {
for_each_possible_cpu(cpu) {
const struct pcpu_rx_sc_stats *stats;
struct macsec_rx_sc_stats tmp;
unsigned int start;
stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum->InOctetsValidated += tmp.InOctetsValidated;
sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
sum->InPktsUnchecked += tmp.InPktsUnchecked;
sum->InPktsDelayed += tmp.InPktsDelayed;
sum->InPktsOK += tmp.InPktsOK;
sum->InPktsInvalid += tmp.InPktsInvalid;
sum->InPktsLate += tmp.InPktsLate;
sum->InPktsNotValid += tmp.InPktsNotValid;
sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
}
}
}
static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
sum.InOctetsValidated,
sum->InOctetsValidated,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
sum.InOctetsDecrypted,
sum->InOctetsDecrypted,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
sum.InPktsUnchecked,
sum->InPktsUnchecked,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
sum.InPktsDelayed,
sum->InPktsDelayed,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
sum.InPktsOK,
sum->InPktsOK,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
sum.InPktsInvalid,
sum->InPktsInvalid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
sum.InPktsLate,
sum->InPktsLate,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
sum.InPktsNotValid,
sum->InPktsNotValid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
sum.InPktsNotUsingSA,
sum->InPktsNotUsingSA,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
sum.InPktsUnusedSA,
sum->InPktsUnusedSA,
MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
static int copy_tx_sc_stats(struct sk_buff *skb,
struct pcpu_tx_sc_stats __percpu *pstats)
static void get_tx_sc_stats(struct net_device *dev, struct macsec_tx_sc_stats *sum)
{
struct macsec_tx_sc_stats sum = {0, };
const struct macsec_ops *ops;
struct macsec_context ctx;
int err = -EOPNOTSUPP;
int cpu;
for_each_possible_cpu(cpu) {
const struct pcpu_tx_sc_stats *stats;
struct macsec_tx_sc_stats tmp;
unsigned int start;
stats = per_cpu_ptr(pstats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.OutPktsProtected += tmp.OutPktsProtected;
sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
sum.OutOctetsProtected += tmp.OutOctetsProtected;
sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
/* If h/w offloading is available, propagate to the device */
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.stats.tx_sc_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
err = macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
}
if (err == -EOPNOTSUPP) {
for_each_possible_cpu(cpu) {
const struct pcpu_tx_sc_stats *stats;
struct macsec_tx_sc_stats tmp;
unsigned int start;
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum->OutPktsProtected += tmp.OutPktsProtected;
sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
sum->OutOctetsProtected += tmp.OutOctetsProtected;
sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
}
}
}
static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
sum.OutPktsProtected,
sum->OutPktsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
sum.OutPktsEncrypted,
sum->OutPktsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
sum.OutOctetsProtected,
sum->OutOctetsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
sum.OutOctetsEncrypted,
sum->OutOctetsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
static int copy_secy_stats(struct sk_buff *skb,
struct pcpu_secy_stats __percpu *pstats)
static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
struct macsec_dev_stats sum = {0, };
const struct macsec_ops *ops;
struct macsec_context ctx;
int err = -EOPNOTSUPP;
int cpu;
for_each_possible_cpu(cpu) {
const struct pcpu_secy_stats *stats;
struct macsec_dev_stats tmp;
unsigned int start;
stats = per_cpu_ptr(pstats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.OutPktsUntagged += tmp.OutPktsUntagged;
sum.InPktsUntagged += tmp.InPktsUntagged;
sum.OutPktsTooLong += tmp.OutPktsTooLong;
sum.InPktsNoTag += tmp.InPktsNoTag;
sum.InPktsBadTag += tmp.InPktsBadTag;
sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
sum.InPktsNoSCI += tmp.InPktsNoSCI;
sum.InPktsOverrun += tmp.InPktsOverrun;
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.stats.dev_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
err = macsec_offload(ops->mdo_get_dev_stats, &ctx);
}
if (err == -EOPNOTSUPP) {
for_each_possible_cpu(cpu) {
const struct pcpu_secy_stats *stats;
struct macsec_dev_stats tmp;
unsigned int start;
stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum->OutPktsUntagged += tmp.OutPktsUntagged;
sum->InPktsUntagged += tmp.InPktsUntagged;
sum->OutPktsTooLong += tmp.OutPktsTooLong;
sum->InPktsNoTag += tmp.InPktsNoTag;
sum->InPktsBadTag += tmp.InPktsBadTag;
sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
sum->InPktsNoSCI += tmp.InPktsNoSCI;
sum->InPktsOverrun += tmp.InPktsOverrun;
}
}
}
static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
sum.OutPktsUntagged,
sum->OutPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
sum.InPktsUntagged,
sum->InPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
sum.OutPktsTooLong,
sum->OutPktsTooLong,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
sum.InPktsNoTag,
sum->InPktsNoTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
sum.InPktsBadTag,
sum->InPktsBadTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
sum.InPktsUnknownSCI,
sum->InPktsUnknownSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
sum.InPktsNoSCI,
sum->InPktsNoSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
sum.InPktsOverrun,
sum->InPktsOverrun,
MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE;
@@ -2520,6 +2638,11 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct macsec_rx_sc *rx_sc;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
struct macsec_dev_stats dev_stats = {0, };
struct macsec_tx_sc_stats tx_sc_stats = {0, };
struct macsec_tx_sa_stats tx_sa_stats = {0, };
struct macsec_rx_sc_stats rx_sc_stats = {0, };
struct macsec_rx_sa_stats rx_sa_stats = {0, };
int i, j;
void *hdr;
struct nlattr *attr;
@@ -2540,7 +2663,9 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr)
goto nla_put_failure;
if (copy_tx_sc_stats(skb, tx_sc->stats)) {
get_tx_sc_stats(dev, &tx_sc_stats);
if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
@@ -2549,7 +2674,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
if (!attr)
goto nla_put_failure;
if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
get_secy_stats(dev, &dev_stats);
if (copy_secy_stats(skb, &dev_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
@@ -2571,6 +2697,22 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
nla_nest_end(skb, attr);
if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
@@ -2580,20 +2722,6 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
if (copy_tx_sa_stats(skb, tx_sa->stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
nla_nest_end(skb, attr);
nla_nest_end(skb, txsa_nest);
}
nla_nest_end(skb, txsa_list);
@@ -2627,7 +2755,9 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
if (copy_rx_sc_stats(skb, rx_sc->stats)) {
memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2664,7 +2794,9 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
if (copy_rx_sa_stats(skb, rx_sa->stats)) {
memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2857,14 +2989,22 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return ret;
}
#define MACSEC_FEATURES \
#define SW_MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static struct lock_class_key macsec_netdev_addr_lock_key;
/* If h/w offloading is enabled, use real device features save for
* VLAN_FEATURES - they require additional ops
* HW_MACSEC - no reason to report it
*/
#define REAL_DEV_FEATURES(dev) \
((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
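/* REAL_DEV_FEATURES is applied in macsec_dev_init() and
 * macsec_fix_features() below whenever offload ops are present.
 */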
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
const struct macsec_ops *ops;
int err;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2877,8 +3017,13 @@ static int macsec_dev_init(struct net_device *dev)
return err;
}
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
ops = macsec_get_ops(netdev_priv(dev), NULL);
if (ops) {
dev->features = REAL_DEV_FEATURES(real_dev);
} else {
dev->features = real_dev->features & SW_MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
}
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
@@ -2906,8 +3051,13 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
const struct macsec_ops *ops;
features &= (real_dev->features & MACSEC_FEATURES) |
ops = macsec_get_ops(netdev_priv(dev), NULL);
if (ops)
return REAL_DEV_FEATURES(real_dev);
features &= (real_dev->features & SW_MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
@@ -2944,6 +3094,7 @@ static int macsec_dev_open(struct net_device *dev)
/* If h/w offloading is available, propagate to the device */
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.secy = &macsec->secy;
err = macsec_offload(ops->mdo_dev_open, &ctx);
if (err)
goto clear_allmulti;
@@ -2973,8 +3124,10 @@ static int macsec_dev_stop(struct net_device *dev)
/* If h/w offloading is available, propagate to the device */
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops)
if (ops) {
ctx.secy = &macsec->secy;
macsec_offload(ops->mdo_dev_stop, &ctx);
}
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
@@ -3017,6 +3170,8 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
const struct macsec_ops *ops;
struct macsec_context ctx;
struct sockaddr *addr = p;
int err;
@@ -3034,6 +3189,16 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
ether_addr_copy(dev->dev_addr, addr->sa_data);
macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
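/* The SCI embeds the MAC address, so it was regenerated above; the
 * mdo_upd_secy call below lets the offload engine reprogram its
 * secure-channel tables for the new address.
 */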
/* If h/w offloading is available, propagate to the device */
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
ctx.secy = &macsec->secy;
return macsec_offload(ops->mdo_upd_secy, &ctx);
}
return 0;
}
@@ -3212,6 +3377,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
data[IFLA_MACSEC_PORT])
return -EINVAL;
macsec_changelink_common(dev, data);
/* If h/w offloading is available, propagate to the device */
ops = macsec_get_ops(netdev_priv(dev), &ctx);
if (ops) {
@@ -3219,8 +3386,6 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
return macsec_offload(ops->mdo_upd_secy, &ctx);
}
macsec_changelink_common(dev, data);
return 0;
}
@@ -3321,11 +3486,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
return make_sci(dev->dev_addr, port);
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3368,10 +3528,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev, *loop_dev;
struct net_device *real_dev;
struct macsec_context ctx;
const struct macsec_ops *ops;
struct net *loop_net;
int err;
sci_t sci;
u8 icv_len = DEFAULT_ICV_LEN;
@@ -3383,25 +3542,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (!real_dev)
return -ENODEV;
for_each_net(loop_net) {
for_each_netdev(loop_net, loop_dev) {
struct macsec_dev *priv;
if (!netif_is_macsec(loop_dev))
continue;
priv = macsec_priv(loop_dev);
/* A limitation of the MACsec h/w offloading is only a
* single MACsec interface can be created for a given
* real interface.
*/
if (macsec_get_ops(netdev_priv(dev), NULL) &&
priv->real_dev == real_dev)
return -EBUSY;
}
}
dev->priv_flags |= IFF_MACSEC;
macsec->real_dev = real_dev;


@@ -847,6 +847,12 @@ struct macsec_ops {
int (*mdo_add_txsa)(struct macsec_context *ctx);
int (*mdo_upd_txsa)(struct macsec_context *ctx);
int (*mdo_del_txsa)(struct macsec_context *ctx);
/* Statistics */
int (*mdo_get_dev_stats)(struct macsec_context *ctx);
int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
};
#endif
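
A minimal sketch of one of the new statistics callbacks, again with
hypothetical driver names (my_hw_from_secy, my_hw_read64, MY_CNT_*). The
core zeroes the destination structure and points ctx->stats.dev_stats at it
before the call:

static int my_mdo_get_dev_stats(struct macsec_context *ctx)
{
	struct macsec_dev_stats *s = ctx->stats.dev_stats;
	struct my_hw *hw = my_hw_from_secy(ctx->secy);	/* hypothetical */

	if (ctx->prepare)	/* nothing to validate for a read-only op */
		return 0;

	s->OutPktsUntagged = my_hw_read64(hw, MY_CNT_OUT_UNTAGGED);
	s->InPktsUntagged  = my_hw_read64(hw, MY_CNT_IN_UNTAGGED);
	s->InPktsNoTag     = my_hw_read64(hw, MY_CNT_IN_NOTAG);
	s->InPktsBadTag    = my_hw_read64(hw, MY_CNT_IN_BADTAG);

	return 0;
}

A driver that cannot report a given counter set can simply return
-EOPNOTSUPP, which makes the core fall back to the software per-CPU sums,
as the get_*_stats() helpers above show.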


@@ -59,6 +59,17 @@ struct macsec_tx_sc_stats {
__u64 OutOctetsEncrypted;
};
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
__u64 OutPktsTooLong;
__u64 InPktsNoTag;
__u64 InPktsBadTag;
__u64 InPktsUnknownSCI;
__u64 InPktsNoSCI;
__u64 InPktsOverrun;
};
/**
* struct macsec_rx_sa - receive secure association
* @active:
@@ -195,6 +206,13 @@ struct macsec_context {
const struct macsec_tx_sa *tx_sa;
};
} sa;
union {
struct macsec_tx_sc_stats *tx_sc_stats;
struct macsec_tx_sa_stats *tx_sa_stats;
struct macsec_rx_sc_stats *rx_sc_stats;
struct macsec_rx_sa_stats *rx_sa_stats;
struct macsec_dev_stats *dev_stats;
} stats;
u8 prepare:1;
u8 is_phy:1;