@@ -1521,6 +1521,139 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
        return 0;
}

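+/* Fold the hardware counters of one nft_merge flow into the caller's
+ * running totals and mirror them into the cached stats of the parent
+ * flows that did not originate this stats request.
+ */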
+static void
+nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
+                    enum ct_entry_type type, u64 *m_pkts,
+                    u64 *m_bytes, u64 *m_used)
+{
+        struct nfp_flower_priv *priv = nft_merge->zt->priv;
+        struct nfp_fl_payload *nfp_flow;
+        u32 ctx_id;
+
+        nfp_flow = nft_merge->flow_pay;
+        if (!nfp_flow)
+                return;
+
+        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+        *m_pkts += priv->stats[ctx_id].pkts;
+        *m_bytes += priv->stats[ctx_id].bytes;
+        *m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);
+
+        /* If request is for a sub_flow which is part of a tunnel merged
+         * flow then update stats from tunnel merged flows first.
+         */
+        if (!list_empty(&nfp_flow->linked_flows))
+                nfp_flower_update_merge_stats(priv->app, nfp_flow);
+
+        if (type != CT_TYPE_NFT) {
+                /* Update nft cached stats */
+                flow_stats_update(&nft_merge->nft_parent->stats,
+                                  priv->stats[ctx_id].bytes,
+                                  priv->stats[ctx_id].pkts,
+                                  0, priv->stats[ctx_id].used,
+                                  FLOW_ACTION_HW_STATS_DELAYED);
+        } else {
+                /* Update pre_ct cached stats */
+                flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
+                                  priv->stats[ctx_id].bytes,
+                                  priv->stats[ctx_id].pkts,
+                                  0, priv->stats[ctx_id].used,
+                                  FLOW_ACTION_HW_STATS_DELAYED);
+                /* Update post_ct cached stats */
+                flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
+                                  priv->stats[ctx_id].bytes,
+                                  priv->stats[ctx_id].pkts,
+                                  0, priv->stats[ctx_id].used,
+                                  FLOW_ACTION_HW_STATS_DELAYED);
+        }
+        /* Reset stats from the nfp */
+        priv->stats[ctx_id].pkts = 0;
+        priv->stats[ctx_id].bytes = 0;
+}
+
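+/* Aggregate the hardware stats of every merged flow built from this
+ * conntrack entry, propagate them to the tc/nft partner flows and
+ * report the combined total through the flow_cls_offload request.
+ */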
+int nfp_fl_ct_stats(struct flow_cls_offload *flow,
+                    struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+        struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
+        struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
+        struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;
+
+        u64 pkts = 0, bytes = 0, used = 0;
+        u64 m_pkts, m_bytes, m_used;
+
+        spin_lock_bh(&ct_entry->zt->priv->stats_lock);
+
+        if (ct_entry->type == CT_TYPE_PRE_CT) {
+                /* Iterate tc_merge entries associated with this flow */
+                list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+                                         pre_ct_list) {
+                        m_pkts = 0;
+                        m_bytes = 0;
+                        m_used = 0;
+                        /* Iterate nft_merge entries associated with this tc_merge flow */
+                        list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+                                                 tc_merge_list) {
+                                nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
+                                                    &m_pkts, &m_bytes, &m_used);
+                        }
+                        pkts += m_pkts;
+                        bytes += m_bytes;
+                        used = max_t(u64, used, m_used);
+                        /* Update post_ct partner */
+                        flow_stats_update(&tc_merge->post_ct_parent->stats,
+                                          m_bytes, m_pkts, 0, m_used,
+                                          FLOW_ACTION_HW_STATS_DELAYED);
+                }
+        } else if (ct_entry->type == CT_TYPE_POST_CT) {
+                /* Iterate tc_merge entries associated with this flow */
+                list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
+                                         post_ct_list) {
+                        m_pkts = 0;
+                        m_bytes = 0;
+                        m_used = 0;
+                        /* Iterate nft_merge entries associated with this tc_merge flow */
+                        list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
+                                                 tc_merge_list) {
+                                nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
+                                                    &m_pkts, &m_bytes, &m_used);
+                        }
+                        pkts += m_pkts;
+                        bytes += m_bytes;
+                        used = max_t(u64, used, m_used);
+                        /* Update pre_ct partner */
+                        flow_stats_update(&tc_merge->pre_ct_parent->stats,
+                                          m_bytes, m_pkts, 0, m_used,
+                                          FLOW_ACTION_HW_STATS_DELAYED);
+                }
+        } else {
+                /* Iterate nft_merge entries associated with this nft flow */
+                list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
+                                         nft_flow_list) {
+                        nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
+                                            &pkts, &bytes, &used);
+                }
+        }
+
+        /* Add stats from this request to stats potentially cached by
+         * previous requests.
+         */
+        flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
+                          FLOW_ACTION_HW_STATS_DELAYED);
+        /* Finally update the flow stats from the original stats request */
+        flow_stats_update(&flow->stats, ct_entry->stats.bytes,
+                          ct_entry->stats.pkts, 0,
+                          ct_entry->stats.lastused,
+                          FLOW_ACTION_HW_STATS_DELAYED);
+        /* Stats have been synced to the original flow, can now clear
+         * the cache.
+         */
+        ct_entry->stats.pkts = 0;
+        ct_entry->stats.bytes = 0;
+        spin_unlock_bh(&ct_entry->zt->priv->stats_lock);
+
+        return 0;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1553,7 +1686,11 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
                                               nfp_ct_map_params);
                return nfp_fl_ct_del_flow(ct_map_ent);
        case FLOW_CLS_STATS:
-               return 0;
+               ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+                                                   nfp_ct_map_params);
+               if (ct_map_ent)
+                       return nfp_fl_ct_stats(flow, ct_map_ent);
+               break;
        default:
                break;
        }