|
44 | 44 | #include "ff_dpdk_kni.h" |
45 | 45 | #include "ff_config.h" |
46 | 46 |
|
| 47 | +#ifndef IPPROTO_OSPFIGP |
| 48 | +#define IPPROTO_OSPFIGP 89 /**< OSPFIGP */ |
| 49 | +#endif |
| 50 | + |
47 | 51 | /* Callback for request of changing MTU */ |
48 | 52 | /* Total octets in ethernet header */ |
49 | 53 | #define KNI_ENET_HEADER_SIZE 14 |
@@ -92,6 +96,8 @@ struct kni_interface_stats { |
92 | 96 | struct rte_ring **kni_rp; |
93 | 97 | struct kni_interface_stats **kni_stat; |
94 | 98 |
|
| 99 | +struct kni_ratelimit kni_rate_limt = {0, 0, 0}; |
| 100 | + |
95 | 101 | static void |
96 | 102 | set_bitmap(uint16_t port, unsigned char *bitmap) |
97 | 103 | { |
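The `kni_rate_limt` counters declared above are zero-initialized per process; the `struct kni_ratelimit` type itself must come from `ff_dpdk_kni.h`, which is not part of this diff. A minimal sketch of what that definition would have to contain, inferred only from the field accesses later in this patch (including the `gerneal_packets` spelling used below), might be:

```c
/* Hypothetical reconstruction: the real definition lives in
 * ff_dpdk_kni.h and may differ in field order or width. */
struct kni_ratelimit {
    uint64_t kernel_packets;   /* total packets forwarded to the kernel */
    uint64_t console_packets;  /* control traffic: ARP, OSPF, ... */
    uint64_t gerneal_packets;  /* remaining general traffic */
};
```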
@@ -219,24 +225,38 @@ kni_process_tx(uint16_t port_id, uint16_t queue_id, |
219 | 225 | struct rte_mbuf **pkts_burst, unsigned count) |
220 | 226 | { |
221 | 227 | /* read packet from kni ring(phy port) and transmit to kni */ |
222 | | - uint16_t nb_tx, nb_kni_tx = 0; |
| 228 | + uint16_t nb_tx, nb_to_tx, nb_kni_tx; |
223 | 229 | nb_tx = rte_ring_dequeue_burst(kni_rp[port_id], (void **)pkts_burst, count, NULL); |
224 | 230 |
|
| 231 | + /* |
| 232 | + * Rate limit the total packets forwarded to the kernel. A few extra packets may be sent, but that doesn't matter. |
| 233 | + * If there are many processes, control packets may also be rate limited. |
| 234 | + */ |
| 235 | + if (ff_global_cfg.kni.kernel_packets_ratelimit) { |
| 236 | + if (likely(kni_rate_limt.kernel_packets < ff_global_cfg.kni.kernel_packets_ratelimit)) { |
| 237 | + nb_to_tx = nb_tx; |
| 238 | + } else { |
| 239 | + nb_to_tx = 0; |
| 240 | + } |
| 241 | + kni_rate_limt.kernel_packets += nb_tx; |
| 242 | + } else { |
| 243 | + nb_to_tx = nb_tx; |
| 244 | + } |
| 245 | + |
225 | 246 | #ifdef FF_KNI_KNI |
226 | 247 | if (ff_global_cfg.kni.type == KNI_TYPE_KNI) { |
227 | 248 | /* NB. |
228 | 249 | * Even if nb_to_tx is 0, rte_kni_tx_burst() must still |
229 | 250 | * be called regularly, i.e. rte_kni_tx_burst(kni, NULL, 0). |
230 | 251 | * See https://embedded.communities.intel.com/thread/6668 |
231 | 252 | */ |
232 | | - nb_kni_tx = rte_kni_tx_burst(kni_stat[port_id]->kni, pkts_burst, nb_tx); |
| 253 | + nb_kni_tx = rte_kni_tx_burst(kni_stat[port_id]->kni, pkts_burst, nb_to_tx); |
233 | 254 | rte_kni_handle_request(kni_stat[port_id]->kni); |
234 | 255 | } else if (ff_global_cfg.kni.type == KNI_TYPE_VIRTIO) |
235 | 256 | #endif |
236 | 257 | { |
237 | | - nb_kni_tx = rte_eth_tx_burst(kni_stat[port_id]->port_id, 0, pkts_burst, nb_tx); |
| 258 | + nb_kni_tx = rte_eth_tx_burst(kni_stat[port_id]->port_id, 0, pkts_burst, nb_to_tx); |
238 | 259 | } |
239 | | - |
240 | 260 | if(nb_kni_tx < nb_tx) { |
241 | 261 | uint16_t i; |
242 | 262 | for(i = nb_kni_tx; i < nb_tx; ++i) |
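Note that this hunk only ever increments `kni_rate_limt.kernel_packets`, so the comparison against `ff_global_cfg.kni.kernel_packets_ratelimit` is only meaningful if the counters are cleared once per accounting interval somewhere outside this diff. A minimal sketch of that assumed reset, with a hypothetical `ff_kni_ratelimit_reset()` invoked once per interval (e.g. per second) from the lcore main loop:

```c
#include <string.h>

/* Hypothetical helper, not part of this patch: zero all rate-limit
 * buckets at the start of each interval so the per-interval limits
 * configured in ff_global_cfg.kni take effect again. */
static inline void
ff_kni_ratelimit_reset(struct kni_ratelimit *limit)
{
    memset(limit, 0, sizeof(*limit));
}
```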
@@ -419,22 +439,28 @@ protocol_filter_ip(const void *data, uint16_t len, uint16_t eth_frame_type) |
419 | 439 | next_len = len - hdr_len; |
420 | 440 |
|
421 | 441 | switch (proto) { |
| 442 | +#ifdef FF_KNI |
| 443 | + /* OSPF packets are forwarded to KNI and rate limited separately */ |
| 444 | + case IPPROTO_OSPFIGP: |
| 445 | + return FILTER_OSPF; |
| 446 | +#endif |
| 447 | + |
422 | 448 | case IPPROTO_TCP: |
423 | 449 | #ifdef FF_KNI |
424 | 450 | if (!enable_kni) |
425 | | - break; |
426 | | -#else |
427 | | - break; |
428 | 451 | #endif |
| 452 | + break; |
| 453 | + |
429 | 454 | return protocol_filter_tcp(next, next_len); |
| 455 | + |
430 | 456 | case IPPROTO_UDP: |
431 | 457 | #ifdef FF_KNI |
432 | 458 | if (!enable_kni) |
433 | | - break; |
434 | | -#else |
435 | | - break; |
436 | 459 | #endif |
| 460 | + break; |
| 461 | + |
437 | 462 | return protocol_filter_udp(next, next_len); |
| 463 | + |
438 | 464 | case IPPROTO_IPIP: |
439 | 465 | return protocol_filter_ip(next, next_len, RTE_ETHER_TYPE_IPV4); |
440 | 466 | #ifdef INET6 |
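`FILTER_OSPF` is new in this patch and, like `struct kni_ratelimit`, has to be declared in `ff_dpdk_kni.h`. Because `ff_kni_enqueue()` below treats every result `>= FILTER_ARP` as console traffic, `FILTER_OSPF` must compare at or above `FILTER_ARP`. A hedged sketch of the extended enum, assuming the pre-existing enumerators keep their values:

```c
/* Hypothetical sketch of the FilterReturn enum in ff_dpdk_kni.h;
 * only FILTER_OSPF is added by this patch, and the exact values of
 * the existing enumerators may differ. */
enum FilterReturn {
    FILTER_UNKNOWN = -1,
    FILTER_ARP = 1,
    FILTER_KNI,
    FILTER_OSPF,  /* OSPF, rate limited with the console bucket */
};
```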
@@ -628,12 +654,34 @@ ff_kni_process(uint16_t port_id, uint16_t queue_id, |
628 | 654 |
|
629 | 655 | /* enqueue the packet, and own it */ |
630 | 656 | int |
631 | | -ff_kni_enqueue(uint16_t port_id, struct rte_mbuf *pkt) |
| 657 | +ff_kni_enqueue(enum FilterReturn filter, uint16_t port_id, struct rte_mbuf *pkt) |
632 | 658 | { |
| 659 | + if (filter >= FILTER_ARP) { |
| 660 | + if (ff_global_cfg.kni.console_packets_ratelimit) { |
| 661 | + kni_rate_limt.console_packets++; |
| 662 | + if (kni_rate_limt.console_packets > ff_global_cfg.kni.console_packets_ratelimit) { |
| 663 | + goto error; |
| 664 | + } |
| 665 | + } |
| 666 | + } else { |
| 667 | + if (ff_global_cfg.kni.general_packets_ratelimit) { |
| 668 | + kni_rate_limt.gerneal_packets++; |
| 669 | + if (kni_rate_limt.gerneal_packets > ff_global_cfg.kni.general_packets_ratelimit) { |
| 670 | + goto error; |
| 671 | + } |
| 672 | + } |
| 673 | + } |
| 674 | + |
633 | 675 | int ret = rte_ring_enqueue(kni_rp[port_id], pkt); |
634 | | - if (ret < 0) |
635 | | - rte_pktmbuf_free(pkt); |
| 676 | + if (ret < 0) { |
| 677 | + goto error; |
| 678 | + } |
636 | 679 |
|
637 | 680 | return 0; |
| 681 | + |
| 682 | +error: |
| 683 | + rte_pktmbuf_free(pkt); |
| 684 | + |
| 685 | + return -1; |
638 | 686 | } |
639 | 687 |
|
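The widened signature means every caller now has to pass the `FilterReturn` value that classified the packet, since that value selects the rate-limit bucket: console (`>= FILTER_ARP`) or general. A hedged call-site sketch, with names borrowed from the hypothetical dispatch path in `ff_dpdk.c`:

```c
/* Hypothetical call site; the real dispatch lives in ff_dpdk.c.
 * ff_kni_enqueue() frees the mbuf itself on failure, so the caller
 * must not touch the packet after a negative return. */
enum FilterReturn filter = protocol_filter(data, len, eth_frame_type);
if (filter == FILTER_ARP || filter == FILTER_KNI || filter == FILTER_OSPF) {
    if (ff_kni_enqueue(filter, port_id, mbuf) < 0) {
        /* dropped: over the rate limit or the KNI ring was full */
    }
}
```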