DPDK  25.11.0
rte_mbuf.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright 2014 6WIND S.A.
4  */
5 
6 #ifndef _RTE_MBUF_H_
7 #define _RTE_MBUF_H_
8 
34 #include <stdbool.h>
35 #include <stdint.h>
36 
37 #include <rte_common.h>
38 #include <rte_config.h>
39 #include <rte_mempool.h>
40 #include <rte_prefetch.h>
41 #include <rte_branch_prediction.h>
42 #include <rte_mbuf_ptype.h>
43 #include <rte_mbuf_core.h>
44 #include <rte_mbuf_history.h>
45 
46 #ifdef __cplusplus
47 extern "C" {
48 #endif
49 
58 const char *rte_get_rx_ol_flag_name(uint64_t mask);
59 
72 int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
73 
84 const char *rte_get_tx_ol_flag_name(uint64_t mask);
85 
98 int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
99 
110 static inline void
112 {
113  rte_prefetch0(m);
114 }
115 
127 static inline void
129 {
130 #if RTE_CACHE_LINE_SIZE == 64
132 #else
133  RTE_SET_USED(m);
134 #endif
135 }
136 
137 
138 static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
139 
148 static inline rte_iova_t
149 rte_mbuf_iova_get(const struct rte_mbuf *m)
150 {
151 #if RTE_IOVA_IN_MBUF
152  return m->buf_iova;
153 #else
154  return (rte_iova_t)m->buf_addr;
155 #endif
156 }
157 
166 static inline void
168 {
169 #if RTE_IOVA_IN_MBUF
170  m->buf_iova = iova;
171 #else
172  RTE_SET_USED(m);
173  RTE_SET_USED(iova);
174 #endif
175 }
176 
185 static inline rte_iova_t
186 rte_mbuf_data_iova(const struct rte_mbuf *mb)
187 {
188  return rte_mbuf_iova_get(mb) + mb->data_off;
189 }
190 
203 static inline rte_iova_t
205 {
206  return rte_mbuf_iova_get(mb) + RTE_PKTMBUF_HEADROOM;
207 }
208 
217 static inline struct rte_mbuf *
219 {
220  return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
221 }
222 
238 static inline char *
239 rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
240 {
241  return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
242 }
243 
252 static inline char *
254 {
255  return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
256 }
257 
271 static inline char *
273 {
274  return rte_mbuf_buf_addr(md, md->pool);
275 }
276 
289 static inline void *
291 {
292  return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
293 }
294 
303  uint16_t mbuf_priv_size;
304  uint32_t flags;
305 };
306 
315 static inline uint32_t
317 {
318  struct rte_pktmbuf_pool_private *mbp_priv;
319 
320  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
321  return mbp_priv->flags;
322 }
323 
330 #define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)
331 
339 #define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
340  (rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
341 
342 #if defined RTE_LIBRTE_MBUF_DEBUG || defined __DOXYGEN__
343 
345 #define __rte_mbuf_raw_sanity_check_mp(m, mp) rte_mbuf_raw_sanity_check(m, mp)
347 #define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
348 
349 #else /* !RTE_LIBRTE_MBUF_DEBUG */
350 
351 #define __rte_mbuf_raw_sanity_check_mp(m, mp) do { } while (0)
352 #define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
353 
354 #endif /* RTE_LIBRTE_MBUF_DEBUG */
355 
356 #ifdef RTE_MBUF_REFCNT_ATOMIC
357 
/**
 * Read the reference counter of an mbuf (atomic build variant).
 *
 * @param m
 *   The mbuf whose refcnt is read.
 * @return
 *   The current reference count, loaded with relaxed ordering.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return rte_atomic_load_explicit(&m->refcnt, rte_memory_order_relaxed);
}
370 
/**
 * Set the reference counter of an mbuf (atomic build variant).
 *
 * A plain relaxed store: callers must guarantee no concurrent updates
 * (typically because they hold the only reference).
 *
 * @param m
 *   The mbuf to update.
 * @param new_value
 *   The value to store into refcnt.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic_store_explicit(&m->refcnt, new_value, rte_memory_order_relaxed);
}
383 
/* internal */
/*
 * Atomically add @p value to the mbuf reference counter and return the
 * updated count (acq_rel ordering pairs the decrement with the free).
 */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return rte_atomic_fetch_add_explicit(&m->refcnt, value,
		rte_memory_order_acq_rel) + value;
}
391 
/**
 * Add @p value to the reference counter of an mbuf and return the new
 * count (atomic build variant).
 *
 * @param m
 *   The mbuf to update.
 * @param value
 *   Signed delta to apply to the refcnt.
 * @return
 *   The updated reference count.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * Fast path: when this thread holds the sole reference
	 * (refcnt == 1), no concurrent access to the counter is
	 * possible, so the expensive atomic add can be replaced by a
	 * plain store.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		uint16_t updated = (uint16_t)(1 + value);

		rte_mbuf_refcnt_set(m, updated);
		return updated;
	}

	/* Shared mbuf: fall back to the atomic read-modify-write. */
	return __rte_mbuf_refcnt_update(m, value);
}
419 
420 #else /* ! RTE_MBUF_REFCNT_ATOMIC */
421 
422 /* internal */
423 static inline uint16_t
424 __rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
425 {
426  m->refcnt = (uint16_t)(m->refcnt + value);
427  return m->refcnt;
428 }
429 
/**
 * Add @p value to the mbuf reference counter (non-atomic build
 * variant). Callers must guarantee single-threaded access to the mbuf.
 *
 * @return
 *   The updated reference count.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}
438 
442 static inline uint16_t
444 {
445  return m->refcnt;
446 }
447 
/**
 * Set the mbuf reference counter (non-atomic build variant).
 *
 * @param m
 *   The mbuf to update.
 * @param new_value
 *   The value to store into refcnt.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}
456 
457 #endif /* RTE_MBUF_REFCNT_ATOMIC */
458 
467 static inline uint16_t
469 {
470  return rte_atomic_load_explicit(&shinfo->refcnt, rte_memory_order_relaxed);
471 }
472 
481 static inline void
483  uint16_t new_value)
484 {
485  rte_atomic_store_explicit(&shinfo->refcnt, new_value, rte_memory_order_relaxed);
486 }
487 
499 static inline uint16_t
501  int16_t value)
502 {
503  if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
504  ++value;
505  rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
506  return (uint16_t)value;
507  }
508 
509  return rte_atomic_fetch_add_explicit(&shinfo->refcnt, value,
510  rte_memory_order_acq_rel) + value;
511 }
512 
514 #define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
515  if ((m) != NULL) \
516  rte_prefetch0(m); \
517 } while (0)
518 
519 
535 void
536 rte_mbuf_raw_sanity_check(const struct rte_mbuf *m, const struct rte_mempool *mp);
537 
557 int rte_mbuf_raw_check(const struct rte_mbuf *m, const struct rte_mempool *mp,
558  const char **reason);
559 
572 void
573 rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
574 
594 int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
595  const char **reason);
596 
598 #define __rte_mbuf_raw_sanity_check(m) __rte_mbuf_raw_sanity_check_mp(m, NULL)
599 
601 #define MBUF_RAW_ALLOC_CHECK(m) __rte_mbuf_raw_sanity_check_mp(m, NULL)
602 
622 static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
623 {
624  union {
625  void *ptr;
626  struct rte_mbuf *m;
627  } ret;
628 
629  if (rte_mempool_get(mp, &ret.ptr) < 0)
630  return NULL;
632 
634 
635  return ret.m;
636 }
637 
662 static __rte_always_inline int
663 rte_mbuf_raw_alloc_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
664 {
665  int rc = rte_mempool_get_bulk(mp, (void **)mbufs, count);
666  if (likely(rc == 0)) {
667  for (unsigned int idx = 0; idx < count; idx++)
668  __rte_mbuf_raw_sanity_check_mp(mbufs[idx], mp);
669  }
670 
672 
673  return rc;
674 }
675 
690 static __rte_always_inline void
692 {
695  rte_mempool_put(m->pool, m);
696 }
697 
718 static __rte_always_inline void
719 rte_mbuf_raw_free_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
720 {
721  for (unsigned int idx = 0; idx < count; idx++)
722  __rte_mbuf_raw_sanity_check_mp(mbufs[idx], mp);
724  rte_mempool_put_bulk(mp, (void **)mbufs, count);
725 }
726 
749 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
750  void *m, unsigned i);
751 
772 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
773 
807 struct rte_mempool *
808 rte_pktmbuf_pool_create(const char *name, unsigned n,
809  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
810  int socket_id);
811 
848 struct rte_mempool *
849 rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
850  unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
851  int socket_id, const char *ops_name);
852 
855  void *buf_ptr;
857  size_t buf_len;
858  uint16_t elt_size;
859 };
860 
901 struct rte_mempool *
902 rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
903  unsigned int cache_size, uint16_t priv_size,
904  uint16_t data_room_size, int socket_id,
905  const struct rte_pktmbuf_extmem *ext_mem,
906  unsigned int ext_num);
907 
919 static inline uint16_t
921 {
922  struct rte_pktmbuf_pool_private *mbp_priv;
923 
924  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
925  return mbp_priv->mbuf_data_room_size;
926 }
927 
940 static inline uint16_t
942 {
943  struct rte_pktmbuf_pool_private *mbp_priv;
944 
945  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
946  return mbp_priv->mbuf_priv_size;
947 }
948 
957 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
958 {
959  m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
960  (uint16_t)m->buf_len);
961 }
962 
981 static inline void
982 rte_mbuf_raw_reset_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
983 {
984  uint64_t ol_flags = (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) ?
986  uint16_t data_off = RTE_MIN_T(RTE_PKTMBUF_HEADROOM, rte_pktmbuf_data_room_size(mp),
987  uint16_t);
988 
989  for (unsigned int idx = 0; idx < count; idx++) {
990  struct rte_mbuf *m = mbufs[idx];
991 
992  m->pkt_len = 0;
993  m->tx_offload = 0;
994  m->vlan_tci = 0;
995  m->vlan_tci_outer = 0;
997 
998  m->ol_flags = ol_flags;
999  m->packet_type = 0;
1000  m->data_off = data_off;
1001 
1002  m->data_len = 0;
1004  }
1005 }
1006 
/**
 * Reset the fields of a packet mbuf to their default allocation state.
 *
 * NOTE(review): several reset statements appear to be missing from
 * this extracted view (e.g. port, ol_flags and data_off are not reset
 * here, and no sanity check is visible) — verify against the full
 * header before relying on this listing.
 */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;

	m->packet_type = 0;

	m->data_len = 0;
}
1032 
1046 static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
1047 {
1048  struct rte_mbuf *m;
1049  if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
1050  rte_mbuf_raw_reset_bulk(mp, &m, 1);
1051  return m;
1052 }
1053 
/**
 * Allocate a bulk of packet mbufs and reset each one to the default
 * state. The allocation is all-or-nothing.
 *
 * @param pool
 *   The mempool to allocate from.
 * @param mbufs
 *   Output array of @p count mbuf pointers.
 * @param count
 *   Number of mbufs to allocate.
 * @return
 *   0 on success, a negative errno-style value on failure.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	int rc = rte_mbuf_raw_alloc_bulk(pool, mbufs, count);

	if (likely(rc == 0))
		rte_mbuf_raw_reset_bulk(pool, mbufs, count);

	return rc;
}
1083 
/**
 * Reserve a shared-info structure at the tail of an external buffer
 * and initialize it (free callback, opaque argument, refcnt = 1). On
 * success, *buf_len is shrunk to the usable data area preceding the
 * shared info.
 *
 * NOTE(review): the parameter list is truncated in this extract — the
 * missing continuation should declare the free_cb and fcb_opaque
 * parameters used below; verify against the full header.
 *
 * @return
 *   Pointer to the initialized shared info, or NULL if the buffer is
 *   too small to hold it.
 */
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
{
	struct rte_mbuf_ext_shared_info *shinfo;
	void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
	void *addr;

	/* place shinfo at the buffer end, aligned down to pointer size */
	addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
		sizeof(uintptr_t));
	if (addr <= buf_addr)
		return NULL;

	shinfo = (struct rte_mbuf_ext_shared_info *)addr;
	shinfo->free_cb = free_cb;
	shinfo->fcb_opaque = fcb_opaque;
	rte_mbuf_ext_refcnt_set(shinfo, 1);

	/* usable data length is everything before the shared info */
	*buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
	return shinfo;
}
1137 
/**
 * Attach an external buffer to an mbuf.
 *
 * The mbuf's data pointer/IOVA are redirected to the caller-provided
 * buffer, and the shared info (refcnt + free callback) is recorded.
 *
 * NOTE(review): in the full header an `ol_flags |= RTE_MBUF_F_EXTERNAL`
 * statement marks the mbuf as carrying an external buffer; that line
 * appears to be missing from this extracted view — verify.
 */
static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
	rte_iova_t buf_iova, uint16_t buf_len,
	struct rte_mbuf_ext_shared_info *shinfo)
{
	/* mbuf should not be read-only */
	RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(shinfo->free_cb != NULL);

	m->buf_addr = buf_addr;
	rte_mbuf_iova_set(m, buf_iova);
	m->buf_len = buf_len;

	/* new buffer starts empty, with no headroom */
	m->data_len = 0;
	m->data_off = 0;

	m->shinfo = shinfo;
}
1217 
1225 #define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1226 
1235 static inline void
1236 rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
1237 {
1238 #if !RTE_IOVA_IN_MBUF
1239  mdst->dynfield2 = msrc->dynfield2;
1240 #endif
1241  memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
1242 }
1243 
1244 /* internal */
1245 static inline void
1246 __rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
1247 {
1248  mdst->port = msrc->port;
1249  mdst->vlan_tci = msrc->vlan_tci;
1250  mdst->vlan_tci_outer = msrc->vlan_tci_outer;
1251  mdst->tx_offload = msrc->tx_offload;
1252  mdst->hash = msrc->hash;
1253  mdst->packet_type = msrc->packet_type;
1254  rte_mbuf_dynfield_copy(mdst, msrc);
1255 }
1256 
/**
 * Attach mbuf @p mi to the data of mbuf @p m, making @p mi share @p m's
 * buffer instead of owning one.
 *
 * NOTE(review): this extracted view is missing several lines that the
 * full header contains (the external-buffer refcnt bump, the
 * resolution of an indirect @p m to its direct mbuf plus its refcnt
 * update, and the setting of the INDIRECT ol_flags) — verify against
 * the full source.
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	/* mi must be direct, writable and exclusively owned */
	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
		rte_mbuf_refcnt_read(mi) == 1);

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		mi->ol_flags = m->ol_flags;
		mi->shinfo = m->shinfo;
	} else {
		/* if m is not direct, get the mbuf that embeds the data */
		mi->priv_size = m->priv_size;
	}

	__rte_pktmbuf_copy_hdr(mi, m);

	/* mi now points at m's data area */
	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	/* mi is a single-segment packet of its own */
	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;

	__rte_mbuf_sanity_check(mi, 1);
}
1309 
1317 static inline void
1318 __rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1319 {
1320  RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1321  RTE_ASSERT(m->shinfo != NULL);
1322 
1323  if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1324  m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
1325 }
1326 
1333 static inline void
1334 __rte_pktmbuf_free_direct(struct rte_mbuf *m)
1335 {
1336  struct rte_mbuf *md;
1337 
1338  RTE_ASSERT(RTE_MBUF_CLONED(m));
1339 
1340  md = rte_mbuf_from_indirect(m);
1341 
1342  if (rte_mbuf_refcnt_update(md, -1) == 0) {
1343  md->next = NULL;
1344  md->nb_segs = 1;
1345  rte_mbuf_refcnt_set(md, 1);
1346  rte_mbuf_raw_free(md);
1347  }
1348 }
1349 
/**
 * Detach an mbuf from its external or shared data buffer and restore
 * its own embedded buffer, headroom and default fields.
 *
 * NOTE(review): this extracted view has unbalanced braces — a line of
 * the form `if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {` appears
 * to be missing before the early `return`; verify against the full
 * header. The code below is kept exactly as extracted.
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	uint16_t priv_size;

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		/*
		 * The mbuf has the external attached buffer,
		 * we should check the type of the memory pool where
		 * the mbuf was allocated from to detect the pinned
		 * external buffer.
		 */
		uint32_t flags = rte_pktmbuf_priv_flags(mp);

		/*
		 * The pinned external buffer should not be
		 * detached from its backing mbuf, just exit.
		 */
		return;
		}
	__rte_pktmbuf_free_extbuf(m);
	} else {
		__rte_pktmbuf_free_direct(m);
	}
	/* restore the embedded buffer located right after the mbuf */
	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
	m->buf_len = (uint16_t)buf_len;
	m->data_len = 0;
	m->ol_flags = 0;
}
1406 
/**
 * Decrement the refcnt of a pinned external buffer when its mbuf is
 * being freed.
 *
 * NOTE(review): a statement clearing the mbuf flags (referenced by the
 * comment below) appears to be missing from this extracted view.
 *
 * @return
 *   0 if the mbuf may be returned to its pool, 1 if other references
 *   to the buffer remain and the mbuf must not be freed.
 */
static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	/* Clear flags, mbuf is being freed. */
	shinfo = m->shinfo;

	/* Optimize for performance - do not dec/reinit */
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
		return 0;

	/*
	 * Direct usage of add primitive to avoid
	 * duplication of comparing with one.
	 */
	if (likely(rte_atomic_fetch_add_explicit(&shinfo->refcnt, -1,
		rte_memory_order_acq_rel) - 1))
		return 1;

	/* Reinitialize counter before mbuf freeing. */
	rte_mbuf_ext_refcnt_set(shinfo, 1);
	return 0;
}
1444 
1459 static __rte_always_inline struct rte_mbuf *
1461 {
1462  bool refcnt_not_one;
1463 
1465 
1466  refcnt_not_one = unlikely(rte_mbuf_refcnt_read(m) != 1);
1467  if (refcnt_not_one && __rte_mbuf_refcnt_update(m, -1) != 0)
1468  return NULL;
1469 
1470  if (unlikely(!RTE_MBUF_DIRECT(m))) {
1471  rte_pktmbuf_detach(m);
1472  if (RTE_MBUF_HAS_EXTBUF(m) &&
1474  __rte_pktmbuf_pinned_extbuf_decref(m))
1475  return NULL;
1476  }
1477 
1478  if (refcnt_not_one)
1479  rte_mbuf_refcnt_set(m, 1);
1480  if (m->nb_segs != 1)
1481  m->nb_segs = 1;
1482  if (m->next != NULL)
1483  m->next = NULL;
1484 
1485  return m;
1486 }
1487 
1497 static __rte_always_inline void
1499 {
1500  m = rte_pktmbuf_prefree_seg(m);
1501  if (likely(m != NULL))
1502  rte_mbuf_raw_free(m);
1503 }
1504 
/**
 * Free a packet mbuf chain back into its mempool(s), walking the
 * segment list.
 *
 * NOTE(review): in this extracted view the loop body's call that
 * actually frees each segment (rte_pktmbuf_free_seg) and the initial
 * sanity check are missing — as shown, the loop only traverses the
 * chain. Verify against the full header.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)

	while (m != NULL) {
		/* save the link before the segment is released */
		m_next = m->next;
		m = m_next;
	}
}
1527 
1540 void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);
1541 
1559 struct rte_mbuf *
1560 rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);
1561 
1583 struct rte_mbuf *
1584 rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
1585  uint32_t offset, uint32_t length);
1586 
1598 static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1599 {
1601 
1602  do {
1603  rte_mbuf_refcnt_update(m, v);
1604  } while ((m = m->next) != NULL);
1605 }
1606 
/**
 * Get the headroom of an mbuf: the number of bytes between the start
 * of the buffer and the start of the data (equal to data_off).
 *
 * @param m
 *   The mbuf to query.
 * @return
 *   The headroom length in bytes.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	return m->data_off;
}
1620 
1629 static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1630 {
1632  return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1633  m->data_len);
1634 }
1635 
1644 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1645 {
1647  while (m->next != NULL)
1648  m = m->next;
1649  return m;
1650 }
1651 
1660 #define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1661 
1670 #define rte_pktmbuf_data_len(m) ((m)->data_len)
1671 
1687 static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1688  uint16_t len)
1689 {
1691 
1692  if (unlikely(len > rte_pktmbuf_headroom(m)))
1693  return NULL;
1694 
1695  /* NB: elaborating the subtraction like this instead of using
1696  * -= allows us to ensure the result type is uint16_t
1697  * avoiding compiler warnings on gcc 8.1 at least */
1698  m->data_off = (uint16_t)(m->data_off - len);
1699  m->data_len = (uint16_t)(m->data_len + len);
1700  m->pkt_len = (m->pkt_len + len);
1701 
1702  return (char *)m->buf_addr + m->data_off;
1703 }
1704 
1720 static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1721 {
1722  void *tail;
1723  struct rte_mbuf *m_last;
1724 
1726 
1727  m_last = rte_pktmbuf_lastseg(m);
1728  if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1729  return NULL;
1730 
1731  tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1732  m_last->data_len = (uint16_t)(m_last->data_len + len);
1733  m->pkt_len = (m->pkt_len + len);
1734  return (char*) tail;
1735 }
1736 
1751 static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1752 {
1754 
1755  if (unlikely(len > m->data_len))
1756  return NULL;
1757 
1758  /* NB: elaborating the addition like this instead of using
1759  * += allows us to ensure the result type is uint16_t
1760  * avoiding compiler warnings on gcc 8.1 at least */
1761  m->data_len = (uint16_t)(m->data_len - len);
1762  m->data_off = (uint16_t)(m->data_off + len);
1763  m->pkt_len = (m->pkt_len - len);
1764  return (char *)m->buf_addr + m->data_off;
1765 }
1766 
1781 static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
1782 {
1783  struct rte_mbuf *m_last;
1784 
1786 
1787  m_last = rte_pktmbuf_lastseg(m);
1788  if (unlikely(len > m_last->data_len))
1789  return -1;
1790 
1791  m_last->data_len = (uint16_t)(m_last->data_len - len);
1792  m->pkt_len = (m->pkt_len - len);
1793  return 0;
1794 }
1795 
/**
 * Test whether the packet data is stored in a single segment.
 *
 * @param m
 *   The mbuf to test.
 * @return
 *   Non-zero (true) if nb_segs == 1, 0 otherwise.
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	return m->nb_segs == 1;
}
1810 
1814 const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1815  uint32_t len, void *buf);
1816 
1837 static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
1838  uint32_t off, uint32_t len, void *buf)
1839 {
1840  if (likely(off + len <= rte_pktmbuf_data_len(m)))
1841  return rte_pktmbuf_mtod_offset(m, char *, off);
1842  else
1843  return __rte_pktmbuf_read(m, off, len, buf);
1844 }
1845 
1862 static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
1863 {
1864  struct rte_mbuf *cur_tail;
1865 
1866  /* Check for number-of-segments-overflow */
1867  if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
1868  return -EOVERFLOW;
1869 
1870  /* Chain 'tail' onto the old tail */
1871  cur_tail = rte_pktmbuf_lastseg(head);
1872  cur_tail->next = tail;
1873 
1874  /* accumulate number of segments and total length.
1875  * NB: elaborating the addition like this instead of using
1876  * -= allows us to ensure the result type is uint16_t
1877  * avoiding compiler warnings on gcc 8.1 at least */
1878  head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
1879  head->pkt_len += tail->pkt_len;
1880 
1881  /* pkt_len is only set in the head */
1882  tail->pkt_len = tail->data_len;
1883 
1884  return 0;
1885 }
1886 
1908 static __rte_always_inline uint64_t
1909 rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
1910  uint64_t ol3, uint64_t ol2, uint64_t unused)
1911 {
1912  return il2 << RTE_MBUF_L2_LEN_OFS |
1913  il3 << RTE_MBUF_L3_LEN_OFS |
1914  il4 << RTE_MBUF_L4_LEN_OFS |
1915  tso << RTE_MBUF_TSO_SEGSZ_OFS |
1916  ol3 << RTE_MBUF_OUTL3_LEN_OFS |
1917  ol2 << RTE_MBUF_OUTL2_LEN_OFS |
1918  unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
1919 }
1920 
1931 static inline int
1933 {
1934  uint64_t ol_flags = m->ol_flags;
1935 
1936  /* Does packet set any of available offloads? */
1938  return 0;
1939 
1940  /* IP checksum can be counted only for IPv4 packet */
1942  return -EINVAL;
1943 
1944  /* IP type not set when required */
1947  return -EINVAL;
1948 
1949  /* Check requirements for TSO packet */
1951  if ((m->tso_segsz == 0) ||
1954  return -EINVAL;
1955 
1956  /* RTE_MBUF_F_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
1959  return -EINVAL;
1960 
1961  return 0;
1962 }
1963 
1967 int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);
1968 
1981 static inline int
1983 {
1984  if (rte_pktmbuf_is_contiguous(mbuf))
1985  return 0;
1986  return __rte_pktmbuf_linearize(mbuf);
1987 }
1988 
2003 void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
2004 
2008 static inline uint32_t
2010 {
2011  return m->hash.sched.queue_id;
2012 }
2013 
2017 static inline uint8_t
2019 {
2020  return m->hash.sched.traffic_class;
2021 }
2022 
2026 static inline uint8_t
2028 {
2029  return m->hash.sched.color;
2030 }
2031 
2044 static inline void
2045 rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
2046  uint8_t *traffic_class,
2047  uint8_t *color)
2048 {
2049  struct rte_mbuf_sched sched = m->hash.sched;
2050 
2051  *queue_id = sched.queue_id;
2052  *traffic_class = sched.traffic_class;
2053  *color = sched.color;
2054 }
2055 
2059 static inline void
2061 {
2062  m->hash.sched.queue_id = queue_id;
2063 }
2064 
2068 static inline void
2070 {
2071  m->hash.sched.traffic_class = traffic_class;
2072 }
2073 
2077 static inline void
2079 {
2080  m->hash.sched.color = color;
2081 }
2082 
2095 static inline void
2097  uint8_t traffic_class,
2098  uint8_t color)
2099 {
2100  m->hash.sched = (struct rte_mbuf_sched){
2101  .queue_id = queue_id,
2102  .traffic_class = traffic_class,
2103  .color = color,
2104  .reserved = 0,
2105  };
2106 }
2107 
2108 #ifdef __cplusplus
2109 }
2110 #endif
2111 
2112 #endif /* _RTE_MBUF_H_ */
#define likely(x)
#define unlikely(x)
#define RTE_CACHE_LINE_MIN_SIZE
Definition: rte_common.h:736
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
Definition: rte_common.h:610
#define RTE_MIN(a, b)
Definition: rte_common.h:795
#define RTE_PTR_DIFF(ptr1, ptr2)
Definition: rte_common.h:566
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:559
#define RTE_MIN_T(a, b, t)
Definition: rte_common.h:809
uint64_t rte_iova_t
Definition: rte_common.h:770
#define RTE_PTR_ADD(ptr, x)
Definition: rte_common.h:554
#define RTE_SET_USED(x)
Definition: rte_common.h:264
#define __rte_always_inline
Definition: rte_common.h:490
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1498
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:622
struct rte_mempool * rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const struct rte_pktmbuf_extmem *ext_mem, unsigned int ext_num)
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
Definition: rte_mbuf.h:1117
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1644
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:128
static __rte_always_inline uint64_t rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso, uint64_t ol3, uint64_t ol2, uint64_t unused)
Definition: rte_mbuf.h:1909
#define __rte_mbuf_raw_sanity_check_mp(m, mp)
Definition: rte_mbuf.h:345
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1514
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:204
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1751
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:272
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1837
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:186
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1629
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1670
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1615
static uint32_t rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2009
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:111
void rte_mbuf_raw_sanity_check(const struct rte_mbuf *m, const struct rte_mempool *mp)
static char * rte_mbuf_data_addr_default(struct rte_mbuf *mb)
Definition: rte_mbuf.h:253
static void rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id, uint8_t traffic_class, uint8_t color)
Definition: rte_mbuf.h:2096
static void rte_mbuf_raw_reset_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
Definition: rte_mbuf.h:982
static __rte_always_inline int rte_mbuf_raw_alloc_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
Definition: rte_mbuf.h:663
static void rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
Definition: rte_mbuf.h:1236
static void rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
Definition: rte_mbuf.h:2078
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1805
int rte_mbuf_check(const struct rte_mbuf *m, int is_header, const char **reason)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
static void rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id, uint8_t *traffic_class, uint8_t *color)
Definition: rte_mbuf.h:2045
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1368
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1460
static uint32_t rte_pktmbuf_priv_flags(struct rte_mempool *mp)
Definition: rte_mbuf.h:316
static __rte_always_inline void rte_mbuf_raw_free_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
Definition: rte_mbuf.h:719
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1598
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
static char * rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
Definition: rte_mbuf.h:239
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:452
const char * rte_get_rx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
Definition: rte_mbuf.h:500
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:347
int rte_mbuf_raw_check(const struct rte_mbuf *m, const struct rte_mempool *mp, const char **reason)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:443
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:1982
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:691
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
Definition: rte_mbuf.h:482
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1687
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
static void * rte_mbuf_to_priv(struct rte_mbuf *m)
Definition: rte_mbuf.h:290
static uint8_t rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2018
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:468
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:434
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1781
#define RTE_MBUF_HAS_PINNED_EXTBUF(mb)
Definition: rte_mbuf.h:339
static rte_iova_t rte_mbuf_iova_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:149
static void rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
Definition: rte_mbuf.h:2060
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:920
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static void rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
Definition: rte_mbuf.h:2069
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1046
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1720
#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF
Definition: rte_mbuf.h:330
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1932
struct rte_mbuf * rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp, uint32_t offset, uint32_t length)
static void rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:1199
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:1068
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:218
static void rte_mbuf_iova_set(struct rte_mbuf *m, rte_iova_t iova)
Definition: rte_mbuf.h:167
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:957
static uint8_t rte_mbuf_sched_color_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:2027
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1862
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:941
static void rte_pktmbuf_reset(struct rte_mbuf *m)
Definition: rte_mbuf.h:1015
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1278
#define RTE_MBUF_F_TX_OUTER_IP_CKSUM
#define RTE_MBUF_F_EXTERNAL
#define RTE_MBUF_F_TX_IP_CKSUM
#define RTE_MBUF_MAX_NB_SEGS
#define RTE_MBUF_F_TX_TCP_SEG
#define RTE_MBUF_F_TX_L4_MASK
#define RTE_MBUF_F_TX_OUTER_IPV4
#define RTE_MBUF_HAS_EXTBUF(mb)
#define RTE_MBUF_F_TX_OFFLOAD_MASK
#define RTE_MBUF_CLONED(mb)
#define RTE_MBUF_DIRECT(mb)
#define RTE_MBUF_F_TX_IPV4
void(* rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
#define rte_pktmbuf_mtod_offset(m, t, o)
#define RTE_MBUF_F_TX_IPV6
#define RTE_MBUF_F_INDIRECT
#define RTE_MBUF_PORT_INVALID
@ RTE_MBUF_HISTORY_OP_LIB_FREE
@ RTE_MBUF_HISTORY_OP_LIB_ALLOC
static void rte_mbuf_history_mark(struct rte_mbuf *m, enum rte_mbuf_history_op op)
static void rte_mbuf_history_mark_bulk(struct rte_mbuf *const *mbufs, unsigned int count, enum rte_mbuf_history_op op)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1691
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1845
static __rte_always_inline void rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table, unsigned int n)
Definition: rte_mempool.h:1474
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1720
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1873
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1496
static void rte_prefetch0(const volatile void *p)
rte_mbuf_extbuf_free_callback_t free_cb
uint32_t queue_id
uint8_t traffic_class
uint64_t ol_flags
uint16_t nb_segs
uint16_t vlan_tci
uint16_t priv_size
uint32_t pkt_len
uint64_t dynfield2
uint16_t buf_len
struct rte_mbuf_ext_shared_info * shinfo
uint32_t packet_type
uint16_t port
void * buf_addr
struct rte_mempool * pool
uint32_t dynfield1[9]
uint16_t vlan_tci_outer
struct rte_mbuf * next
uint64_t tx_offload
uint16_t data_len
uint64_t tso_segsz
uint16_t refcnt
char name[RTE_MEMPOOL_NAMESIZE]
Definition: rte_mempool.h:231
uint32_t cache_size
Definition: rte_mempool.h:241
unsigned int flags
Definition: rte_mempool.h:238
uint16_t elt_size
Definition: rte_mbuf.h:858
rte_iova_t buf_iova
Definition: rte_mbuf.h:856
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:302