On 15/05/2020 10:47, Hemant Agrawal wrote:
> This patch moves the internal symbols to the INTERNAL section
> so that changes to them are not reported as ABI breakage.
> 
> This patch also removes two symbols that should not be
> exported:
> rte_dpaa_mem_ptov - a static inline function in the header file
> fman_ccsr_map_fd - a local shared variable.
> 
> Signed-off-by: Hemant Agrawal <hemant.agra...@nxp.com>
> ---
>  devtools/libabigail.abignore              |  2 ++
>  drivers/bus/dpaa/include/fsl_bman.h       |  6 +++++
>  drivers/bus/dpaa/include/fsl_fman.h       | 27 +++++++++++++++++++
>  drivers/bus/dpaa/include/fsl_qman.h       | 32 +++++++++++++++++++++++
>  drivers/bus/dpaa/include/fsl_usd.h        |  8 +++++-
>  drivers/bus/dpaa/include/netcfg.h         |  2 ++
>  drivers/bus/dpaa/rte_bus_dpaa_version.map |  8 +++---
>  drivers/bus/dpaa/rte_dpaa_bus.h           |  5 ++++
>  8 files changed, 85 insertions(+), 5 deletions(-)
> 
> diff --git a/devtools/libabigail.abignore b/devtools/libabigail.abignore
> index 877c6d5be8..ab34302d0c 100644
> --- a/devtools/libabigail.abignore
> +++ b/devtools/libabigail.abignore
> @@ -53,3 +53,5 @@
>       file_name_regexp = ^librte_common_dpaax\.
>  [suppress_file]
>       file_name_regexp = ^librte_bus_fslmc\.
> +[suppress_file]
> +     file_name_regexp = ^librte_bus_dpaa\.
> diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h
> index f9cd972153..82da2fcfe0 100644
> --- a/drivers/bus/dpaa/include/fsl_bman.h
> +++ b/drivers/bus/dpaa/include/fsl_bman.h
> @@ -264,12 +264,14 @@ int bman_shutdown_pool(u32 bpid);
>   * the structure provided by the caller can be released or reused after the
>   * function returns.
>   */
> +__rte_internal
>  struct bman_pool *bman_new_pool(const struct bman_pool_params *params);
>  
>  /**
>   * bman_free_pool - Deallocates a Buffer Pool object
>   * @pool: the pool object to release
>   */
> +__rte_internal
>  void bman_free_pool(struct bman_pool *pool);
>  
>  /**
> @@ -279,6 +281,7 @@ void bman_free_pool(struct bman_pool *pool);
>   * The returned pointer refers to state within the pool object so must not be
>   * modified and can no longer be read once the pool object is destroyed.
>   */
> +__rte_internal
>  const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
>  
>  /**
> @@ -289,6 +292,7 @@ const struct bman_pool_params *bman_get_params(const struct bman_pool *pool);
>   * @flags: bit-mask of BMAN_RELEASE_FLAG_*** options
>   *
>   */
> +__rte_internal
>  int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
>                u32 flags);
>  
> @@ -302,6 +306,7 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
>   * The return value will be the number of buffers obtained from the pool, or a
>   * negative error code if a h/w error or pool starvation was encountered.
>   */
> +__rte_internal
>  int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
>                u32 flags);
>  
> @@ -317,6 +322,7 @@ int bman_query_pools(struct bm_pool_state *state);
>   *
>   * Return the number of the free buffers
>   */
> +__rte_internal
>  u32 bman_query_free_buffers(struct bman_pool *pool);
>  
>  /**
> diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
> index 5705ebfdce..6c87c8db0d 100644
> --- a/drivers/bus/dpaa/include/fsl_fman.h
> +++ b/drivers/bus/dpaa/include/fsl_fman.h
> @@ -7,6 +7,8 @@
>  #ifndef __FSL_FMAN_H
>  #define __FSL_FMAN_H
>  
> +#include <rte_compat.h>
> +
>  #ifdef __cplusplus
>  extern "C" {
>  #endif
> @@ -43,18 +45,23 @@ struct fm_status_t {
>  } __rte_packed;
>  
>  /* Set MAC address for a particular interface */
> +__rte_internal
>  int fman_if_add_mac_addr(struct fman_if *p, uint8_t *eth, uint8_t addr_num);
>  
>  /* Remove a MAC address for a particular interface */
> +__rte_internal
>  void fman_if_clear_mac_addr(struct fman_if *p, uint8_t addr_num);
>  
>  /* Get the FMAN statistics */
> +__rte_internal
>  void fman_if_stats_get(struct fman_if *p, struct rte_eth_stats *stats);
>  
>  /* Reset the FMAN statistics */
> +__rte_internal
>  void fman_if_stats_reset(struct fman_if *p);
>  
>  /* Get all of the FMAN statistics */
> +__rte_internal
>  void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
>  
>  /* Set ignore pause option for a specific interface */
> @@ -64,32 +71,43 @@ void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
>  void fman_if_conf_max_frame_len(struct fman_if *p, unsigned int max_frame_len);
>  
>  /* Enable/disable Rx promiscuous mode on specified interface */
> +__rte_internal
>  void fman_if_promiscuous_enable(struct fman_if *p);
> +__rte_internal
>  void fman_if_promiscuous_disable(struct fman_if *p);
>  
>  /* Enable/disable Rx on specific interfaces */
> +__rte_internal
>  void fman_if_enable_rx(struct fman_if *p);
> +__rte_internal
>  void fman_if_disable_rx(struct fman_if *p);
>  
>  /* Enable/disable loopback on specific interfaces */
> +__rte_internal
>  void fman_if_loopback_enable(struct fman_if *p);
> +__rte_internal
>  void fman_if_loopback_disable(struct fman_if *p);
>  
>  /* Set buffer pool on specific interface */
> +__rte_internal
>  void fman_if_set_bp(struct fman_if *fm_if, unsigned int num, int bpid,
>                   size_t bufsize);
>  
>  /* Get Flow Control threshold parameters on specific interface */
> +__rte_internal
>  int fman_if_get_fc_threshold(struct fman_if *fm_if);
>  
>  /* Enable and Set Flow Control threshold parameters on specific interface */
> +__rte_internal
>  int fman_if_set_fc_threshold(struct fman_if *fm_if,
>                       u32 high_water, u32 low_water, u32 bpid);
>  
>  /* Get Flow Control pause quanta on specific interface */
> +__rte_internal
>  int fman_if_get_fc_quanta(struct fman_if *fm_if);
>  
>  /* Set Flow Control pause quanta on specific interface */
> +__rte_internal
>  int fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta);
>  
>  /* Set default error fqid on specific interface */
> @@ -99,35 +117,44 @@ void fman_if_set_err_fqid(struct fman_if *fm_if, uint32_t err_fqid);
>  int fman_if_get_ic_params(struct fman_if *fm_if, struct fman_if_ic_params *icp);
>  
>  /* Set IC transfer params */
> +__rte_internal
>  int fman_if_set_ic_params(struct fman_if *fm_if,
>                         const struct fman_if_ic_params *icp);
>  
>  /* Get interface fd->offset value */
> +__rte_internal
>  int fman_if_get_fdoff(struct fman_if *fm_if);
>  
>  /* Set interface fd->offset value */
> +__rte_internal
>  void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
>  
>  /* Get interface SG enable status value */
> +__rte_internal
>  int fman_if_get_sg_enable(struct fman_if *fm_if);
>  
>  /* Set interface SG support mode */
> +__rte_internal
>  void fman_if_set_sg(struct fman_if *fm_if, int enable);
>  
>  /* Get interface Max Frame length (MTU) */
>  uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
>  
>  /* Set interface  Max Frame length (MTU) */
> +__rte_internal
>  void fman_if_set_maxfrm(struct fman_if *fm_if, uint16_t max_frm);
>  
>  /* Set interface next invoked action for dequeue operation */
>  void fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia);
>  
>  /* discard error packets on rx */
> +__rte_internal
>  void fman_if_discard_rx_errors(struct fman_if *fm_if);
>  
> +__rte_internal
>  void fman_if_set_mcast_filter_table(struct fman_if *p);
>  
> +__rte_internal
>  void fman_if_reset_mcast_filter_table(struct fman_if *p);
>  
>  int fman_if_add_hash_mac_addr(struct fman_if *p, uint8_t *eth);
> diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
> index 1b3342e7e6..4411bb0a79 100644
> --- a/drivers/bus/dpaa/include/fsl_qman.h
> +++ b/drivers/bus/dpaa/include/fsl_qman.h
> @@ -1314,6 +1314,7 @@ struct qman_cgr {
>  #define QMAN_CGR_MODE_FRAME          0x00000001
>  
>  #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
> +__rte_internal
>  void qman_set_fq_lookup_table(void **table);
>  #endif
>  
> @@ -1322,6 +1323,7 @@ void qman_set_fq_lookup_table(void **table);
>   */
>  int qman_get_portal_index(void);
>  
> +__rte_internal
>  u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
>                       void **bufs);
>  
> @@ -1333,6 +1335,7 @@ u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
>   * processed via qman_poll_***() functions). Returns zero for success, or
>   * -EINVAL if the current CPU is sharing a portal hosted on another CPU.
>   */
> +__rte_internal
>  int qman_irqsource_add(u32 bits);
>  
>  /**
> @@ -1340,6 +1343,7 @@ int qman_irqsource_add(u32 bits);
>   * takes portal (fq specific) as input rather than using the thread affined
>   * portal.
>   */
> +__rte_internal
>  int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
>  
>  /**
> @@ -1350,6 +1354,7 @@ int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits);
>   * instead be processed via qman_poll_***() functions. Returns zero for success,
>   * or -EINVAL if the current CPU is sharing a portal hosted on another CPU.
>   */
> +__rte_internal
>  int qman_irqsource_remove(u32 bits);
>  
>  /**
> @@ -1357,6 +1362,7 @@ int qman_irqsource_remove(u32 bits);
>   * takes portal (fq specific) as input rather than using the thread affined
>   * portal.
>   */
> +__rte_internal
>  int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
>  
>  /**
> @@ -1369,6 +1375,7 @@ int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits);
>   */
>  u16 qman_affine_channel(int cpu);
>  
> +__rte_internal
>  unsigned int qman_portal_poll_rx(unsigned int poll_limit,
>                                void **bufs, struct qman_portal *q);
>  
> @@ -1380,6 +1387,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
>   *
>   * This function will issue a volatile dequeue command to the QMAN.
>   */
> +__rte_internal
>  int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
>  
>  /**
> @@ -1390,6 +1398,7 @@ int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
>   * is issued. It will keep returning NULL until there is no packet available on
>   * the DQRR.
>   */
> +__rte_internal
>  struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
>  
>  /**
> @@ -1401,6 +1410,7 @@ struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
>   * This will consume the DQRR enrey and make it available for next volatile
>   * dequeue.
>   */
> +__rte_internal
>  void qman_dqrr_consume(struct qman_fq *fq,
>                      struct qm_dqrr_entry *dq);
>  
> @@ -1414,6 +1424,7 @@ void qman_dqrr_consume(struct qman_fq *fq,
>   * this function will return -EINVAL, otherwise the return value is >=0 and
>   * represents the number of DQRR entries processed.
>   */
> +__rte_internal
>  int qman_poll_dqrr(unsigned int limit);
>  
>  /**
> @@ -1460,6 +1471,7 @@ void qman_start_dequeues(void);
>   * (SDQCR). The requested pools are limited to those the portal has dequeue
>   * access to.
>   */
> +__rte_internal
>  void qman_static_dequeue_add(u32 pools, struct qman_portal *qm);
>  
>  /**
> @@ -1507,6 +1519,7 @@ void qman_dca(const struct qm_dqrr_entry *dq, int park_request);
>   * function must be called from the same CPU as that which processed the DQRR
>   * entry in the first place.
>   */
> +__rte_internal
>  void qman_dca_index(u8 index, int park_request);
>  
>  /**
> @@ -1564,6 +1577,7 @@ void qman_set_dc_ern(qman_cb_dc_ern handler, int affine);
>   * a frame queue object based on that, rather than assuming/requiring that it be
>   * Out of Service.
>   */
> +__rte_internal
>  int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
>  
>  /**
> @@ -1582,6 +1596,7 @@ void qman_destroy_fq(struct qman_fq *fq, u32 flags);
>   * qman_fq_fqid - Queries the frame queue ID of a FQ object
>   * @fq: the frame queue object to query
>   */
> +__rte_internal
>  u32 qman_fq_fqid(struct qman_fq *fq);
>  
>  /**
> @@ -1594,6 +1609,7 @@ u32 qman_fq_fqid(struct qman_fq *fq);
>   * This captures the state, as seen by the driver, at the time the function
>   * executes.
>   */
> +__rte_internal
>  void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
>  
>  /**
> @@ -1630,6 +1646,7 @@ void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags);
>   * context_a.address fields and will leave the stashing fields provided by the
>   * user alone, otherwise it will zero out the context_a.stashing fields.
>   */
> +__rte_internal
>  int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
>  
>  /**
> @@ -1659,6 +1676,7 @@ int qman_schedule_fq(struct qman_fq *fq);
>   * caller should be prepared to accept the callback as the function is called,
>   * not only once it has returned.
>   */
> +__rte_internal
>  int qman_retire_fq(struct qman_fq *fq, u32 *flags);
>  
>  /**
> @@ -1668,6 +1686,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
>   * The frame queue must be retired and empty, and if any order restoration list
>   * was released as ERNs at the time of retirement, they must all be consumed.
>   */
> +__rte_internal
>  int qman_oos_fq(struct qman_fq *fq);
>  
>  /**
> @@ -1701,6 +1720,7 @@ int qman_query_fq_has_pkts(struct qman_fq *fq);
>   * @fq: the frame queue object to be queried
>   * @np: storage for the queried FQD fields
>   */
> +__rte_internal
>  int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
>  
>  /**
> @@ -1708,6 +1728,7 @@ int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
>   * @fq: the frame queue object to be queried
>   * @frm_cnt: number of frames in the queue
>   */
> +__rte_internal
>  int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt);
>  
>  /**
> @@ -1738,6 +1759,7 @@ int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq);
>   * callback, or by waiting for the QMAN_FQ_STATE_VDQCR bit to disappear from the
>   * "flags" retrieved from qman_fq_state().
>   */
> +__rte_internal
>  int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
>  
>  /**
> @@ -1773,8 +1795,10 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
>   * of an already busy hardware resource by throttling many of the to-be-dropped
>   * enqueues "at the source".
>   */
> +__rte_internal
>  int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags);
>  
> +__rte_internal
>  int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
>                      int frames_to_send);
>  
> @@ -1788,6 +1812,7 @@ int qman_enqueue_multi(struct qman_fq *fq, const struct qm_fd *fd, u32 *flags,
>   * This API is similar to qman_enqueue_multi(), but it takes fd which needs
>   * to be processed by different frame queues.
>   */
> +__rte_internal
>  int
>  qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
>                     u32 *flags, int frames_to_send);
> @@ -1876,6 +1901,7 @@ int qman_shutdown_fq(u32 fqid);
>   * @fqid: the base FQID of the range to deallocate
>   * @count: the number of FQIDs in the range
>   */
> +__rte_internal
>  int qman_reserve_fqid_range(u32 fqid, unsigned int count);
>  static inline int qman_reserve_fqid(u32 fqid)
>  {
> @@ -1895,6 +1921,7 @@ static inline int qman_reserve_fqid(u32 fqid)
>   * than requested (though alignment will be as requested). If @partial is zero,
>   * the return value will either be 'count' or negative.
>   */
> +__rte_internal
>  int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial);
>  static inline int qman_alloc_pool(u32 *result)
>  {
> @@ -1942,6 +1969,7 @@ void qman_seed_pool_range(u32 id, unsigned int count);
>   * any unspecified parameters) will be used rather than a modify hw hardware
>   * (which only modifies the specified parameters).
>   */
> +__rte_internal
>  int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
>                   struct qm_mcc_initcgr *opts);
>  
> @@ -1964,6 +1992,7 @@ int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
>   * is executed. This must be excuted on the same affine portal on which it was
>   * created.
>   */
> +__rte_internal
>  int qman_delete_cgr(struct qman_cgr *cgr);
>  
>  /**
> @@ -1980,6 +2009,7 @@ int qman_delete_cgr(struct qman_cgr *cgr);
>   * unspecified parameters) will be used rather than a modify hw hardware (which
>   * only modifies the specified parameters).
>   */
> +__rte_internal
>  int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
>                   struct qm_mcc_initcgr *opts);
>  
> @@ -2008,6 +2038,7 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion);
>   * than requested (though alignment will be as requested). If @partial is zero,
>   * the return value will either be 'count' or negative.
>   */
> +__rte_internal
>  int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial);
>  static inline int qman_alloc_cgrid(u32 *result)
>  {
> @@ -2021,6 +2052,7 @@ static inline int qman_alloc_cgrid(u32 *result)
>   * @id: the base CGR ID of the range to deallocate
>   * @count: the number of CGR IDs in the range
>   */
> +__rte_internal
>  void qman_release_cgrid_range(u32 id, unsigned int count);
>  static inline void qman_release_cgrid(u32 id)
>  {
> diff --git a/drivers/bus/dpaa/include/fsl_usd.h b/drivers/bus/dpaa/include/fsl_usd.h
> index 263d9bb976..dcf35e4adb 100644
> --- a/drivers/bus/dpaa/include/fsl_usd.h
> +++ b/drivers/bus/dpaa/include/fsl_usd.h
> @@ -58,6 +58,7 @@ int bman_allocate_raw_portal(struct dpaa_raw_portal *portal);
>  int bman_free_raw_portal(struct dpaa_raw_portal *portal);
>  
>  /* Obtain thread-local UIO file-descriptors */
> +__rte_internal
>  int qman_thread_fd(void);
>  int bman_thread_fd(void);
>  
> @@ -66,10 +67,14 @@ int bman_thread_fd(void);
>   * processing is complete. As such, it is essential to call this before going
>   * into another blocking read/select/poll.
>   */
> +__rte_internal
>  void qman_thread_irq(void);
> +
> +__rte_internal
>  void bman_thread_irq(void);
> +__rte_internal
>  void qman_fq_portal_thread_irq(struct qman_portal *qp);
> -
> +__rte_internal
>  void qman_clear_irq(void);
>  
>  /* Global setup */
> @@ -77,6 +82,7 @@ int qman_global_init(void);
>  int bman_global_init(void);
>  
>  /* Direct portal create and destroy */
> +__rte_internal
>  struct qman_portal *fsl_qman_fq_portal_create(int *fd);
>  int fsl_qman_fq_portal_destroy(struct qman_portal *qp);
>  int fsl_qman_fq_portal_init(struct qman_portal *qp);
> diff --git a/drivers/bus/dpaa/include/netcfg.h b/drivers/bus/dpaa/include/netcfg.h
> index bf7bfae8cb..d7d1befd24 100644
> --- a/drivers/bus/dpaa/include/netcfg.h
> +++ b/drivers/bus/dpaa/include/netcfg.h
> @@ -46,11 +46,13 @@ struct netcfg_interface {
>   * cfg_file: FMC config XML file
>   * Returns the configuration information in newly allocated memory.
>   */
> +__rte_internal
>  struct netcfg_info *netcfg_acquire(void);
>  
>  /* cfg_ptr: configuration information pointer.
>   * Frees the resources allocated by the configuration layer.
>   */
> +__rte_internal
>  void netcfg_release(struct netcfg_info *cfg_ptr);
>  
>  #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
> diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
> index e6ca4361e0..53732289d3 100644
> --- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
> +++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
> @@ -1,4 +1,8 @@
>  DPDK_20.0 {
> +     local: *;
> +};
> +
> +INTERNAL {
>       global:
>  
>       bman_acquire;
> @@ -13,7 +17,6 @@ DPDK_20.0 {
>       dpaa_logtype_pmd;
>       dpaa_netcfg;
>       dpaa_svr_family;
> -     fman_ccsr_map_fd;
>       fman_dealloc_bufs_mask_hi;
>       fman_dealloc_bufs_mask_lo;
>       fman_if_add_mac_addr;
> @@ -87,10 +90,7 @@ DPDK_20.0 {
>       qman_volatile_dequeue;
>       rte_dpaa_driver_register;
>       rte_dpaa_driver_unregister;
> -     rte_dpaa_mem_ptov;
>       rte_dpaa_portal_fq_close;
>       rte_dpaa_portal_fq_init;
>       rte_dpaa_portal_init;
> -
> -     local: *;
>  };
> diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
> index 373aca9785..d4aee132ef 100644
> --- a/drivers/bus/dpaa/rte_dpaa_bus.h
> +++ b/drivers/bus/dpaa/rte_dpaa_bus.h
> @@ -158,6 +158,7 @@ rte_dpaa_mem_vtop(void *vaddr)
>   *   A pointer to a rte_dpaa_driver structure describing the driver
>   *   to be registered.
>   */
> +__rte_internal
>  void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
>  
>  /**
> @@ -167,6 +168,7 @@ void rte_dpaa_driver_register(struct rte_dpaa_driver *driver);
>   *   A pointer to a rte_dpaa_driver structure describing the driver
>   *   to be unregistered.
>   */
> +__rte_internal
>  void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
>  
>  /**
> @@ -178,10 +180,13 @@ void rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver);
>   * @return
>   *   0 in case of success, error otherwise
>   */
> +__rte_internal
>  int rte_dpaa_portal_init(void *arg);
>  
> +__rte_internal
>  int rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq);
>  
> +__rte_internal
>  int rte_dpaa_portal_fq_close(struct qman_fq *fq);
>  
>  /**
> 
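The mechanics, for anyone following along: __rte_internal moves each
annotated function out of the public DPDK_20.0 version node and into the
INTERNAL one, and the new libabigail rule suppresses ABI reports for this
library. Below is a minimal sketch of the idea, using hypothetical
my_internal/my_helper names; the real macro lives in rte_compat.h (included
by the fsl_fman.h hunk above) and may differ in detail:

    /* Sketch of the __rte_internal mechanism, with hypothetical names;
     * see rte_compat.h for the real definition. Internal functions are
     * emitted into a dedicated ELF section, and callers built without
     * ALLOW_INTERNAL_API (i.e. code outside the DPDK tree) hit a
     * compile-time error instead of silently depending on unstable ABI. */
    #ifdef ALLOW_INTERNAL_API
    #define my_internal __attribute__((section(".text.internal")))
    #else
    #define my_internal \
            __attribute__((error("symbol is not part of the public ABI"), \
                           section(".text.internal")))
    #endif

    /* Exported under the INTERNAL version node, not DPDK_20.0. */
    my_internal int my_helper(int x);

The version map half of the change does the actual hiding: DPDK_20.0 keeps
only "local: *;" while the symbols move under the INTERNAL node, so in-tree
PMDs keep linking against them but they drop out of the stable ABI surface
that libabigail checks from release to release.
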
Acked-by: Ray Kinsella <m...@ashroe.eu>
