diff --git a/drivers/ra/CMakeLists.txt b/drivers/ra/CMakeLists.txt
index cf0ae58d..141212e8 100644
--- a/drivers/ra/CMakeLists.txt
+++ b/drivers/ra/CMakeLists.txt
@@ -202,6 +202,15 @@ zephyr_library_sources_ifdef(CONFIG_USE_RA_FSP_ETHER_PHY
 zephyr_library_sources_ifdef(CONFIG_USE_RA_FSP_ETHER
   fsp/src/r_ether/r_ether.c)
 
+zephyr_library_sources_ifdef(CONFIG_USE_RA_FSP_RMAC_PHY
+  fsp/src/r_rmac_phy/r_rmac_phy.c)
+
+zephyr_library_sources_ifdef(CONFIG_USE_RA_FSP_RMAC
+  fsp/src/r_rmac/r_rmac.c)
+
+zephyr_library_sources_ifdef(CONFIG_USE_RA_FSP_LAYER3_SWITCH
+  fsp/src/r_layer3_switch/r_layer3_switch.c)
+
 zephyr_library_sources_ifdef(CONFIG_USE_RA_FSP_CRC
   fsp/src/r_crc/r_crc.c)
 
diff --git a/drivers/ra/README b/drivers/ra/README
index 05bf3ece..69aa3822 100644
--- a/drivers/ra/README
+++ b/drivers/ra/README
@@ -183,3 +183,24 @@ Patch List:
       drivers/ra/fsp/src/bsp/mcu/ra8m2/bsp_linker.c
       drivers/ra/fsp/src/bsp/mcu/ra8p1/bsp_linker.c
       drivers/ra/fsp/src/bsp/mcu/ra8t2/bsp_linker.c
+
+  * Remove the static definitions in RMAC and RMAC_PHY
+    Impacted files:
+      drivers/ra/fsp/src/r_rmac/r_rmac.c
+      drivers/ra/fsp/src/r_rmac_phy/r_rmac_phy.c
+
+  * Add RMAC_CFG_SKIP_PHY_LINK_ABILITY_CHECK to skip checking the PHY link state in the do-link process.
+    Impacted files:
+      drivers/ra/fsp/src/r_rmac/r_rmac.c
+
+  * Add RMAC_PHY_CFG_CUSTOM_PHY_INIT to be compatible with multiple PHYs on the same MDIO bus.
+    Impacted files:
+      drivers/ra/fsp/src/r_rmac_phy/r_rmac_phy.c
+
+  * Remove the critical section barrier from the read function in RMAC
+    Impacted files:
+      drivers/ra/fsp/src/r_rmac/r_rmac.c
+
+  * Separate broadcast storm filter reception settings in RMAC
+    Impacted files:
+      drivers/ra/fsp/src/r_rmac/r_rmac.c
diff --git a/drivers/ra/fsp/inc/api/r_ether_switch_api.h b/drivers/ra/fsp/inc/api/r_ether_switch_api.h
new file mode 100644
index 00000000..2a09dc38
--- /dev/null
+++ b/drivers/ra/fsp/inc/api/r_ether_switch_api.h
@@ -0,0 +1,118 @@
+/*
+* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates
+*
+* SPDX-License-Identifier: BSD-3-Clause
+*/
+
+/*******************************************************************************************************************//**
+ * @ingroup RENESAS_NETWORKING_INTERFACES
+ * @defgroup ETHER_SWITCH_API Ethernet Switch Interface
+ * @brief Interface for Ethernet Switch functions.
+ *
+ * @section ETHER_SWITCH_API_Summary Summary
+ * The Ethernet Switch module provides an API for Ethernet switch peripherals.
+ * Such peripherals generally provide frame forwarding functionality.
+ *
+ * @{
+ **********************************************************************************************************************/
+
+#ifndef R_ETHER_SWITCH_API_H
+#define R_ETHER_SWITCH_API_H
+
+/***********************************************************************************************************************
+ * Includes
+ **********************************************************************************************************************/
+
+/* Register definitions, common services and error codes. */
+#include "bsp_api.h"
+
+/* Common macro for FSP header files. There is also a corresponding FSP_FOOTER macro at the end of this file.
*/
+FSP_HEADER
+
+/**********************************************************************************************************************
+ * Macro definitions
+ **********************************************************************************************************************/
+
+/**********************************************************************************************************************
+ * Typedef definitions
+ **********************************************************************************************************************/
+
+#ifndef BSP_OVERRIDE_ETHER_SWITCH_EVENT_T
+
+/** Events that can trigger a callback function */
+typedef enum e_ether_switch_event
+{
+    ETHER_SWITCH_EVENT_RX_COMPLETE,     ///< A descriptor completed receiving a frame.
+    ETHER_SWITCH_EVENT_TX_COMPLETE,     ///< A descriptor completed transmitting a frame.
+    ETHER_SWITCH_EVENT_RX_QUEUE_FULL,   ///< An RX descriptor queue is full.
+    ETHER_SWITCH_EVENT_RX_MESSAGE_LOST, ///< A frame was received while an RX descriptor queue was full.
+    ETHER_SWITCH_EVENT_TAS_ERROR,       ///< TAS gate error.
+} ether_switch_event_t;
+#endif
+
+#ifndef BSP_OVERRIDE_ETHER_SWITCH_CALLBACK_ARGS_T
+
+/** Callback function parameter data */
+typedef struct st_ether_switch_callback_args
+{
+    uint32_t             channel;     ///< Device channel number
+    uint32_t             ports;       ///< Bitmap of ports on which the interrupt occurred.
+    uint32_t             queue_index; ///< Queue index where the interrupt occurred.
+    ether_switch_event_t event;       ///< The event can be used to identify what caused the callback.
+
+    void * p_context;                 ///< Placeholder for user data. Set in @ref ether_switch_api_t::open function in @ref ether_switch_cfg_t.
+} ether_switch_callback_args_t;
+#endif
+
+/** Control block. Allocate an instance specific control block to pass into the API calls.
+ */
+typedef void ether_switch_ctrl_t;
+
+/** Configuration parameters. */
+typedef struct st_ether_switch_cfg
+{
+    uint8_t channel;                   ///< Channel
+
+    IRQn_Type irq;                     ///< MCU interrupt number
+    uint8_t   ipl;                     ///< MCU interrupt priority
+
+    void (* p_callback)(ether_switch_callback_args_t * p_args); ///< Callback provided when an ISR occurs.
+
+    /** Placeholder for user data. Passed to the user callback in ether_switch_callback_args_t. */
+    void       * p_context;
+    void const * p_extend;             ///< Placeholder for user extension.
+} ether_switch_cfg_t;
+
+/** Functions implemented at the HAL layer will follow this API. */
+typedef struct st_ether_switch_api
+{
+    /** Open driver.
+     *
+     * @param[in]  p_ctrl       Pointer to control structure.
+     * @param[in]  p_cfg        Pointer to configuration structure.
+     */
+    fsp_err_t (* open)(ether_switch_ctrl_t * const p_ctrl, ether_switch_cfg_t const * const p_cfg);
+
+    /** Close driver.
+     *
+     * @param[in]  p_ctrl       Pointer to control structure.
+     */
+    fsp_err_t (* close)(ether_switch_ctrl_t * const p_ctrl);
+} ether_switch_api_t;
+
+/** This structure encompasses everything that is needed to use an instance of this interface.
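+ *
+ * A minimal usage sketch is shown below. It is illustrative only: the instance name
+ * g_ether_switch0 is a placeholder that would come from the application or configuration
+ * tooling, not from this interface.
+ * @code
+ * extern const ether_switch_instance_t g_ether_switch0;  // Hypothetical, application-defined instance
+ *
+ * void ether_switch_example (void)
+ * {
+ *     // Open the switch driver with the instance's configuration.
+ *     fsp_err_t err = g_ether_switch0.p_api->open(g_ether_switch0.p_ctrl, g_ether_switch0.p_cfg);
+ *     if (FSP_SUCCESS == err)
+ *     {
+ *         // ... use the implementation-specific API (e.g. r_layer3_switch) here ...
+ *
+ *         // Close the driver when it is no longer needed.
+ *         (void) g_ether_switch0.p_api->close(g_ether_switch0.p_ctrl);
+ *     }
+ * }
+ * @endcode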
*/ +typedef struct st_ether_switch_instance +{ + ether_switch_ctrl_t * p_ctrl; ///< Pointer to the control structure for this instance + ether_switch_cfg_t const * p_cfg; ///< Pointer to the configuration structure for this instance + ether_switch_api_t const * p_api; ///< Pointer to the API structure for this instance +} ether_switch_instance_t; + +/*******************************************************************************************************************//** + * @} (end defgroup ETHER_SWITCH_API) + **********************************************************************************************************************/ + +/* Common macro for FSP header files. There is also a corresponding FSP_HEADER macro at the top of this file. */ +FSP_FOOTER + +#endif /* R_ETHER_SWITCH_API_H */ diff --git a/drivers/ra/fsp/inc/api/r_gptp_api.h b/drivers/ra/fsp/inc/api/r_gptp_api.h new file mode 100644 index 00000000..5a45b2ac --- /dev/null +++ b/drivers/ra/fsp/inc/api/r_gptp_api.h @@ -0,0 +1,161 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*******************************************************************************************************************//** + * @ingroup RENESAS_NETWORKING_INTERFACES + * @defgroup GPTP_API GPTP Interface + * @brief Interface for gPTP timing. + * + * @section GPTP_API_Summary Summary + * The gPTP API provides a generic interface for gPTP timer operation. + * + * @{ + **********************************************************************************************************************/ + +#ifndef R_GPTP_API_H +#define R_GPTP_API_H + +/*********************************************************************************************************************** + * Includes + **********************************************************************************************************************/ + +/* Register definitions, common services and error codes. */ +#include "bsp_api.h" + +/* Common macro for FSP header files. There is also a corresponding FSP_FOOTER macro at the end of this file. */ +FSP_HEADER + +/********************************************************************************************************************** + * Macro definitions + **********************************************************************************************************************/ + +/********************************************************************************************************************** + * Typedef definitions + **********************************************************************************************************************/ + +/** Timer value. */ +typedef struct st_gptp_timer_value +{ + uint16_t time_sec_upper; ///< Second(Upper 16 bit). + uint32_t time_sec_lower; ///< Second(Lower 32 bit). + uint32_t time_nsec; ///< Nanosecond. +} gptp_timer_value_t; + +/** Configuration of gPTP timer. */ +typedef struct st_gptp_timer_cfg +{ + uint8_t clock_period; ///< Timer increment value. +} gptp_timer_cfg_t; + +/** Control block. Allocate an instance specific control block to pass into the API calls. + */ +typedef void gptp_ctrl_t; + +/** GPTP callback arguments definitions. */ +typedef struct st_gptp_callback_args +{ + void * p_context; ///< Placeholder for user data. Set in @ref gptp_api_t::open function in @ref gptp_cfg_t. +} gptp_callback_args_t; + +/** Configuration parameters. 
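+ *
+ * A hypothetical example (gptp_user_callback is an application-side placeholder, not part of
+ * this interface):
+ * @code
+ * void gptp_user_callback(gptp_callback_args_t * p_args);  // Implemented by the application
+ *
+ * const gptp_cfg_t g_gptp0_cfg =
+ * {
+ *     .p_callback = gptp_user_callback,
+ *     .p_context  = NULL,
+ *     .p_extend   = NULL,               // Instance-specific extension, if required
+ * };
+ * @endcode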
*/ +typedef struct st_gptp_cfg +{ + void (* p_callback)(gptp_callback_args_t * p_args); ///< Pointer to callback function. + void * p_context; ///< Placeholder for user data. Passed to the user callback in @ref gptp_callback_args_t. + void const * p_extend; ///< Placeholder for user extension. +} gptp_cfg_t; + +/** Functions implemented at the HAL layer will follow this API. */ +typedef struct st_gptp_api +{ + /** Open driver. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] p_cfg Pointer to pin configuration structure. + */ + fsp_err_t (* open)(gptp_ctrl_t * const p_ctrl, gptp_cfg_t const * const p_cfg); + + /** Close driver. + * + * @param[in] p_ctrl Pointer to control structure. + */ + fsp_err_t (* close)(gptp_ctrl_t * const p_ctrl); + + /** Configure gptp timer parameters. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] timer Timer index. + * @param[in] p_timer_cfg Configuration of the timer. + */ + fsp_err_t (* timerCfg)(gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_timer_cfg_t const * const p_timer_cfg); + + /** Start gptp timer. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] timer Timer index. + */ + fsp_err_t (* start)(gptp_ctrl_t * const p_ctrl, uint8_t timer); + + /** Stop gptp timer. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] timer Timer index. + */ + fsp_err_t (* stop)(gptp_ctrl_t * const p_ctrl, uint8_t timer); + + /** Get the current time value to gptp timer. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] timer Timer index. + * @param[out] p_timer_value Pointer to timer value structure. + */ + fsp_err_t (* timerValueGet)(gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_timer_value_t * const p_timer_value); + + /** Set time offset correction to gptp timer. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] timer Timer index. + * @param[in] offset Time offset value. + */ + fsp_err_t (* timerOffsetSet)(gptp_ctrl_t * const p_ctrl, uint8_t timer, int64_t offset); + + /** Set clock rate correction to gptp timer. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] timer Timer index. + * @param[in] rate Clock rate value. + */ + fsp_err_t (* timerRateSet)(gptp_ctrl_t * const p_ctrl, uint8_t timer, uint32_t rate); + + /** + * Specify callback function and optional context pointer and working memory pointer. + * + * @param[in] p_ctrl Pointer to control structure. + * @param[in] p_callback Callback function. + * @param[in] p_context Pointer to send to callback function. + * @param[in] p_callback_memory Pointer to volatile memory where callback structure can be allocated. + * Callback arguments allocated here are only valid during the callback. + */ + fsp_err_t (* callbackSet)(gptp_ctrl_t * const p_ctrl, void (* p_callback)(gptp_callback_args_t *), + void * const p_context, gptp_callback_args_t * const p_callback_memory); +} gptp_api_t; + +/** This structure encompasses everything that is needed to use an instance of this interface. 
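+ *
+ * Informal usage sketch. The instance name g_gptp0 is a placeholder, timer index 0 is an
+ * arbitrary choice, and the clock_period value is hardware dependent.
+ * @code
+ * extern const gptp_instance_t g_gptp0;  // Hypothetical, application-defined instance
+ *
+ * void gptp_example (void)
+ * {
+ *     gptp_timer_cfg_t   timer_cfg   = {.clock_period = 8};
+ *     gptp_timer_value_t timer_value = {0};
+ *
+ *     fsp_err_t err = g_gptp0.p_api->open(g_gptp0.p_ctrl, g_gptp0.p_cfg);
+ *     if (FSP_SUCCESS == err)
+ *     {
+ *         (void) g_gptp0.p_api->timerCfg(g_gptp0.p_ctrl, 0, &timer_cfg);
+ *         (void) g_gptp0.p_api->start(g_gptp0.p_ctrl, 0);
+ *
+ *         // Read back the current gPTP time.
+ *         (void) g_gptp0.p_api->timerValueGet(g_gptp0.p_ctrl, 0, &timer_value);
+ *     }
+ * }
+ * @endcode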
*/ +typedef struct st_gptp_instance +{ + gptp_ctrl_t * p_ctrl; ///< Pointer to the control structure for this instance + gptp_cfg_t const * p_cfg; ///< Pointer to the configuration structure for this instance + gptp_api_t const * p_api; ///< Pointer to the API structure for this instance +} gptp_instance_t; + +/*******************************************************************************************************************//** + * @} (end defgroup GPTP_API) + **********************************************************************************************************************/ + +/* Common macro for FSP header files. There is also a corresponding FSP_HEADER macro at the top of this file. */ +FSP_FOOTER + +#endif /* R_GPTP_API_H */ diff --git a/drivers/ra/fsp/inc/instances/r_gptp.h b/drivers/ra/fsp/inc/instances/r_gptp.h new file mode 100644 index 00000000..ca13e530 --- /dev/null +++ b/drivers/ra/fsp/inc/instances/r_gptp.h @@ -0,0 +1,111 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*******************************************************************************************************************//** + * @addtogroup GPTP + * @{ + **********************************************************************************************************************/ + +#ifndef R_GPTP_H +#define R_GPTP_H + +#include "bsp_api.h" + +/* Common macro for FSP header files. There is also a corresponding FSP_FOOTER macro at the end of this file. */ +FSP_HEADER + +/*********************************************************************************************************************** + * Includes + **********************************************************************************************************************/ + +#include "r_gptp_cfg.h" +#include "r_gptp_api.h" + +/*********************************************************************************************************************** + * Macro definitions + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Typedef definitions + **********************************************************************************************************************/ + +/** Pulsed output configuration. */ +typedef struct st_gptp_pulsed_output_cfg +{ + uint8_t pulse_num; ///< Pulse generator number. + uint16_t start_sec_upper; ///< Pulse start second(upper 16 bit). + uint32_t start_sec_lower; ///< Pulse start second(lower 32 bit). + uint32_t start_ns; ///< Pulse start nanosecond. + uint16_t wide; ///< Pulse width. + uint16_t period_sec_upper; ///< Pulse period second(upper 16 bit). + uint32_t period_sec_lower; ///< Pulse period second(lower 32 bit). + uint32_t period_ns; ///< Pulse period nanosecond. +} gptp_pulsed_output_cfg_t; + +/** Pulse generator. */ +typedef struct st_gptp_pulse_generator +{ + gptp_pulsed_output_cfg_t * p_pulsed_output_cfg_list[BSP_FEATURE_ESWM_GPTP_PULSE_GENERATOR_NUM]; ///< List of pointer to pulsed output configuration. +} gptp_pulse_generator_t; + +/** GPTP control block. DO NOT INITIALIZE. Initialization occurs when @ref gptp_api_t::open is called. */ +typedef struct st_gptp_instance_ctrl +{ + uint32_t open; ///< Indicates whether the open() API has been successfully called. + gptp_cfg_t const * p_cfg; ///< Pointer to initial configurations. 
+ + R_GPTP_Type * p_reg_gptp; ///< Pointer to GPTP timer register. + + void (* p_callback)(gptp_callback_args_t *); ///< Pointer to callback that is called. + gptp_callback_args_t * p_callback_memory; ///< Pointer to non-secure memory that can be used to pass arguments to a callback in non-secure memory. + + void const * p_context; ///< Pointer to context to be passed into callback function. +} gptp_instance_ctrl_t; + +/********************************************************************************************************************** + * Exported global variables + **********************************************************************************************************************/ + +/** @cond INC_HEADER_DEFS_SEC */ +/** Filled in Interface API structure for this Instance. */ +extern const gptp_api_t g_gptp_on_gptp; + +/** @endcond */ + +/********************************************************************************************************************** + * Public Function Prototypes + **********************************************************************************************************************/ +fsp_err_t R_GPTP_Open(gptp_ctrl_t * const p_ctrl, gptp_cfg_t const * const p_cfg); + +fsp_err_t R_GPTP_Close(gptp_ctrl_t * const p_ctrl); + +fsp_err_t R_GPTP_TimerCfg(gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_timer_cfg_t const * const p_timer_cfg); + +fsp_err_t R_GPTP_Start(gptp_ctrl_t * const p_ctrl, uint8_t timer); + +fsp_err_t R_GPTP_Stop(gptp_ctrl_t * const p_ctrl, uint8_t timer); + +fsp_err_t R_GPTP_TimerValueGet(gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_timer_value_t * const p_timer_value); + +fsp_err_t R_GPTP_TimerOffsetSet(gptp_ctrl_t * const p_ctrl, uint8_t timer, int64_t offset); + +fsp_err_t R_GPTP_TimerRateSet(gptp_ctrl_t * const p_ctrl, uint8_t timer, uint32_t rate); + +fsp_err_t R_GPTP_PulseGeneratorSet(gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_pulse_generator_t * p_pulse); + +fsp_err_t R_GPTP_CallbackSet(gptp_ctrl_t * const p_ctrl, + void ( * p_callback)(gptp_callback_args_t *), + void * const p_context, + gptp_callback_args_t * const p_callback_memory); + +/*******************************************************************************************************************//** + * @} (end addtogroup GPTP) + **********************************************************************************************************************/ + +/* Common macro for FSP header files. There is also a corresponding FSP_HEADER macro at the top of this file. */ +FSP_FOOTER + +#endif // R_GPTP_H diff --git a/drivers/ra/fsp/inc/instances/r_layer3_switch.h b/drivers/ra/fsp/inc/instances/r_layer3_switch.h new file mode 100644 index 00000000..5206ef15 --- /dev/null +++ b/drivers/ra/fsp/inc/instances/r_layer3_switch.h @@ -0,0 +1,759 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*******************************************************************************************************************//** + * @addtogroup LAYER3_SWITCH + * @{ + **********************************************************************************************************************/ + +#ifndef R_LAYER3_SWITCH_H +#define R_LAYER3_SWITCH_H + +#include "bsp_api.h" + +/* Common macro for FSP header files. There is also a corresponding FSP_FOOTER macro at the end of this file. 
*/ +FSP_HEADER + +/*********************************************************************************************************************** + * Includes + **********************************************************************************************************************/ + +#include "r_layer3_switch_cfg.h" +#include "r_ether_switch_api.h" +#include "r_ether_phy_api.h" +#include "r_gptp_api.h" + +/*********************************************************************************************************************** + * Macro definitions + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Typedef definitions + **********************************************************************************************************************/ + +/** GWCA descriptor. */ +typedef struct st_layer3_switch_basic_descriptor +{ +#if ((defined(__GNUC__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || (defined(__ARMCC_VERSION) && \ + !defined(__ARM_BIG_ENDIAN)) || (defined(__ICCARM__) && (__LITTLE_ENDIAN__))) + + /* Little endian. */ + volatile uint8_t ds_l; ///< 0.. 8 (8 bits), Descriptor size (low). + volatile uint8_t ds_h : 4; ///< 9..12 (4 bits), Descriptor size (High). + volatile uint8_t info0 : 4; ///< 12..15 (4 bits), Information 0. + volatile uint8_t err : 3; ///< 16..18 (3 bits), Error, data size error, AXI bus error. + volatile uint8_t die : 1; ///< 19 (1 bit), Descriptor interrupt enable. + volatile uint8_t dt : 4; ///< 20..23 (4 bits), Descriptor type. + volatile uint8_t ptr_h; ///< 24..31 (8 bits), Pointer (High). + volatile uint32_t ptr_l; ///< 32..63 (32 bits), Pointer (Low). +#endif +} layer3_switch_basic_descriptor_t; + +/** GWCA extended descriptor. */ +typedef struct st_layer3_switch_descriptor +{ + /* Little endian. */ +#if ((defined(__GNUC__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || (defined(__ARMCC_VERSION) && \ + !defined(__ARM_BIG_ENDIAN)) || (defined(__ICCARM__) && (__LITTLE_ENDIAN__))) + + volatile layer3_switch_basic_descriptor_t basic; ///< Basic descriptor fields. + + /* Extended descriptor fields. */ + union + { + /* INFO1 of RX descriptor. */ + struct st_info1_rx + { + volatile uint8_t fi : 1; ///< 0 (1 bit), FCS in. + volatile uint8_t sec : 1; ///< 1 (1 bit), Secure descriptor. + volatile uint8_t fmt : 1; ///< 2 (1 bit), Descriptor format. + volatile uint8_t txc : 1; ///< 3 (1 bit), TX Time stamp capture. + volatile uint8_t iet : 1; ///< 4 (1 bit), Time stamp insertion request. + volatile uint8_t crt : 1; ///< 5 (1 bit), Residence time calculation request. + volatile uint8_t tn : 1; ///< 6 (1 bit), Timer utilized for capture/insertion. + volatile uint8_t : 1; ///< 7 (1 bit), Reserved. + volatile uint8_t tsun; ///< 8..15 (8 bits), Time stamp unique number. + volatile uint8_t saef; ///< 16..23 (8 bits), Source agent error flags. + volatile uint8_t rn; ///< 24..31 (8 bits), Routing number. + volatile uint32_t : 3; ///< 32..34 (3 bits), Reserved. + volatile uint32_t rv : 1; ///< 35 (1 bit), Routing valid. + volatile uint32_t spn : 3; ///< 36..38 (3 bits), Source port number. + volatile uint32_t : 1; ///< 39 (1 bit), Reserved. + volatile uint32_t fesf : 24; ///< 40..63 (24 bits), Forwarding engine status flags. + } info1_rx; + + /* INFO1 of TX descriptor. */ + struct st_info1_tx + { + volatile uint8_t fi : 1; ///< 0 (1 bit), FCS in. 
+ volatile uint8_t sec : 1; ///< 1 (1 bit), Secure descriptor. + volatile uint8_t fmt : 1; ///< 2 (1 bit), Descriptor format. + volatile uint8_t txc : 1; ///< 3 (1 bit), TX Time stamp capture. + volatile uint8_t iet : 1; ///< 4 (1 bit), Time stamp insertion request. + volatile uint8_t crt : 1; ///< 5 (1 bit), Residence time calculation request. + volatile uint8_t tn : 1; ///< 6 (1 bit), Timer utilized for capture/insertion. + volatile uint8_t : 1; ///< 7 (1 bit), Reserved. + volatile uint8_t tsun; ///< 8..15 (8 bits), Time stamp unique number. + volatile uint8_t rn; ///< 16..23 (8 bits), Routing number. + volatile uint8_t : 3; ///< 24..26 (3 bits), Reserved. + volatile uint8_t rv : 1; ///< 27 (1 bit), Routing valid. + volatile uint8_t ipv : 3; ///< 28..30 (3 bits), Internal priority value. + volatile uint8_t fw : 1; ///< 31 (1 bit), FCS contained in frame is wrong. + volatile uint8_t csd0 : 7; ///< 32..38 (7 bits), CPU sub destination for GWCA0. + volatile uint8_t : 1; ///< 39 (1 bit), Reserved. + volatile uint8_t reserved1; ///< 40..47 (8 bits), Reserved. + volatile uint8_t dv : 7; ///< 48..54 (7 bits), Destination vector. + volatile uint8_t : 1; ///< 55 (1 bit), Reserved. + volatile uint8_t reserved2; ///< 56..63 (8 bits), Reserved. + } info1_tx; + }; + + #if LAYER3_SWITCH_CFG_GPTP_ENABLE + + /* Reception descriptor TS fields. */ + union + { + /* Reception direct descriptor TS. */ + struct st_reception_direct_descriptor + { + volatile uint8_t csd0 : 7; ///< 0..6 (7 bit), CPU sub destination for GWCA0 + volatile uint8_t : 1; ///< 7 (1 bit), Reserved + volatile uint8_t csd1 : 7; ///< 8..14 (7 bit), CPU sub destination for GWCA1 + volatile uint8_t : 1; ///< 15 (1 bit), Reserved + volatile uint8_t dv : 7; ///< 16..22 (7 bit), Destination vector + volatile uint8_t : 1; ///< 23 (1 bit), Reserved + volatile uint8_t reserved1; ///< 24..31 (8 bit), Reserved + volatile uint32_t reserved2; ///< 32..63 (32 bit), Reserved + } reception_direct_descriptor; + + /* Reception ethernet descriptor TS. */ + struct st_reception_ethernet_descriptor + { + volatile uint32_t tsns : 30; ///< 0..29 (30 bit), Timestamp nanosecond [gPTP] PCH header timestamp + volatile uint32_t tsv : 1; ///< 30 (1 bit), Timestamp valid + volatile uint32_t tsd : 1; ///< 31 (1 bit), Timestamp default + volatile uint32_t tss; ///< 32..63 (32 bit), Timestamp second [gPTP] PCH header timestamp + } reception_ethernet_descriptor; + }; + #endif +#endif +} layer3_switch_descriptor_t; + +/** + * GWCA TS reception process descriptor. + */ +typedef struct st_layer3_switch_ts_reception_process_descriptor +{ + /* Little endian. 
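+     * Only the little-endian bit-field layout is defined; the #if below, like those guarding
+     * the descriptor structures above, does not provide a big-endian variant.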
*/ +#if ((defined(__GNUC__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || (defined(__ARMCC_VERSION) && \ + !defined(__ARM_BIG_ENDIAN)) || (defined(__ICCARM__) && (__LITTLE_ENDIAN__))) + union + { + struct st_ts_reception_descriptor_basic + { + volatile uint8_t ds_l; ///< 0..7 (8 bit), Descriptor size + volatile uint8_t ds_h : 4; ///< 8..11 (4 bit), Descriptor size + volatile uint8_t info0 : 4; ///< 12..15 (4 bit), Information 0 + volatile uint8_t err : 1; ///< 16 (1 bit), Error + volatile uint8_t dse : 1; ///< 17 (1 bit), Data Size Error + volatile uint8_t axie : 1; ///< 18 (1 bit), AXI Bus Error + volatile uint8_t die : 1; ///< 19 (1 bit), Descriptor Interrupt Enable + volatile uint8_t dt : 4; ///< 20..23 (4 bit), Descriptor Type + volatile uint8_t ptr_h; ///< 24..31 (8 bit), Pointer + volatile uint32_t ptr_l; ///< 32..63 (32 bit), Pointer + volatile uint32_t reserved1; ///< 64..95 (32 bit), Reserved + volatile uint32_t reserved2; ///< 96..127 (32 bit), Reserved + } ts_reception_descriptor_basic; + + struct st_ts_reception_descriptor_result + { + volatile uint8_t ds_l; ///< 0..7 (8 bit), Descriptor size + volatile uint8_t ds_h : 4; ///< 8..11 (4 bit), Descriptor size + volatile uint8_t info0 : 4; ///< 12..15 (4 bit), Information 0 + volatile uint8_t err : 1; ///< 16 (1 bit), Error + volatile uint8_t dse : 1; ///< 17 (1 bit), Data Size Error + volatile uint8_t axie : 1; ///< 18 (1 bit), AXI Bus Error + volatile uint8_t die : 1; ///< 19 (1 bit), Descriptor Interrupt Enable + volatile uint8_t dt : 4; ///< 20..23 (4 bit), Descriptor Type + volatile uint8_t ptr; ///< 24..31 (8 bit), Pointer + volatile uint8_t tsun; ///< 32..39 (8 bit), Timestamp unique number + volatile uint8_t spn : 2; ///< 40..41 (2 bit), Port number from which the timestamp corresponding frame entered the switch + volatile uint8_t : 6; ///< 42..47 (6 bit), Reserved + volatile uint8_t dpn : 1; ///< 48 (1 bit), Port number by which the timestamp has been taken + volatile uint8_t : 7; ///< 49..55 (7 bit), Reserved + volatile uint8_t tn : 1; ///< 56 (1 bit), Timer Number + volatile uint8_t : 7; ///< 57..63 (7 bit), Reserved + volatile uint32_t tsns : 30; ///< 64..93 (30 bit), Timestamp nanosecond + volatile uint32_t : 2; ///< 94..95 (2 bit), Reserved + volatile uint32_t tss; ///< 96..127 (32 bit), Timestamp second + } ts_reception_descriptor_result; + }; +#endif +} layer3_switch_ts_reception_process_descriptor_t; + +/** GWCA descriptor type. */ +typedef enum e_layer3_switch_descriptor_type +{ + LAYER3_SWITCH_DESCRIPTOR_TYPE_LINKFIX = 0U, ///< Linkfix. Control element pointing to next descriptor in chain. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_IS = 1U, ///< Frame Empty Incremental Start. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_IC = 2U, ///< Frame Empty Incremental Continue. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_ND = 3U, ///< Frame Empty No Data Storage. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY = 4U, ///< Frame Empty. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_START = 5U, ///< Frame Empty Start. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_MID = 6U, ///< Frame Empty Mid. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_END = 7U, ///< Frame Empty End. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FSINGLE = 8U, ///< Frame Single. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FSTART = 9U, ///< Frame Start. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FMID = 10U, ///< Frame Mid. + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEND = 11U, ///< Frame End. + LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY = 12U, ///< Link Empty. + LAYER3_SWITCH_DESCRIPTOR_TYPE_EEMPTY = 13U, ///< EOS Empty. 
+ LAYER3_SWITCH_DESCRIPTOR_TYPE_LINK = 14U, ///< Link. + LAYER3_SWITCH_DESCRIPTOR_TYPE_EOS = 15U, ///< End of Set. +} layer3_switch_descriptor_type_t; + +/** Bitmasks for each port. */ +typedef enum e_layer3_switch_port_bitmask +{ + LAYER3_SWITCH_PORT_BITMASK_PORT0 = (1U << 0U), ///< Port 0 + LAYER3_SWITCH_PORT_BITMASK_PORT1 = (1U << 1U), ///< Port 1 + LAYER3_SWITCH_PORT_BITMASK_PORT2 = (1U << 2U), ///< Port 2 +} layer3_switch_port_bitmask_t; + +/** Descriptor queue type. */ +typedef enum e_layer3_switch_queue_type +{ + LAYER3_SWITCH_QUEUE_TYPE_RX = 0U, ///< Reception queue. + LAYER3_SWITCH_QUEUE_TYPE_TX = 1U, ///< Transmission queue. +} layer3_switch_queue_type_t; + +/** Descriptor format type. */ +typedef enum e_layer3_switch_descriptor_format +{ + LAYER3_SWITCH_DISCRIPTOR_FORMTAT_BASIC = 0U, ///< Using basic descriptor. + LAYER3_SWITCH_DISCRIPTOR_FORMTAT_EXTENDED = 1U, ///< Using extended descriptor with additional fields. + LAYER3_SWITCH_DISCRIPTOR_FORMTAT_TX_TIMESTAMP = 2U, ///< Using TX timestamp descriptor. +} layer3_switch_descriptor_format_t; + +/** Write back mode. */ +typedef enum e_layer3_switch_write_back_mode +{ + LAYER3_SWITCH_WRITE_BACK_MODE_FULL = 0U, ///< All fields are updated by hardware. + LAYER3_SWITCH_WRITE_BACK_MODE_DISABLE = 1U, ///< No fields are updated by hardware. + LAYER3_SWITCH_WRITE_BACK_MODE_KEEP_DT = 2U, ///< Fields exclude DT are updated by hardware. +} layer3_switch_write_back_mode_t; + +/** IP version options. */ +typedef enum e_layer3_switch_ip_version +{ + LAYER3_SWITCH_IP_VERSION_NONE = 0U, ///< Not IP packet. + LAYER3_SWITCH_IP_VERSION_IPV4 = 1U, ///< IPv4 packet. + LAYER3_SWITCH_IP_VERSION_IPV6 = 2U, ///< IPv6 packet. +} layer3_switch_ip_version_t; + +/** Direction of IP address. */ +typedef enum e_layer3_switch_ip_address_direction +{ + LAYER3_SWITCH_IP_ADDRESS_DIRECTION_SOURCE = 0U, ///< Use IP source address in stream ID. + LAYER3_SWITCH_IP_ADDRESS_DIRECTION_DESTINATION = 1U, ///< Use IP destination address in stream ID. +} layer3_switch_ip_address_direction_t; + +/** Magic packet detection options. */ +typedef enum e_layer3_switch_magic_packet_detection +{ + LAYER3_SWITCH_MAGIC_PACKET_DETECTION_ENABLE = 1U, ///< Enable magic packet detection. + LAYER3_SWITCH_MAGIC_PACKET_DETECTION_DISABLE = 0U, ///< Disable magic packet detection. +} layer3_switch_magic_packet_detection_t; + +/** VLAN detection mode. */ +typedef enum e_layer3_switch_vlan_mode +{ + LAYER3_SWITCH_VLAN_MODE_NO_VLAN = 0U, ///< Not use VLAN feature. + LAYER3_SWITCH_VLAN_MODE_C_TAG = 1U, ///< Detect and use VLAN C-TAG. + LAYER3_SWITCH_VLAN_MODE_SC_TAG = 2U, ///< Detect and use VLAN SC-TAG. +} layer3_switch_vlan_mode_t; + +/** VLAN ingress mode determines whether the switch operates with tagged VLAN or port-based VLAN. */ +typedef enum e_layer3_switch_vlan_ingress_mode +{ + LAYER3_SWITCH_VLAN_INGRESS_MODE_TAG_BASED = 0U, ///< Use VLAN ID of the incoming frame. + LAYER3_SWITCH_VLAN_INGRESS_MODE_PORT_BASED = 1U, ///< Use VLAN ID of the incoming port. +} layer3_switch_vlan_ingress_mode_t; + +/** VLAN egress mode determines the VLAN tag that is added to output frames. */ +typedef enum e_layer3_switch_vlan_egress_mode +{ + LAYER3_SWITCH_VLAN_EGRESS_MODE_NO_VLAN = 0U, ///< Frame always outgoing without VLAN TAG. + LAYER3_SWITCH_VLAN_EGRESS_MODE_C_TAG = 1U, ///< Frame outgoing with C-TAG of incoming frame. + LAYER3_SWITCH_VLAN_EGRESS_MODE_HW_C_TAG = 2U, ///< Frame always outgoing with C-TAG of output port. 
+ LAYER3_SWITCH_VLAN_EGRESS_MODE_SC_TAG = 3U, ///< Frame always outgoing with SC-TAG of incoming frame. + LAYER3_SWITCH_VLAN_EGRESS_MODE_HW_SC_TAG = 4U, ///< Frame always outgoing with SC-TAG of output port. +} layer3_switch_vlan_egress_mode_t; + +/** IP protocols type. */ +typedef enum e_layer3_switch_ip_protocol +{ + LAYER3_SWITCH_IP_PROTOCOL_TCP = 0x06U, ///< IP protocol is TCP. + LAYER3_SWITCH_IP_PROTOCOL_UDP = 0x11U, ///< IP protocol is UDP. +} layer3_switch_ip_protocol_t; + +/** R-TAG filed update options. */ +typedef enum e_layer3_switch_forwarding_r_tag +{ + LAYER3_SWITCH_FORWARDING_R_TAG_UPDATE = 0U, ///< Incoming frame with R-TAG will be updated with new R-TAG. + LAYER3_SWITCH_FORWARDING_R_TAG_ALWAYS_ADD = 1U, ///< ALl incoming frame will be added new R-TAG. + LAYER3_SWITCH_FORWARDING_R_TAG_ALWAYS_ELIMINATE = 2U, ///< ALl incoming frame will be removed R-TAG. +} layer3_switch_forwarding_r_tag_t; + +/** Frame filters options for layer3 forwarding. */ +typedef enum e_layer3_switch_l3_filter_bitmask +{ + LAYER3_SWITCH_L3_FILTER_BITMASK_MAC_DESTINATION = 0x1U, ///< Enable MAC destination address. + LAYER3_SWITCH_L3_FILTER_BITMASK_MAC_SOURCE = 0x2U, ///< Enable MAC source address. + LAYER3_SWITCH_L3_FILTER_BITMASK_STAG_ID = 0x4U, ///< Enable VLAN ID of S-TAG. + LAYER3_SWITCH_L3_FILTER_BITMASK_STAG_PCP = 0x8U, ///< Enable PCP of S-TAG. + LAYER3_SWITCH_L3_FILTER_BITMASK_STAG_DEI = 0x10U, ///< Enable DEI of S-TAG. + LAYER3_SWITCH_L3_FILTER_BITMASK_CTAG_ID = 0x20U, ///< Enable VLAN ID of C-TAG. + LAYER3_SWITCH_L3_FILTER_BITMASK_CTAG_PCP = 0x40U, ///< Enable PCP of C-TAG. + LAYER3_SWITCH_L3_FILTER_BITMASK_CTAG_DEI = 0x80U, ///< Enable DEI of C-TAG. + LAYER3_SWITCH_L3_FILTER_BITMASK_IP_SOURCE_ADDRESS = 0x100U, ///< Enable IP source address. + LAYER3_SWITCH_L3_FILTER_BITMASK_IP_DESTINATION_ADDRESS = 0x200U, ///< Enable IP destination address. + LAYER3_SWITCH_L3_FILTER_BITMASK_PROTOCOL = 0x400U, ///< Enable IP protocol. + LAYER3_SWITCH_L3_FILTER_BITMASK_IP_SOURCE_PORT = 0x800U, ///< Enable layer4 source port. + LAYER3_SWITCH_L3_FILTER_BITMASK_IP_DESTINATION_PORT = 0x1000U, ///< Enable layer4 destination port. +} layer3_switch_l3_filter_bitmask_t; + +/** Enable L2/L3 update feature. */ +typedef enum e_layer3_switch_l3_update_bitmask +{ + LAYER3_SWITCH_L3_UPDATE_BITMASK_TTL = 0x1U, ///< Update TTL + LAYER3_SWITCH_L3_UPDATE_BITMASK_MAC_DESTINATION = 0x2U, ///< Update MAC destination address. + LAYER3_SWITCH_L3_UPDATE_BITMASK_MAC_SOURCE = 0x4U, ///< Update MAC source address. + LAYER3_SWITCH_L3_UPDATE_BITMASK_CTAG_ID = 0x8U, ///< Update VLAN ID of C-TAG + LAYER3_SWITCH_L3_UPDATE_BITMASK_CTAG_PCP = 0x10U, ///< Update PCP of C-TAG + LAYER3_SWITCH_L3_UPDATE_BITMASK_CTAG_DEI = 0x20U, ///< Update DEI of C-TAG + LAYER3_SWITCH_L3_UPDATE_BITMASK_STAG_ID = 0x40U, ///< Update VLAN ID of C-TAG + LAYER3_SWITCH_L3_UPDATE_BITMASK_STAG_PCP = 0x80U, ///< Update PCP of C-TAG + LAYER3_SWITCH_L3_UPDATE_BITMASK_STAG_DEI = 0x100U, ///< Update DEI of C-TAG +} layer3_switch_l3_update_bitmask_t; + +/** Table entry type for forwarding feature. */ +typedef enum e_layer3_switch_table_entry_type +{ + LAYER3_SWITCH_TABLE_ENTRY_TYPE_EMPTY = 0U, ///< Entry is not initialized yet. + LAYER3_SWITCH_TABLE_ENTRY_TYPE_MAC = 1U, ///< Entry of MAC table. + LAYER3_SWITCH_TABLE_ENTRY_TYPE_VLAN = 2U, ///< Entry of VLAN table. + LAYER3_SWITCH_TABLE_ENTRY_TYPE_LAYER3 = 3U, ///< Entry of Layer3 table. +} layer3_switch_table_entry_type_t; + +/** Table status for forwarding feature. 
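+ *
+ * Informally, the table is expected to reach the initialized state via
+ * @ref R_LAYER3_SWITCH_ConfigureTable, after which entries can be added. A rough sketch
+ * (g_switch0_ctrl and the configuration contents are application-side placeholders):
+ * @code
+ * extern layer3_switch_instance_ctrl_t g_switch0_ctrl;   // Hypothetical control block, opened beforehand
+ *
+ * void switch_table_example (void)
+ * {
+ *     layer3_switch_table_cfg_t       table_cfg    = {0};  // Populated by the application in practice
+ *     layer3_switch_frame_filter_t    target_frame = {.entry_type = LAYER3_SWITCH_TABLE_ENTRY_TYPE_MAC};
+ *     layer3_switch_table_entry_cfg_t entry_cfg    = {.entry_enable = true};
+ *
+ *     (void) R_LAYER3_SWITCH_ConfigureTable(&g_switch0_ctrl, &table_cfg);
+ *     (void) R_LAYER3_SWITCH_AddTableEntry(&g_switch0_ctrl, &target_frame, &entry_cfg);
+ * }
+ * @endcode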
*/ +typedef enum e_layer3_switch_table_status +{ + LAYER3_SWITCH_TABLE_STATUS_UNINITIALIZED = 0, ///< Forwarding table is uninitialized + LAYER3_SWITCH_TABLE_STATUS_INITIALIZED = 1 ///< Forwarding table is initialized +} layer3_switch_table_status_t; + +/** TAS gate state. */ +typedef enum e_layer3_switch_tas_gate_state +{ + LAYER3_SWITCH_TAS_GATE_STATE_CLOSE = 0, ///< Gate is closed. + LAYER3_SWITCH_TAS_GATE_STATE_OPEN = 1 ///< Gate is opened. +} layer3_switch_tas_gate_state_t; + +/** Enable or disable rx timestamp storage. */ +typedef enum e_layer3_switch_rx_timestamp_storage +{ + LAYER3_SWITCH_RX_TIMESTAMP_STORAGE_DISABLE = 0U, ///< Timestamp is not added in the descriptor. + LAYER3_SWITCH_RX_TIMESTAMP_STORAGE_ENABLE = 1U, ///< Timestamp is added in the descriptor. +} layer3_switch_rx_timestamp_storage_t; + +/** Used or not timestamp descriptor queue. */ +typedef enum e_layer3_switch_ts_descriptor_queue_status +{ + LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_STATUS_UNUSED = 0U, ///< TS descriptor queue is unused. + LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_STATUS_USED = 1U, ///< TS descriptor queue is used. +} layer3_switch_ts_descriptor_queue_status_t; + +/** Configuration of FRER feature. */ +typedef struct st_layer3_switch_frer_cfg +{ + uint32_t sys_clock; ///< Timeout check time. + uint32_t timeout_enable; ///< Timeout check valid. + uint32_t check_period; ///< Microsecond prescaler used to creates an internal clock for aging at 1 MHz to derive the timeout 1 kHz clock. +} layer3_switch_frer_cfg_t; + +/** FRER table entry. */ +typedef struct st_layer3_switch_frer_entry +{ + uint32_t take_no_sequence; ///< Reject frames without R-TAG. + uint32_t sequence_history_len; ///< Sequence history length. + uint32_t set_recovery_remaining_tick; ///< Time in tick before timeout. +} layer3_switch_frer_entry_t; + +/** Configuration of FRER entry (individual and sequence recovery). */ +typedef struct st_layer3_switch_frer_entry_cfg +{ + layer3_switch_frer_entry_t individual_recovery; ///< FRER table entry for individual recovery. + layer3_switch_frer_entry_t * p_sequence_recovery; ///< Pointer to FRER entry(sequence recovery) information referenced by `FRERN` in this FRER entry(individual recovery). + uint32_t sequence_recovery_id; +} layer3_switch_frer_entry_cfg_t; + +/** Status of FRER sequence recovery table. */ +typedef struct st_layer3_switch_frer_sequence_recovery_status +{ + bool learned; ///< Flags whether the FRER entry(sequence recovery) related to this entry has been learned. + uint32_t frer_entry_index; ///< Real FRER table index of the FRER entry(sequence recovery) related to this entry. +} layer3_switch_frer_sequence_recovery_status_t; + +/** Configuration of a descriptor queue. */ +typedef struct st_layer3_switch_descriptor_queue_cfg +{ + layer3_switch_descriptor_format_t descriptor_format; ///< Enable or disable extended descriptors. + layer3_switch_queue_type_t type; ///< Reception queue or transmission queue. + layer3_switch_write_back_mode_t write_back_mode; ///< Configure write-back to descriptor fields. + layer3_switch_descriptor_t * p_descriptor_array; ///< Descriptor array that use to create queue. + layer3_switch_ts_reception_process_descriptor_t * p_ts_descriptor_array; ///< TS descriptor array that use to create queue. + layer3_switch_rx_timestamp_storage_t rx_timestamp_storage; ///< Configure RX timestamp storage. + uint32_t array_length; ///< Length of descriptor array. This length includes terminate descriptor at the end. + uint32_t ports; ///< Bitmap of ports that use this queue. 
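+
+    /* Informal example (placeholder names, application-provided storage): a simple RX queue of
+     * eight usable basic descriptors plus the terminating descriptor could be described as
+     *
+     *     { .descriptor_format    = LAYER3_SWITCH_DISCRIPTOR_FORMTAT_BASIC,
+     *       .type                 = LAYER3_SWITCH_QUEUE_TYPE_RX,
+     *       .write_back_mode      = LAYER3_SWITCH_WRITE_BACK_MODE_FULL,
+     *       .p_descriptor_array   = g_rx_descriptors,
+     *       .rx_timestamp_storage = LAYER3_SWITCH_RX_TIMESTAMP_STORAGE_DISABLE,
+     *       .array_length         = 9,
+     *       .ports                = LAYER3_SWITCH_PORT_BITMASK_PORT0 }
+     *
+     * and then passed to R_LAYER3_SWITCH_CreateDescriptorQueue(). */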
+} layer3_switch_descriptor_queue_cfg_t; + +/** Configuration of Credit Based Shaper. */ +typedef struct st_layer3_switch_cbs_cfg +{ + uint8_t band_width_list[8]; ///< CBS band width [%] of each queue. + uint8_t max_burst_num_list[8]; ///< Maximum burst frame number of each queue. +} layer3_switch_cbs_cfg_t; + +/** Configuration of each Ethernet port. */ +typedef struct st_layer3_switch_port_cfg +{ + uint8_t * p_mac_address; ///< Pointer to MAC address. + bool forwarding_to_cpu_enable; ///< Enable or disable reception on CPU. + layer3_switch_cbs_cfg_t * p_cbs_cfg; ///< Pointer to CBS configuration. + void (* p_callback)(ether_switch_callback_args_t * p_args); ///< Callback provided when an ISR occurs. + ether_switch_callback_args_t * p_callback_memory; ///< Pointer to optional callback argument memory + void * p_context; ///< Pointer to context to be passed into callback function +} layer3_switch_port_cfg_t; + +/** Status of a descriptor queue. */ +typedef struct st_layer3_switch_descriptor_queue_status +{ + uint32_t head; ///< Index at the head of the queue. This used for GetDescriptor API. + uint32_t tail; ///< Index at the tail of the queue. This used for SetDescriptor API. + bool created; ///< This queue is already created. + layer3_switch_descriptor_queue_cfg_t const * p_queue_cfg; ///< Configuration of this queue. + bool rx_available; ///< Indicates whether the queue is available for reception. +} layer3_switch_descriptor_queue_status_t; + +/** IP address offset for creating IPv6 filter of Layer3 forwarding. */ +typedef struct st_layer3_switch_ipv6_filter_address_offset +{ + uint8_t offset; ///< Offset of IPv6 address. + layer3_switch_ip_address_direction_t direction; ///< Select source or destination. +} layer3_switch_ipv6_filter_address_offset_t; + +/** Configuration of stream filter in Layer3 forwarding. */ +typedef struct st_layer3_switch_l3_stream_filter_cfg +{ + uint16_t filter_field_bitmask; ///< Bitmask of which feilds is enabled in stream filter. Use @ref layer3_switch_l3_filter_bitmask_t. + layer3_switch_ipv6_filter_address_offset_t ipv6_address0; ///< Offset of IPv6 address 0. + layer3_switch_ipv6_filter_address_offset_t ipv6_address1; ///< Offset of IPv6 address 1. +} layer3_switch_l3_stream_filter_cfg_t; + +/* Stream ID of Layer3 forwarding entry. This structure is used internally only. */ +typedef struct st_layer3_switch_stream_id +{ + uint8_t frame_format_code; ///< Format code that implies which filter created this stream ID. + union + { + uint32_t words[4]; ///< Stream ID as words. + uint8_t bytes[16]; ///< Stream ID as bytes. + }; +} layer3_switch_stream_id_t; + +/** VLAN tag structure. */ +typedef struct st_layer3_switch_frame_vlan_tag +{ + uint16_t pcp : 3; ///< Priority Code Point (3 bit). + uint16_t dei : 1; ///< Drop Eligible Indicator (1 bit). + uint16_t id : 12; ///< VLAN ID (12 bit). +} layer3_switch_frame_vlan_tag_t; + +/** Frame filter of a MAC/VLAN/Layer3 forwarding entry. Set values for members required for filtering. */ +typedef struct st_layer3_switch_frame_filter +{ + /* Entry type. */ + layer3_switch_table_entry_type_t entry_type; ///< Type of this entry. Select MAC, VLAN or Layer3. + + /* Used for MAC and Layer3 entry. */ + uint8_t * p_destination_mac_address; ///< Destination MAC address. + uint8_t * p_source_mac_address; ///< Source MAC address. + + /* Used for VLAN and Layer3 entry. */ + layer3_switch_frame_vlan_tag_t vlan_c_tag; ///< VLAN C-TAG. + layer3_switch_frame_vlan_tag_t vlan_s_tag; ///< VLAN S-TAG. + + /* Used for Layer3 table. 
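+     * These members are only meaningful for entries whose entry_type is
+     * LAYER3_SWITCH_TABLE_ENTRY_TYPE_LAYER3; informally, only the fields enabled by the
+     * Layer3 stream filter configuration are expected to take part in matching.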
*/ + layer3_switch_ip_version_t ip_version; ///< IP version. Select IPv4, IPv6, or not IP packet. + uint8_t protocol; ///< IP protocol + uint8_t * p_source_ip_address; ///< Source IP address. + uint8_t * p_destination_ip_address; ///< Destination IP address. + uint16_t layer4_source_port; ///< TCP/UDP source port. + uint16_t layer4_destination_port; ///< TCP/UDP destination port. +} layer3_switch_frame_filter_t; + +/** Store filter information of Layer3 forwarding entry. */ +typedef struct st_layer3_switch_l3_filter +{ + layer3_switch_frame_filter_t frame; ///< Target frame format that used to Layer3 forwarding. +} layer3_switch_l3_filter_t; + +/** Configuration of a L2/L3 update feature for output frames. */ +typedef struct st_layer3_switch_l3_update_config +{ + uint32_t enable_destination_ports; ///< Destination ports that this update config is enabled. + uint32_t update_field_bitmask; ///< Bit mask of which fields will be updated. Use @ref layer3_switch_l3_update_bitmask_t. + uint8_t * p_mac_destination_address; ///< MAC Destination Address. + layer3_switch_frame_vlan_tag_t vlan_c_tag; ///< VLAN C-tag. + layer3_switch_frame_vlan_tag_t vlan_s_tag; ///< VLAN S-tag. + layer3_switch_forwarding_r_tag_t r_tag_update_mode; ///< R-TAG update setting. + bool sequence_number_generation_enable; ///< Flags whether sequence number generation (FRER) is enabled for this entry. +} layer3_switch_l3_update_config_t; + +/** Table entry configuration of MAC/VLAN/Layer3 forwarding. */ +typedef struct st_layer3_switch_table_entry_cfg +{ + /* Entry settings. */ + bool entry_enable; ///< Enable or disable entry. If this field is false, entry will be removed. + bool security_enable; ///< Entry is secure or not. + + /* FRER setting. */ + layer3_switch_frer_entry_cfg_t * p_frer_entry_cfg; ///< Configuration of the FRER entry(individual recovery) for this L3 entry (set to `NULL` : FRER is not valid in this L3 entry). + + /* Forwarding settings. */ + uint32_t destination_ports; ///< Destination ports of forwarding. + uint32_t source_ports; ///< Source ports that enable forwarding of incoming frame. + uint32_t destination_queue_index; ///< Destination queue. This fields is only used when a destination port is CPU. + uint32_t internal_priority_update_enable; ///< Enable to update internal priority + uint32_t internal_priority_update_value; ///< Internal priority when updating is enabled. + + /* Forwarding protocol specific features. */ + union + { + /* MAC forwarding entry fields. */ + struct st_mac + { + bool dinamic_entry; ///< This entry is dynamic entry or not. Dynamic entry enable aging feature. + } mac; + + /* Layer3 forwarding specific feature. */ + struct st_layer3 + { + layer3_switch_l3_update_config_t * p_update_configs; ///< Pointer to an array of a L2/L3 update configurations. + uint32_t number_of_configs; ///< Number of the update configs. + } layer3; + }; +} layer3_switch_table_entry_cfg_t; + +/** Table entry of MAC/VLAN/Layer3 forwarding. */ +typedef struct st_layer3_switch_table_entry +{ + layer3_switch_frame_filter_t target_frame; ///< Target frame of forwarding by this entry. + layer3_switch_table_entry_cfg_t entry_cfg; ///< Configuration of this entry. +} layer3_switch_table_entry_t; + +/** Configuration of forwarding feature for each port. */ +typedef struct st_layer3_switch_forwarding_port_cfg +{ + /* MAC table configuration. */ + bool mac_table_enable; ///< Enable MAC table and forwarding feature. + bool mac_reject_unknown; ///< Reject frame with unknown MAC address. 
+ bool mac_hardware_learning_enable; ///< Enable hardware learning and migration. + + /* VLAN table configuration. */ + bool vlan_table_enable; ///< Enable VLAN table and forwarding feature. + bool vlan_reject_unknown; ///< Reject frame with unknown VLAN ID. + layer3_switch_vlan_ingress_mode_t vlan_ingress_mode; ///< Select Tag-based VLAN or Port-based VLAN for incoming frame. + layer3_switch_vlan_egress_mode_t vlan_egress_mode; ///< Tagging/untagging mode for outgoing frame. + layer3_switch_frame_vlan_tag_t vlan_s_tag; ///< S-TAG of this port. When egress mode is hardware SC-TAG, add this to outgoing frame. + layer3_switch_frame_vlan_tag_t vlan_c_tag; ///< C-TAG of this port. When egress mode is hardware C-TAG, add this to outgoing frame. + + /* Layer3 table configuration. */ + bool layer3_table_enable; ///< Enable Layer3 table and forwarding feature. + bool layer3_reject_unknown; ///< Reject frame that not found in Layer3 table. + bool layer3_ipv4_filter_enable; ///< Enable IPv4 stream filter. + bool layer3_ipv6_filter_enable; ///< Enable IPv6 stream filter. + bool layer3_l2_filter_enable; ///< Enable L2 stream filter. +} layer3_switch_forwarding_port_cfg_t; + +/** Forwarding table containing MAC/VLAN/Layer3 forwarding entries. */ +typedef struct st_layer3_switch_table +{ + layer3_switch_table_entry_t * p_mac_entry_list; ///< List of MAC entries. + uint32_t mac_list_length; ///< Length of the MAC entry list. + layer3_switch_table_entry_t * p_vlan_entry_list; ///< List of VLAN entries. + uint32_t vlan_list_length; ///< Length of the VLAN entry list. + layer3_switch_table_entry_t * p_l3_entry_list; ///< List of Layer3 entries. + uint32_t l3_list_length; ///< Length of the Layer3 entry list. +} layer3_switch_table_t; + +/** Configuration of the forwarding table. */ +typedef struct st_layer3_switch_table_cfg +{ + layer3_switch_table_t * p_table; ///< Pointer to forwarding table. + layer3_switch_forwarding_port_cfg_t port_cfg_list[BSP_FEATURE_ETHER_NUM_CHANNELS + 1]; ///< Forwarding configuration of each port. + uint32_t unsecure_entry_maximum_num; ///< Maximum number of unsecure entries. + + /* MAC table configuration. */ + bool mac_entry_aging_enable; ///< Enable aging feature of MAC table. + uint32_t mac_entry_aging_time_sec; ///< Time[s] to delete an entry by aging. + + /* VLAN table configuration. */ + layer3_switch_vlan_mode_t vlan_mode; ///< VLAN mode options: NO VLAN, C-TAG, or SC-TAG. + + /* Layer3 table configuration. */ + layer3_switch_l3_stream_filter_cfg_t l3_stream_filter_cfg; ///< Configuration of stream filter in Layer3 forwarding. + layer3_switch_frer_cfg_t frer_cfg; ///< Configuration of FRER feature. +} layer3_switch_table_cfg_t; + +/** ESWM extension configures each Ethernet port and forwarding feature. */ +typedef struct st_layer3_switch_extended_cfg +{ + ether_phy_instance_t const * p_ether_phy_instances[BSP_FEATURE_ETHER_NUM_CHANNELS]; ///< List of pointers to ETHER_PHY instance. + gptp_instance_t const * p_gptp_instance; ///< Pointer to a gPTP instance. + uint32_t fowarding_target_port_masks[BSP_FEATURE_ETHER_NUM_CHANNELS]; ///< List of ports to which incoming frames are forwarded. + uint8_t * p_mac_addresses[BSP_FEATURE_ETHER_NUM_CHANNELS]; // [DEPRECATED] MAC address of each port. + uint32_t ipv_queue_depth_list[BSP_FEATURE_ETHER_NUM_CHANNELS][8]; ///< List of IPV queue depth for each port. + layer3_switch_l3_filter_t * l3_filter_list; ///< Filter list of layer3 routing. + uint32_t l3_filter_list_length; ///< Length of Layer3 filter list. 
+ layer3_switch_port_cfg_t * p_port_cfg_list[BSP_FEATURE_ETHER_NUM_CHANNELS]; ///< Configuration for each port. + IRQn_Type etha_error_irq_port_0; ///< ETHA error interrupt number for port 0. + IRQn_Type etha_error_irq_port_1; ///< ETHA error interrupt number for port 1. + uint8_t etha_error_ipl_port_0; ///< ETHA error interrupt priority for port 0. + uint8_t etha_error_ipl_port_1; ///< ETHA error interrupt priority for port 1. + uint8_t gptp_timer_numbers[BSP_FEATURE_ESWM_GPTP_TIMER_NUM]; ///< List of timer numbers for transmission/reception timestamp. +} layer3_switch_extended_cfg_t; + +/** LAYER3_SWITCH control block. DO NOT INITIALIZE. Initialization occurs when @ref ether_switch_api_t::open is called. */ +typedef struct st_layer3_switch_instance_ctrl +{ + uint32_t open; ///< Used to determine if the channel is configured + ether_switch_cfg_t const * p_cfg; ///< Pointer to initial configurations. + R_GWCA0_Type * p_gwca_reg; ///< Pointer to GWCA register. + + uint32_t allocated_descriptor_queue_index; ///< Index of the descriptor pool. + layer3_switch_basic_descriptor_t p_descriptor_queue_list[LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM]; ///< Descriptor queue lists used by hardware. + layer3_switch_descriptor_queue_status_t p_queues_status[LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM]; ///< Status of each descriptor queues. + layer3_switch_port_cfg_t p_port_cfg_list[BSP_FEATURE_ETHER_NUM_CHANNELS]; ///< Configuration for each port. + + /* Forwarding features. */ + layer3_switch_table_status_t table_status; ///< Forwarding table is initialized or not. + uint32_t l3_entry_count; ///< Counts of valid LAYER3 entry. + uint8_t l3_routing_number; ///< Routing number for L2/L3 update feature. + uint8_t l3_remapping_number; ///< Remapping number for L2/L3 update feature. + + /* Timestamp features. */ + layer3_switch_ts_descriptor_queue_status_t ts_descriptor_queue_status_list[ + BSP_FEATURE_ESWM_TS_DESCRIPTOR_QUEUE_MAX_NUM]; ///< Status of TS reception descriptor queues. + + /* FRER features. */ + uint32_t valid_frer_entry_num; ///< Number of valid FRER entry. + layer3_switch_frer_sequence_recovery_status_t frer_sequence_recovery_status[BSP_FEATURE_ESWM_FRER_TABLE_SIZE]; ///< Status of table for each FRER sequence recovery entry. + uint32_t used_frer_sequence_generator_num; ///< Number of the sequence number generator. + + void (* p_callback)(ether_switch_callback_args_t * p_args); ///< Callback provided when an ISR occurs. + ether_switch_callback_args_t * p_callback_memory; ///< Pointer to optional callback argument memory + void * p_context; ///< Pointer to context to be passed into callback function +} layer3_switch_instance_ctrl_t; + +/** Configuration of the gate operation. */ +typedef struct st_layer3_switch_etha_tas_entry +{ + layer3_switch_tas_gate_state_t state; ///< Gate state. + uint32_t time; ///< Time associated with the entry gate state [nsec]. +} layer3_switch_tas_entry_t; + +/** Configuration of the gate. */ +typedef struct st_layer3_switch_tas_gate_cfg +{ + layer3_switch_tas_gate_state_t initial_gate_state; ///< Initial gate state when the cycle starts. + uint8_t tas_entry_num; ///< Number of TAS entries included in this gate. + layer3_switch_tas_entry_t * p_tas_entry_list; ///< List of TAS entries included in this gate. +} layer3_switch_tas_gate_cfg_t; + +/** Configuration of the TAS. */ +typedef struct st_layer3_switch_tas_cfg +{ + uint8_t gptp_timer_number; ///< gPTP timer number. + uint32_t cycle_time_start_high; ///< Upper 32 bits of TAS cycle start time [nsec]. 
+ uint32_t cycle_time_start_low; ///< Lower 32 bits of TAS cycle start time [nsec]. + uint32_t cycle_time; ///< TAS cycle time [nsec]. + layer3_switch_tas_gate_cfg_t gate_cfg_list[8]; ///< List of TAS gate configurations. +} layer3_switch_tas_cfg_t; + +/********************************************************************************************************************** + * Exported global variables + **********************************************************************************************************************/ + +/** @cond INC_HEADER_DEFS_SEC */ +/** Filled in Interface API structure for this Instance. */ +extern const ether_switch_api_t g_ether_switch_on_layer3_switch; + +/** @endcond */ + +/********************************************************************************************************************** + * Public Function Prototypes + **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_Open(ether_switch_ctrl_t * const p_ctrl, ether_switch_cfg_t const * const p_cfg); + +fsp_err_t R_LAYER3_SWITCH_Close(ether_switch_ctrl_t * const p_ctrl); + +fsp_err_t R_LAYER3_SWITCH_CreateDescriptorQueue(ether_switch_ctrl_t * const p_ctrl, + uint32_t * const p_queue_index, + layer3_switch_descriptor_queue_cfg_t const * const p_queue_cfg); + +fsp_err_t R_LAYER3_SWITCH_SetDescriptor(ether_switch_ctrl_t * const p_ctrl, + uint32_t queue_index, + layer3_switch_descriptor_t const * const p_descriptor); + +fsp_err_t R_LAYER3_SWITCH_GetDescriptor(ether_switch_ctrl_t * const p_ctrl, + uint32_t queue_index, + layer3_switch_descriptor_t * const p_descriptor); + +fsp_err_t R_LAYER3_SWITCH_StartDescriptorQueue(ether_switch_ctrl_t * const p_ctrl, uint32_t queue_index); + +fsp_err_t R_LAYER3_SWITCH_CallbackSet(ether_switch_ctrl_t * const p_ctrl, + void ( * p_callback)(ether_switch_callback_args_t *), + void * const p_context, + ether_switch_callback_args_t * const p_callback_memory); + +fsp_err_t R_LAYER3_SWITCH_ConfigurePort(ether_switch_ctrl_t * const p_ctrl, + uint8_t port, + layer3_switch_port_cfg_t * p_port_cfg); + +fsp_err_t R_LAYER3_SWITCH_AddTableEntry(ether_switch_ctrl_t * const p_ctrl, + layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg); +fsp_err_t R_LAYER3_SWITCH_SearchTableEntry(ether_switch_ctrl_t * const p_ctrl, + layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg); +fsp_err_t R_LAYER3_SWITCH_ConfigureTable(ether_switch_ctrl_t * const p_ctrl, + layer3_switch_table_cfg_t const * const p_table_cfg); +fsp_err_t R_LAYER3_SWITCH_GetTable(ether_switch_ctrl_t * const p_ctrl, layer3_switch_table_t * const p_table); +fsp_err_t R_LAYER3_SWITCH_ConfigureTAS(ether_switch_ctrl_t * const p_ctrl, + uint8_t port, + layer3_switch_tas_cfg_t * p_tas_cfg); +fsp_err_t R_LAYER3_SWITCH_EnableTAS(ether_switch_ctrl_t * const p_ctrl, uint8_t port); + +/*******************************************************************************************************************//** + * @} (end addtogroup LAYER3_SWITCH) + **********************************************************************************************************************/ + +/* Common macro for FSP header files. There is also a corresponding FSP_HEADER macro at the top of this file. 
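+ *
+ * Informal usage sketch for the queue-related prototypes above (a rough outline only;
+ * g_switch0_ctrl, g_switch0_cfg and g_rx_queue_cfg are application-side placeholders and
+ * error handling is omitted):
+ *
+ *     layer3_switch_instance_ctrl_t g_switch0_ctrl;
+ *     extern const ether_switch_cfg_t g_switch0_cfg;                      // Filled by the application
+ *     extern const layer3_switch_descriptor_queue_cfg_t g_rx_queue_cfg;   // See layer3_switch_descriptor_queue_cfg_t
+ *
+ *     void switch_rx_queue_example (void)
+ *     {
+ *         uint32_t queue_index = 0;
+ *
+ *         (void) R_LAYER3_SWITCH_Open(&g_switch0_ctrl, &g_switch0_cfg);
+ *         (void) R_LAYER3_SWITCH_CreateDescriptorQueue(&g_switch0_ctrl, &queue_index, &g_rx_queue_cfg);
+ *         (void) R_LAYER3_SWITCH_StartDescriptorQueue(&g_switch0_ctrl, queue_index);
+ *     }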
*/ +FSP_FOOTER + +#endif // R_LAYER3_SWITCH_H diff --git a/drivers/ra/fsp/inc/instances/r_rmac.h b/drivers/ra/fsp/inc/instances/r_rmac.h new file mode 100644 index 00000000..e18b60a2 --- /dev/null +++ b/drivers/ra/fsp/inc/instances/r_rmac.h @@ -0,0 +1,185 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +#ifndef R_RMAC_H +#define R_RMAC_H + +/* Common macro for FSP header files. There is also a corresponding FSP_FOOTER macro at the end of this file. */ + +/*********************************************************************************************************************** + * Includes + **********************************************************************************************************************/ +#include "r_ether_api.h" +#include "r_layer3_switch.h" + +FSP_HEADER + +/*********************************************************************************************************************** + * Macro definitions + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Typedef definitions + **********************************************************************************************************************/ + +typedef enum e_ether_previous_link_status +{ + ETHER_PREVIOUS_LINK_STATUS_DOWN = 0, ///< Previous link status is down + ETHER_PREVIOUS_LINK_STATUS_UP = 1, ///< Previous link status is up +} ether_previous_link_status_t; + +typedef enum e_ether_link_change +{ + ETHER_LINK_CHANGE_NO_CHANGE = 0, ///< Link status is no change + ETHER_LINK_CHANGE_LINK_DOWN = 1, ///< Link status changes to down + ETHER_LINK_CHANGE_LINK_UP = 2, ///< Link status changes to up +} ether_link_change_t; + +typedef enum e_ether_link_establish_status +{ + ETHER_LINK_ESTABLISH_STATUS_DOWN = 0, ///< Link establish status is down + ETHER_LINK_ESTABLISH_STATUS_UP = 1, ///< Link establish status is up +} ether_link_establish_status_t; + +/** Information of a descriptor queue. */ +typedef struct st_rmac_queue_info +{ + layer3_switch_descriptor_queue_cfg_t queue_cfg; ///< Queue configuration. + uint32_t index; ///< Queue index. +} rmac_queue_info_t; + +/** Write configuration. */ +typedef struct st_rmac_write_cfg +{ + uint32_t tx_timestamp_enable : 1; ///< Enable to get TX timestamp. + uint32_t reserved : 31; +} rmac_write_cfg_t; + +/** Timestamp. */ +typedef struct st_rmac_timestamp +{ + uint16_t sec_upper; ///< Timestamp second (Upper 16 bit). + uint32_t sec_lower; ///< Timestamp second (Lower 32 bit). + uint32_t ns; ///< Timestamp nanosecond. +} rmac_timestamp_t; + +/** Node to manage buffer. */ +typedef struct st_rmac_buffer_node +{ + void * p_buffer; ///< Pointer to the buffer. + uint32_t size; ///< Buffer size. +#if LAYER3_SWITCH_CFG_GPTP_ENABLE + rmac_timestamp_t timestamp; ///< RX timestamp value. +#endif + struct st_rmac_buffer_node * p_next; ///< Pointer to the next node. +} rmac_buffer_node_t; + +/** Queue of internal buffers. */ +typedef struct st_rmac_buffer_queue +{ + rmac_buffer_node_t * p_head; ///< Pointer to the head of the queue. + rmac_buffer_node_t * p_tail; ///< Pointer to the tail of the queue. +} rmac_buffer_queue_t; + +/* Extended configuration. */ +typedef struct st_rmac_extended_cfg +{ + ether_switch_instance_t const * p_ether_switch; ///< Pointer to ETHER_SWITCH instance. 
+
+    uint32_t tx_queue_num;                   ///< Number of TX descriptor queues.
+    uint32_t rx_queue_num;                   ///< Number of RX descriptor queues.
+
+    rmac_queue_info_t * p_ts_queue;          ///< Configuration of the TS queue.
+    rmac_queue_info_t * p_tx_queue_list;     ///< TX queue list.
+    rmac_queue_info_t * p_rx_queue_list;     ///< RX queue list.
+
+    IRQn_Type rmpi_irq;                      ///< Magic packet detection interrupt number.
+    uint32_t  rmpi_ipl;                      ///< Magic packet detection interrupt priority.
+    rmac_buffer_node_t * p_buffer_node_list; ///< List of buffer nodes for managing TX/RX buffers.
+    uint32_t buffer_node_num;                ///< Length of the buffer node list.
+} rmac_extended_cfg_t;
+
+/** Instance control block. DO NOT INITIALIZE. Initialization occurs when @ref ether_api_t::open is called. */
+typedef struct st_rmac_instance_ctrl
+{
+    uint32_t open;             // Whether or not the driver is open
+    ether_cfg_t const * p_cfg; // Pointer to initial configuration
+
+    bool is_lost_rx_packet;
+
+    /* IP dependent members. */
+    R_ETHA0_Type * p_reg_etha;
+    R_RMAC0_Type * p_reg_rmac;
+
+    /* RX statuses. */
+    uint32_t read_queue_index;                      ///< RX queue used for the next BufferRelease API call.
+    uint32_t rx_running_queue_index;                ///< Index of the RX queue that is currently running.
+    rmac_buffer_queue_t rx_completed_buffer_queue;  ///< RX buffers that have completed reception.
+    rmac_buffer_queue_t rx_unreleased_buffer_queue; ///< RX buffers that have been read but not yet released.
+    rmac_buffer_queue_t rx_empty_buffer_queue;      ///< RX buffers that have no data.
+    uint32_t rx_initialized_buffer_num;             ///< Number of initialized RX buffers. Used by the RxBufferUpdate API.
+
+    /* TX statuses. */
+    uint32_t write_queue_index;                  ///< TX queue used for the next Write API call.
+    uint32_t tx_running_queue_index;             ///< Index of the queue that is running now.
+    void * p_last_sent_buffer;                   ///< Pointer to the last sent TX buffer.
+    rmac_buffer_queue_t tx_pending_buffer_queue; ///< Delayed TX buffers.
+    rmac_buffer_queue_t tx_empty_buffer_queue;   ///< TX buffers that have no data.
+    uint32_t write_descriptor_count;             ///< Count of descriptors already written to the active queue.
+
+    rmac_buffer_queue_t buffer_node_pool;        ///< Pool of buffer nodes.
+
+    /* Timestamp features. */
+    rmac_timestamp_t * p_rx_timestamp;           ///< RX timestamp pointer.
+    rmac_timestamp_t tx_timestamp;               ///< TX timestamp.
+    uint32_t tx_timestamp_seq_num;               ///< Sequence number of TX timestamp.
+    rmac_write_cfg_t write_cfg;                  ///< Configuration of transmission.
+
+    /* Status of the Ethernet driver. */
+    ether_previous_link_status_t previous_link_status;   ///< Previous link status.
+    ether_link_change_t link_change;                      ///< Status of link change.
+    ether_link_establish_status_t link_establish_status;  ///< Current link status.
+    ether_wake_on_lan_t wake_on_lan;                      ///< Wake on LAN mode.
+
+    /* Pointer to callback and optional working memory */
+    void (* p_callback)(ether_callback_args_t *);
+    ether_callback_args_t * p_callback_memory;
+
+    /* Pointer to context to be passed into callback function */
+    void * p_context;
+} rmac_instance_ctrl_t;
+
+/**********************************************************************************************************************
+ * Exported global variables
+ **********************************************************************************************************************/
+
+/** @cond INC_HEADER_DEFS_SEC */
+/** Filled in Interface API structure for this Instance.
*/ +extern const ether_api_t g_ether_on_rmac; + +/** @endcond */ + +fsp_err_t R_RMAC_Open(ether_ctrl_t * p_ctrl, ether_cfg_t const * const p_cfg); +fsp_err_t R_RMAC_Close(ether_ctrl_t * p_ctrl); +fsp_err_t R_RMAC_BufferRelease(ether_ctrl_t * const p_ctrl); +fsp_err_t R_RMAC_RxBufferUpdate(ether_ctrl_t * const p_ctrl, void * const p_buffer); +fsp_err_t R_RMAC_LinkProcess(ether_ctrl_t * const p_ctrl); +fsp_err_t R_RMAC_WakeOnLANEnable(ether_ctrl_t * const p_ctrl); +fsp_err_t R_RMAC_Read(ether_ctrl_t * const p_ctrl, void * const p_buffer, uint32_t * const length_bytes); +fsp_err_t R_RMAC_Write(ether_ctrl_t * const p_ctrl, void * const p_buffer, uint32_t const frame_length); +fsp_err_t R_RMAC_TxStatusGet(ether_ctrl_t * const p_ctrl, void * const p_buffer_address); +fsp_err_t R_RMAC_CallbackSet(ether_ctrl_t * const p_api_ctrl, + void ( * p_callback)(ether_callback_args_t *), + void * const p_context, + ether_callback_args_t * const p_callback_memory); +fsp_err_t R_RMAC_SetWriteConfig(ether_ctrl_t * const p_ctrl, rmac_write_cfg_t * const p_write_cfg); +fsp_err_t R_RMAC_GetTxTimestamp(ether_ctrl_t * const p_ctrl, rmac_timestamp_t * const p_timestamp); +fsp_err_t R_RMAC_GetRxTimestamp(ether_ctrl_t * const p_ctrl, rmac_timestamp_t * const p_timestamp); + +/* Common macro for FSP header files. There is also a corresponding FSP_HEADER macro at the top of this file. */ +FSP_FOOTER + +#endif /* R_RMAC_H */ diff --git a/drivers/ra/fsp/inc/instances/r_rmac_phy.h b/drivers/ra/fsp/inc/instances/r_rmac_phy.h new file mode 100644 index 00000000..545db97f --- /dev/null +++ b/drivers/ra/fsp/inc/instances/r_rmac_phy.h @@ -0,0 +1,116 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*******************************************************************************************************************//** + * @addtogroup RMAC_PHY + * @{ + **********************************************************************************************************************/ + +#ifndef R_RMAC_PHY_H +#define R_RMAC_PHY_H + +#include "bsp_api.h" + +/* Common macro for FSP header files. There is also a corresponding FSP_FOOTER macro at the end of this file. */ +FSP_HEADER + +/*********************************************************************************************************************** + * Includes + **********************************************************************************************************************/ +#include "r_rmac_phy_cfg.h" +#include "r_ether_phy_api.h" + +/*********************************************************************************************************************** + * Macro definitions + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Typedef definitions + **********************************************************************************************************************/ + +/** Initialization state for read/write */ +typedef enum e_rmac_phy_interface_status +{ + RMAC_PHY_INTERFACE_STATUS_UNINITIALIZED = 0, ///< ETHER PHY interface is uninitialized + RMAC_PHY_INTERFACE_STATUS_INITIALIZED = 1 ///< ETHER PHY interface is initialized +} rmac_phy_interface_status_t; + +typedef enum e_rmac_phy_frame_format +{ + RMAC_PHY_FRAME_FORMAT_MDIO = 0, ///< Normal management frame format defined in clause 22. 
+    RMAC_PHY_FRAME_FORMAT_EMDIO = 1  ///< Extension management frame format defined in clause 45.
+} rmac_phy_frame_format_t;
+
+/** RMAC PHY control block. DO NOT INITIALIZE. Initialization occurs when @ref ether_phy_api_t::open is called. */
+typedef struct st_rmac_phy_instance_ctrl
+{
+    uint32_t open;                                ///< Used to determine if the channel is configured.
+    ether_phy_cfg_t const * p_ether_phy_cfg;      ///< Pointer to initial configurations.
+    R_RMAC0_Type * p_reg_rmac;                    ///< Pointer to RMAC peripheral registers.
+    uint32_t local_advertise;                     ///< Capabilities bitmap for local advertising.
+    rmac_phy_interface_status_t interface_status; ///< Initialization status of the ETHER PHY interface.
+    uint8_t phy_lsi_cfg_index;                    ///< Index of the PHY LSI that is currently the target of operation.
+} rmac_phy_instance_ctrl_t;
+
+/** RMAC PHY extended configuration. */
+typedef struct st_rmac_phy_extended_cfg
+{
+    void (* p_target_init)(rmac_phy_instance_ctrl_t * p_instance_ctrl); ///< Pointer to callback that is called to initialize the target.
+    bool (* p_target_link_partner_ability_get)(rmac_phy_instance_ctrl_t * p_instance_ctrl, uint32_t line_speed_duplex); ///< Pointer to callback that is called to get the link partner ability.
+    rmac_phy_frame_format_t frame_format; ///< Whether the management frame format is MDIO or eMDIO.
+    uint32_t mdc_clock_rate;              ///< MDC frequency division.
+    uint8_t mdio_hold_time;               ///< MDIO hold time adjustment.
+    uint8_t mdio_capture_time;            ///< MDIO capture time adjustment.
+    ether_phy_lsi_cfg_t const * p_phy_lsi_cfg_list[BSP_FEATURE_ETHER_NUM_CHANNELS]; ///< Pointer list of PHY LSI configurations.
+    uint8_t default_phy_lsi_cfg_index;    ///< Index of the default PHY LSI configuration.
+} rmac_phy_extended_cfg_t;
+
+/**********************************************************************************************************************
+ * Exported global variables
+ **********************************************************************************************************************/
+
+/** @cond INC_HEADER_DEFS_SEC */
+/** Filled in Interface API structure for this Instance.
*/ +extern const ether_phy_api_t g_ether_phy_on_rmac_phy; + +/** @endcond */ + +/*********************************************************************************************************************** + * Exported global functions (to be accessed by other files) + ***********************************************************************************************************************/ + +/********************************************************************************************************************** + * Public Function Prototypes + **********************************************************************************************************************/ +fsp_err_t R_RMAC_PHY_Open(ether_phy_ctrl_t * const p_ctrl, ether_phy_cfg_t const * const p_cfg); + +fsp_err_t R_RMAC_PHY_Close(ether_phy_ctrl_t * const p_ctrl); + +fsp_err_t R_RMAC_PHY_ChipInit(ether_phy_ctrl_t * const p_ctrl, ether_phy_cfg_t const * const p_cfg); + +fsp_err_t R_RMAC_PHY_Read(ether_phy_ctrl_t * const p_ctrl, uint32_t reg_addr, uint32_t * const p_data); + +fsp_err_t R_RMAC_PHY_Write(ether_phy_ctrl_t * const p_ctrl, uint32_t reg_addr, uint32_t data); + +fsp_err_t R_RMAC_PHY_StartAutoNegotiate(ether_phy_ctrl_t * const p_ctrl); + +fsp_err_t R_RMAC_PHY_LinkPartnerAbilityGet(ether_phy_ctrl_t * const p_ctrl, + uint32_t * const p_line_speed_duplex, + uint32_t * const p_local_pause, + uint32_t * const p_partner_pause); + +fsp_err_t R_RMAC_PHY_LinkStatusGet(ether_phy_ctrl_t * const p_ctrl); + +fsp_err_t R_RMAC_PHY_ChipSelect(ether_phy_ctrl_t * const p_ctrl, uint8_t port); + +/*******************************************************************************************************************//** + * @} (end addtogroup RMAC_PHY) + **********************************************************************************************************************/ + +/* Common macro for FSP header files. There is also a corresponding FSP_HEADER macro at the top of this file. */ +FSP_FOOTER + +#endif // R_RMAC_PHY_H diff --git a/drivers/ra/fsp/src/r_gptp/r_gptp.c b/drivers/ra/fsp/src/r_gptp/r_gptp.c new file mode 100644 index 00000000..d90b2e8c --- /dev/null +++ b/drivers/ra/fsp/src/r_gptp/r_gptp.c @@ -0,0 +1,538 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*********************************************************************************************************************** + * Includes , "Project Includes" + ***********************************************************************************************************************/ +#include "r_gptp.h" + +/*********************************************************************************************************************** + * Macro definitions + ***********************************************************************************************************************/ + +/** "GPTP" in ASCII. Used to determine if the control block is open. 
*/ +#define GPTP_OPEN (('G' << 24U) | ('P' << 16U) | ('T' << 8U) | ('P' << 0U)) + +#define GPTP_GPTP_TIMER_OFFSET (0x40) +#define GPTP_PULSE_OUTPUT_TIMER_OFFSET (0x30) +#define GPTP_SEC_LOWER_OFFSET (0x100000000L) + +#define GPTP_CLOCK_PERIOD_MASK (0x1F) +#define GPTP_OFFSET_MASK (0xFFFFFFFF) +#define GPTP_OFFSET_NANO_SEC_MASK (0x3FFFFFFF) +#define GPTP_SEC_UPPER_MASK (0xFFFF) + +#define GPTP_NANO_SEC_POSITION (27) +#define GPTP_SEC_UPPER_POSITION (32) + +#define GPTP_NANO_COUNT_FOR_1SEC (1000000000L) + +/*********************************************************************************************************************** + * Typedef definitions + ***********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Exported global functions (to be accessed by other files) + ***********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Exported global variables (to be accessed by other files) + ***********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Private function prototypes + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Private global variables + **********************************************************************************************************************/ + +/** GPTP API mapping for gptp module. */ +const gptp_api_t g_gptp_on_gptp = +{ + .open = R_GPTP_Open, + .close = R_GPTP_Close, + .timerCfg = R_GPTP_TimerCfg, + .start = R_GPTP_Start, + .stop = R_GPTP_Stop, + .timerValueGet = R_GPTP_TimerValueGet, + .timerOffsetSet = R_GPTP_TimerOffsetSet, + .timerRateSet = R_GPTP_TimerRateSet, + .callbackSet = R_GPTP_CallbackSet, +}; + +/*******************************************************************************************************************//** + * @addtogroup GPTP + * @{ + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Functions + **********************************************************************************************************************/ + +/********************************************************************************************************************//** + * Initializes the gptp module and applies configurations. + * + * @retval FSP_SUCCESS Module opened successfully. + * @retval FSP_ERR_ASSERTION Pointer to GPTP control block or configuration structure is NULL. + * @retval FSP_ERR_ALREADY_OPEN Control block has already been opened or channel is being used by another + * instance. Call close() then open() to reconfigure. + * @retval FSP_ERR_INVALID_POINTER Pointer to arguments are NULL. 
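+ *
+ * @par Example
+ * A minimal usage sketch. g_gptp0_ctrl, g_gptp0_cfg and g_gptp0_timer_cfg are placeholder
+ * application objects, not symbols defined by this module.
+ * @code
+ * fsp_err_t err = R_GPTP_Open(&g_gptp0_ctrl, &g_gptp0_cfg);
+ * if (FSP_SUCCESS == err)
+ * {
+ *     err = R_GPTP_TimerCfg(&g_gptp0_ctrl, 0, &g_gptp0_timer_cfg); // Configure gPTP timer 0.
+ *     err = R_GPTP_Start(&g_gptp0_ctrl, 0);                        // Start gPTP timer 0.
+ * }
+ * @endcode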
+ ***********************************************************************************************************************/ +fsp_err_t R_GPTP_Open (gptp_ctrl_t * const p_ctrl, gptp_cfg_t const * const p_cfg) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN((NULL != p_cfg), FSP_ERR_INVALID_POINTER); + + FSP_ERROR_RETURN((GPTP_OPEN != p_instance_ctrl->open), FSP_ERR_ALREADY_OPEN); +#endif + + /* Initialize configuration of gptp module. */ + p_instance_ctrl->p_reg_gptp = (void *) R_GPTP; + p_instance_ctrl->p_cfg = p_cfg; + p_instance_ctrl->p_callback = p_cfg->p_callback; + p_instance_ctrl->p_context = p_cfg->p_context; + p_instance_ctrl->p_callback_memory = NULL; + + p_instance_ctrl->open = GPTP_OPEN; + + return FSP_SUCCESS; +} /* End of function R_GPTP_Open() */ + +/********************************************************************************************************************//** + * Close the gptp module. + * + * @retval FSP_SUCCESS Successfully closed. + * @retval FSP_ERR_ASSERTION Pointer to GPTP control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened + * + ***********************************************************************************************************************/ +fsp_err_t R_GPTP_Close (gptp_ctrl_t * const p_ctrl) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + /* Clear configure block parameters. */ + p_instance_ctrl->p_cfg = NULL; + + /* Mark the channel not open so other APIs cannot use it. */ + p_instance_ctrl->open = 0U; + + return FSP_SUCCESS; +} /* End of function R_GPTP_Close() */ + +/*******************************************************************************************************************//** + * Configures the gptp timer parameters. + * + * @retval FSP_SUCCESS Command successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_ARGUMENT Invalid timer id. + * @retval FSP_ERR_INVALID_POINTER Invalid poiter to the timer cfg. + **********************************************************************************************************************/ +fsp_err_t R_GPTP_TimerCfg (gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_timer_cfg_t const * const p_timer_cfg) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + volatile uint32_t * p_ptpvic_reg; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN((BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer), FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(NULL != p_timer_cfg, FSP_ERR_INVALID_POINTER); +#endif + + p_ptpvic_reg = (volatile uint32_t *) ((uint8_t *) &p_instance_ctrl->p_reg_gptp->PTPTIVC0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + *p_ptpvic_reg = + (uint32_t) ((p_timer_cfg->clock_period & GPTP_CLOCK_PERIOD_MASK) << GPTP_NANO_SEC_POSITION); + + return FSP_SUCCESS; +} /* End of function R_GPTP_TimerCfg() */ + +/*******************************************************************************************************************//** + * Starts gptp timer. + * + * @retval FSP_SUCCESS Timer started.. 
+ * @retval FSP_ERR_ASSERTION         Pointer to control block is NULL.
+ * @retval FSP_ERR_NOT_OPEN          The control block has not been opened.
+ * @retval FSP_ERR_INVALID_ARGUMENT  Invalid timer id.
+ **********************************************************************************************************************/
+fsp_err_t R_GPTP_Start (gptp_ctrl_t * const p_ctrl, uint8_t timer)
+{
+    gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl;
+    R_GPTP_Type * p_gptp_reg;
+
+#if (GPTP_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(p_instance_ctrl);
+    FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    FSP_ERROR_RETURN((BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer), FSP_ERR_INVALID_ARGUMENT);
+#endif
+
+    p_gptp_reg = p_instance_ctrl->p_reg_gptp;
+    p_gptp_reg->PTPTMEC_b.TE |= (1U << timer);
+
+    return FSP_SUCCESS;
+} /* End of function R_GPTP_Start() */
+
+/*******************************************************************************************************************//**
+ * Stops gptp timer.
+ *
+ * @retval FSP_SUCCESS               Timer stopped.
+ * @retval FSP_ERR_ASSERTION         Pointer to control block is NULL.
+ * @retval FSP_ERR_NOT_OPEN          The control block has not been opened.
+ * @retval FSP_ERR_INVALID_ARGUMENT  Invalid timer id.
+ **********************************************************************************************************************/
+fsp_err_t R_GPTP_Stop (gptp_ctrl_t * const p_ctrl, uint8_t timer)
+{
+    gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl;
+    R_GPTP_Type * p_gptp_reg;
+
+#if (GPTP_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(p_instance_ctrl);
+    FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    FSP_ERROR_RETURN((BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer), FSP_ERR_INVALID_ARGUMENT);
+#endif
+
+    p_gptp_reg = p_instance_ctrl->p_reg_gptp;
+    p_gptp_reg->PTPTMDC_b.TD |= (1U << timer);
+
+    return FSP_SUCCESS;
+} /* End of function R_GPTP_Stop() */
+
+/*******************************************************************************************************************//**
+ * Gets the current time value of the timer with the specified gptp timer number.
+ *
+ * @retval FSP_SUCCESS               Upper seconds, lower seconds, and nanoseconds stored in p_timer_value.
+ * @retval FSP_ERR_ASSERTION         Pointer to control block is NULL.
+ * @retval FSP_ERR_NOT_OPEN          The control block has not been opened.
+ * @retval FSP_ERR_INVALID_POINTER   Pointer to arguments are NULL.
+ * @retval FSP_ERR_INVALID_ARGUMENT  Invalid input parameter.
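+ *
+ * @par Example
+ * Illustrative read of gPTP timer 0; g_gptp0_ctrl is a placeholder control block that has
+ * already been opened by the application.
+ * @code
+ * gptp_timer_value_t value;
+ * fsp_err_t err = R_GPTP_TimerValueGet(&g_gptp0_ctrl, 0, &value);
+ * // On success, value.time_sec_upper, value.time_sec_lower and value.time_nsec hold the current time.
+ * @endcode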
+ **********************************************************************************************************************/ +fsp_err_t R_GPTP_TimerValueGet (gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_timer_value_t * const p_timer_value) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + R_GPTP_Type * p_gptp_reg; + volatile uint32_t * p_ptpgptptml_reg; + volatile uint32_t * p_ptpgptptmm_reg; + volatile uint32_t * p_ptpgptptmu_reg; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + + FSP_ERROR_RETURN((NULL != p_timer_value), FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer, FSP_ERR_INVALID_ARGUMENT); +#endif + + p_gptp_reg = p_instance_ctrl->p_reg_gptp; + + p_ptpgptptml_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPGPTPTML0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + p_timer_value->time_nsec = *p_ptpgptptml_reg; + + p_ptpgptptmm_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPGPTPTMM0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + p_timer_value->time_sec_lower = *p_ptpgptptmm_reg; + + p_ptpgptptmu_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPGPTPTMU0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + p_timer_value->time_sec_upper = (uint16_t) (*p_ptpgptptmu_reg); + + return FSP_SUCCESS; +} /* End of R_GPTP_GptpTimeValueGet() */ + +/********************************************************************************************************************//** + * Sets the offset correction for the specified gptp timer number. + * + * @retval FSP_SUCCESS Time offset set successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_ARGUMENT Invalid input parameter. + ***********************************************************************************************************************/ +fsp_err_t R_GPTP_TimerOffsetSet (gptp_ctrl_t * const p_ctrl, uint8_t timer, int64_t offset) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + R_GPTP_Type * p_gptp_reg; + + int64_t offset_abs; + int64_t offset_nsec; + int64_t offset_sec_lower; + int64_t offset_sec_upper; + + volatile uint32_t * p_ptptovcl_reg; + volatile uint32_t * p_ptptovcm_reg; + volatile uint32_t * p_ptptovcu_reg; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer, FSP_ERR_INVALID_ARGUMENT); +#endif + + p_gptp_reg = p_instance_ctrl->p_reg_gptp; + + /* Obtains the offset value currently applied to the gPTP timer and adds the set offset. */ + p_ptptovcl_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPTOVCL0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + p_ptptovcm_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPTOVCM0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + p_ptptovcu_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPTOVCU0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + + if (0 <= offset) + { + /* When advancing a gPTP timer, add an offset value. */ + offset_abs = offset; + + /* Check for carryover during nanosecond addition. 
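+         * The offset is held in three register fields per timer: nanoseconds in PTPTOVCLn
+         * (30 bits), lower seconds in PTPTOVCMn (32 bits) and upper seconds in PTPTOVCUn
+         * (16 bits), so a nanosecond sum of one second or more must carry into the seconds words.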
*/ + offset_nsec = (int64_t) (*p_ptptovcl_reg) + (offset_abs % GPTP_NANO_COUNT_FOR_1SEC); + if (GPTP_NANO_COUNT_FOR_1SEC <= offset_nsec) + { + offset_nsec -= GPTP_NANO_COUNT_FOR_1SEC; + offset_sec_lower = (int64_t) (*p_ptptovcm_reg) + \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) & GPTP_OFFSET_MASK) + 1; + } + else + { + offset_sec_lower = (int64_t) (*p_ptptovcm_reg) + \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) & GPTP_OFFSET_MASK); + } + + /* Check for carryover during lower second addition. */ + if ((int64_t) GPTP_OFFSET_MASK < offset_sec_lower) + { + offset_sec_lower -= (int64_t) GPTP_SEC_LOWER_OFFSET; + offset_sec_upper = (int64_t) (*p_ptptovcu_reg) + \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) >> GPTP_SEC_UPPER_POSITION) + 1; + } + else + { + offset_sec_upper = (int64_t) (*p_ptptovcu_reg) + \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) >> GPTP_SEC_UPPER_POSITION); + } + } + else + { + /* To set the gPTP timer back, subtract the offset value. */ + offset_abs = offset * -1; + + /* When subtracting nanoseconds, check for borrowing. */ + offset_nsec = (int64_t) (*p_ptptovcl_reg) - (offset_abs % GPTP_NANO_COUNT_FOR_1SEC); + if (0 > offset_nsec) + { + offset_nsec += GPTP_NANO_COUNT_FOR_1SEC; + offset_sec_lower = (int64_t) (*p_ptptovcm_reg) - \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) & GPTP_OFFSET_MASK) - 1; + } + else + { + offset_sec_lower = (int64_t) (*p_ptptovcm_reg) - \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) & GPTP_OFFSET_MASK); + } + + /* When subtracting lower second, check for borrowing. */ + if (0 > offset_sec_lower) + { + offset_sec_lower += (int64_t) GPTP_SEC_LOWER_OFFSET; + offset_sec_upper = (int64_t) (*p_ptptovcu_reg) - \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) >> GPTP_SEC_UPPER_POSITION) - 1; + } + else + { + offset_sec_upper = (int64_t) (*p_ptptovcu_reg) - \ + ((offset_abs / GPTP_NANO_COUNT_FOR_1SEC) >> GPTP_SEC_UPPER_POSITION); + } + } + + *p_ptptovcu_reg = (uint16_t) offset_sec_upper; + *p_ptptovcm_reg = (uint32_t) offset_sec_lower; + *p_ptptovcl_reg = (uint32_t) offset_nsec & GPTP_OFFSET_NANO_SEC_MASK; + + return FSP_SUCCESS; +} /* End of function R_GPTP_TimeOffsetSet() */ + +/********************************************************************************************************************//** + * Sets clock rate correction for the specified timer. + * + * @retval FSP_SUCCESS Time rate set successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to arguments are NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Invalid input parameter. 
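+ *
+ * @par Example
+ * Illustrative call; g_gptp0_ctrl is a placeholder control block and rate is an
+ * application-computed value that is written directly to the PTPTIVC register of the selected timer.
+ * @code
+ * fsp_err_t err = R_GPTP_TimerRateSet(&g_gptp0_ctrl, 0, rate);
+ * @endcode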
+ ***********************************************************************************************************************/ +fsp_err_t R_GPTP_TimerRateSet (gptp_ctrl_t * const p_ctrl, uint8_t timer, uint32_t rate) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + R_GPTP_Type * p_gptp_reg; + volatile uint32_t * p_ptpvic_reg; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer, FSP_ERR_INVALID_ARGUMENT); +#endif + + p_gptp_reg = p_instance_ctrl->p_reg_gptp; + + p_ptpvic_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->PTPTIVC0 + \ + (GPTP_GPTP_TIMER_OFFSET * timer)); + *p_ptpvic_reg = rate; + + return FSP_SUCCESS; +} /* End of function R_GPTP_TimeRateSet() */ + +/********************************************************************************************************************//** + * Sets pulse generator. + * + * @retval FSP_SUCCESS Pulse generator set successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to arguments are NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Invalid input parameter. + ***********************************************************************************************************************/ +fsp_err_t R_GPTP_PulseGeneratorSet (gptp_ctrl_t * const p_ctrl, uint8_t timer, gptp_pulse_generator_t * p_pulse) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + gptp_pulsed_output_cfg_t * p_pulsed_output_cfg; + R_GPTP_Type * p_gptp_reg; + + volatile uint32_t * p_potcr_reg; + volatile uint32_t * p_potstru_reg; + volatile uint32_t * p_potstrm_reg; + volatile uint32_t * p_potstrl_reg; + volatile uint32_t * p_potperu_reg; + volatile uint32_t * p_potperm_reg; + volatile uint32_t * p_potperl_reg; + volatile uint32_t * p_potpwr_reg; + +#if (GPTP_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + + FSP_ERROR_RETURN((NULL != p_pulse), FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(BSP_FEATURE_ESWM_GPTP_TIMER_NUM > timer, FSP_ERR_INVALID_ARGUMENT); + + for (uint8_t i = 0; i < BSP_FEATURE_ESWM_GPTP_PULSE_GENERATOR_NUM; i++) + { + FSP_ERROR_RETURN(BSP_FEATURE_ESWM_GPTP_PULSE_GENERATOR_NUM > p_pulse->p_pulsed_output_cfg_list[i]->pulse_num, + FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(GPTP_NANO_COUNT_FOR_1SEC > p_pulse->p_pulsed_output_cfg_list[i]->period_ns, + FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(GPTP_NANO_COUNT_FOR_1SEC > p_pulse->p_pulsed_output_cfg_list[i]->start_ns, + FSP_ERR_INVALID_ARGUMENT); + } +#endif + + p_gptp_reg = p_instance_ctrl->p_reg_gptp; + + for (uint8_t i = 0; i < BSP_FEATURE_ESWM_GPTP_PULSE_GENERATOR_NUM; i++) + { + p_pulsed_output_cfg = p_pulse->p_pulsed_output_cfg_list[i]; + p_potcr_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTCR0 + + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + + /* Stop pulse output for modification POTCFGR register. */ + *p_potcr_reg = 0U; + + p_gptp_reg->POTCFGR_b.REFSEL = (uint32_t) (timer & 0x1); + + /* Modification Pulse output generation registers. 
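+         * The start time is written to POTSTRUn/POTSTRMn/POTSTRLn, the period to
+         * POTPERUn/POTPERMn/POTPERLn, and the pulse width to POTPWRn of the selected output.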
*/ + p_potstru_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTSTRU0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potstru_reg = (p_pulsed_output_cfg->start_sec_upper & GPTP_SEC_UPPER_MASK); + + p_potstrm_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTSTRM0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potstrm_reg = p_pulsed_output_cfg->start_sec_lower; + + p_potstrl_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTSTRL0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potstrl_reg = (p_pulsed_output_cfg->start_ns & GPTP_OFFSET_NANO_SEC_MASK); + + p_potperu_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTPERU0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potperu_reg = (p_pulsed_output_cfg->period_sec_upper & GPTP_SEC_UPPER_MASK); + + p_potperm_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTPERM0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potperm_reg = p_pulsed_output_cfg->period_sec_lower; + + p_potperl_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTPERL0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potperl_reg = (p_pulsed_output_cfg->period_ns & GPTP_OFFSET_NANO_SEC_MASK); + + p_potpwr_reg = (volatile uint32_t *) ((uint8_t *) &p_gptp_reg->POTPWR0 + \ + (GPTP_PULSE_OUTPUT_TIMER_OFFSET * p_pulsed_output_cfg->pulse_num)); + *p_potpwr_reg = (p_pulsed_output_cfg->wide & GPTP_SEC_UPPER_MASK); + + /* Apply generate pulse setting. */ + *p_potcr_reg = 1U; + } + + return FSP_SUCCESS; +} /* End of function R_GPTP_PulseGeneratorSet() */ + +/*******************************************************************************************************************//** + * Updates the user callback with the option to provide memory for the callback argument structure. + * Implements @ref gptp_api_t::callbackSet. + * + * @retval FSP_SUCCESS Callback updated successfully. + * @retval FSP_ERR_ASSERTION A required pointer is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_NO_CALLBACK_MEMORY p_callback is non-secure and p_callback_memory is either secure or NULL. + **********************************************************************************************************************/ +fsp_err_t R_GPTP_CallbackSet (gptp_ctrl_t * const p_ctrl, + void ( * p_callback)(gptp_callback_args_t *), + void * const p_context, + gptp_callback_args_t * const p_callback_memory) +{ + gptp_instance_ctrl_t * p_instance_ctrl = (gptp_instance_ctrl_t *) p_ctrl; + +#if GPTP_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ASSERT(p_callback); + FSP_ERROR_RETURN(GPTP_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + +#if BSP_TZ_SECURE_BUILD + + /* Get security state of p_callback */ + bool callback_is_secure = + (NULL == cmse_check_address_range((void *) p_callback, sizeof(void *), CMSE_AU_NONSECURE)); + + #if GPTP_CFG_PARAM_CHECKING_ENABLE + + /* In secure projects, p_callback_memory must be provided in non-secure space if p_callback is non-secure */ + gptp_callback_args_t * const p_callback_memory_checked = cmse_check_pointed_object(p_callback_memory, + CMSE_AU_NONSECURE); + FSP_ERROR_RETURN(callback_is_secure || (NULL != p_callback_memory_checked), FSP_ERR_NO_CALLBACK_MEMORY); + #endif +#endif + + /* Store callback and context */ +#if BSP_TZ_SECURE_BUILD + p_instance_ctrl->p_callback = callback_is_secure ? 
p_callback : + (void (*)(gptp_callback_args_t *))cmse_nsfptr_create(p_callback); +#else + p_instance_ctrl->p_callback = p_callback; +#endif + p_instance_ctrl->p_context = p_context; + p_instance_ctrl->p_callback_memory = p_callback_memory; + + return FSP_SUCCESS; +} + +/*******************************************************************************************************************//** + * @} (end addtogroup GPTP) + **********************************************************************************************************************/ diff --git a/drivers/ra/fsp/src/r_layer3_switch/r_layer3_switch.c b/drivers/ra/fsp/src/r_layer3_switch/r_layer3_switch.c new file mode 100644 index 00000000..8c74054e --- /dev/null +++ b/drivers/ra/fsp/src/r_layer3_switch/r_layer3_switch.c @@ -0,0 +1,3566 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*********************************************************************************************************************** + * Includes , "Project Includes" + ***********************************************************************************************************************/ +#include "r_layer3_switch.h" +#include "r_rmac_phy.h" + +/*********************************************************************************************************************** + * Macro definitions + ***********************************************************************************************************************/ + +/** "ESWM" in ASCII. Used to determine if the control block is open. */ +#define LAYER3_SWITCH_OPEN (('E' << 24U) | ('S' << 16U) | ('W' << 8U) | ('M' << 0U)) + +#define LAYER3_SWITCH_ETHA_REG_SIZE (R_ETHA1_BASE - R_ETHA0_BASE) +#define LAYER3_SWITCH_RMAC_REG_SIZE (R_RMAC1_BASE - R_RMAC0_BASE) +#define LAYER3_SWITCH_REGISTER_SIZE (32) +#define LAYER3_SWITCH_QUEUE_ADDRESS_UPPER_MASK (0xFF00000000) +#define LAYER3_SWITCH_QUEUE_ADDRESS_UPPER_POSITION (32) +#define LAYER3_SWITCH_QUEUE_ADDRESS_LOWER_MASK (0xFFFFFFFF) + +#define LAYER3_SWITCH_FWPBFC_REGISTER_OFFSET (0x10) +#define LAYER3_SWITCH_FWPBFCSDC0_REGISTER_OFFSET (0x10) +#define LAYER3_SWITCH_INTERRUPT_REGISTER_OFFSET (0x10) +#define LAYER3_SWITCH_FWPC_REGISTER_OFFSET (0x10) +#define LAYER3_SWITCH_PORT_CONFIG_REGISTER_OFFSET (0x10) +#define LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_REGISTOR_OFFSET (0x08) +#define LAYER3_SWITCH_TS_DESCRIPTOR_TIMER_REGISTOR_OFFSET (0x04) + +/* VLAN tag bit position. */ +#define LAYER3_SWITCH_VLAN_TAG_DEI_POSITION (12UL) +#define LAYER3_SWITCH_VLAN_TAG_PCP_POSITION (13UL) + +/* Definitions for forwarding feature. */ +#define LAYER3_SWITCH_MAC_ENTRY_MAX_NUM (0x7FFU) +#define LAYER3_SWITCH_VLAN_ENTRY_MAX_NUM (0xFFFU) +#define LAYER3_SWITCH_L3_ENTRY_MAX_NUM (0xFFU) +#define LAYER3_SWITCH_L3_UPDATE_REMAPPING_MAX_NUM (0x1FU) +#define LAYER3_SWITCH_CLOCK_100MHZ (100U) + +/* MFWD Register bitmask and position. */ +#define R_MFWD_FWIP6OC_IP6IPOM1_Pos (16UL) +#define R_MFWD_FWIP6OC_IP6IPOM1_Msk (0x10000UL) +#define R_MFWD_FWIP6OC_IP6IPO1_Pos (20UL) +#define R_MFWD_FWIP6OC_IP6IPO1_Msk (0xf00000UL) +#define R_MFWD_FWRFVC0_RFSV0_Pos (0UL) +#define R_MFWD_FWRFVC0_RFSV0_Msk (0xffUL) +#define R_MFWD_FWRFVC0_RFSV1_Pos (8UL) +#define R_MFWD_FWRFVC0_RFSV1_Msk (0xff00UL) + +/* Bitmask for the CPU port (GWCA). 
*/ +#define LAYER3_SWITCH_PORT_CPU_BITMASK (1 << BSP_FEATURE_ESWM_GWCA_PORT) +#define LAYER3_SWITCH_EATASIGSC_MASK (R_ETHA0_EATASIGSC_TASIGS0_Msk | \ + R_ETHA0_EATASIGSC_TASIGS1_Msk | \ + R_ETHA0_EATASIGSC_TASIGS2_Msk | \ + R_ETHA0_EATASIGSC_TASIGS3_Msk | \ + R_ETHA0_EATASIGSC_TASIGS4_Msk | \ + R_ETHA0_EATASIGSC_TASIGS5_Msk | \ + R_ETHA0_EATASIGSC_TASIGS6_Msk | \ + R_ETHA0_EATASIGSC_TASIGS7_Msk) + +/* Bitmask for unique number of timestamp. */ +#define LAYER3_SWITCH_TS_UNIQUE_NUMBER_BITMASK (0xFF) + +/* For CBS feature. */ +#define LAYER3_SWITCH_CBS_REQUEST_DELAY (50) +#define LAYER3_SWITCH_CBS_INTERFERENCE_SIZE_OFFSET (20) +#define LAYER3_SWITCH_CBS_BITS_PER_BYTE (8) +#define LAYER3_SWITCH_LINK_SPEED_100M (100000000) +#define LAYER3_SWITCH_LINK_SPEED_1G (1000000000) +#define LAYER3_SWITCH_MAXIMUM_FRAME_SIZE (1514U) + +/* FRER feature. */ +#define LAYER3_SWITCH_FWSEQNGC_REGISTER_OFFSET (0x08) +#define LAYER3_SWITCH_FRER_CHECK_PERIOD_BITMASK (0xFFFFUL) +#define LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK (0x7FUL) +#define LAYER3_SWITCH_FRER_SEQ_GENERATOR_NUM_BITMASK (0x1FUL) +#define LAYER3_SWITCH_FRER_SYSTEM_CLOCK_BITMASK (0x3FFUL) +#define LAYER3_SWITCH_SEQ_REG_MAX_NUM (32) + +/*********************************************************************************************************************** + * Typedef definitions + ***********************************************************************************************************************/ +#if defined(__ARMCC_VERSION) || defined(__ICCARM__) +typedef void (BSP_CMSE_NONSECURE_CALL * layer3_switch_prv_ns_callback)(ether_switch_callback_args_t * p_args); +#elif defined(__GNUC__) +typedef BSP_CMSE_NONSECURE_CALL void (*volatile layer3_switch_prv_ns_callback)(ether_switch_callback_args_t * p_args); +#endif + +/* Operation mode of ETHA and GWCA. 
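+ * An agent is stepped through DISABLE before entering CONFIG (for register setup) or
+ * OPERATION (to start traffic), as done in R_LAYER3_SWITCH_Open() via
+ * r_layer3_switch_update_gwca_operation_mode() and r_layer3_switch_update_etha_operation_mode().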
*/ +typedef enum e_layer3_switch_agent_mode +{ + LAYER3_SWITCH_AGENT_MODE_RESET = (0), + LAYER3_SWITCH_AGENT_MODE_DISABLE = (1), + LAYER3_SWITCH_AGENT_MODE_CONFIG = (2), + LAYER3_SWITCH_AGENT_MODE_OPERATION = (3), +} layer3_switch_agent_mode_t; + +/*********************************************************************************************************************** + * Exported global functions (to be accessed by other files) + ***********************************************************************************************************************/ +void layer3_switch_gwdi_isr(void); +void layer3_switch_eaei_isr(void); + +/*********************************************************************************************************************** + * Exported global variables (to be accessed by other files) + ***********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Private function prototypes + **********************************************************************************************************************/ +static void r_layer3_switch_module_start(void); +static void r_layer3_switch_update_gwca_operation_mode( + layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_agent_mode_t mode); +static void r_layer3_switch_update_etha_operation_mode(uint8_t port, + layer3_switch_agent_mode_t mode); +static void r_layer3_switch_reset_coma(void); +static void r_layer3_switch_close_etha_ports(layer3_switch_instance_ctrl_t * p_instance_ctrl); +static void r_layer3_switch_initialize_linkfix_table( + layer3_switch_instance_ctrl_t * p_instance_ctrl); +static layer3_switch_descriptor_t * r_layer3_switch_get_descriptor(layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t queue_index, + uint32_t descriptor_index); +static layer3_switch_descriptor_t * r_layer3_switch_get_current_descriptor( + layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t queue_index); +static bool r_layer3_switch_is_descriptor_queue_active( + layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t queue_index); +static void r_layer3_switch_configure_mac_address(uint8_t * p_mac_address, uint8_t port); +static void r_layer3_switch_configure_port( + layer3_switch_instance_ctrl_t * const p_instance_ctrl, + uint8_t port, + layer3_switch_port_cfg_t const * const p_port_cfg); + +/* Forwarding features. 
*/ +static void r_layer3_switch_configure_forwarding_port(layer3_switch_forwarding_port_cfg_t const * const port_cfg, + uint8_t port); +static void r_layer3_switch_reset_table(layer3_switch_instance_ctrl_t * p_instance_ctrl); +static fsp_err_t r_layer3_switch_learn_mac_entry(layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg); +static fsp_err_t r_layer3_switch_search_mac_entry(layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg); +static fsp_err_t r_layer3_switch_read_mac_entry(uint16_t offset, layer3_switch_table_entry_t * p_entry); +static fsp_err_t r_layer3_switch_learn_vlan_entry(layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg); +static fsp_err_t r_layer3_switch_search_vlan_entry(layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg); +static fsp_err_t r_layer3_switch_read_vlan_entry(uint16_t offset, layer3_switch_table_entry_t * p_entry); +static fsp_err_t r_layer3_switch_learn_l3_entry(layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg); +static fsp_err_t r_layer3_switch_search_l3_entry(layer3_switch_frame_filter_t const * p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg); +static fsp_err_t r_layer3_switch_learn_l3_update(layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_l3_update_config_t * const p_config); +static fsp_err_t r_layer3_switch_search_l3_update(uint8_t routing_number, layer3_switch_l3_update_config_t * p_config); + +/* MAC/VLAN forwarding. */ +static fsp_err_t r_layer3_switch_enable_mac_table_aging(uint32_t aging_time); +static fsp_err_t r_layer3_switch_extract_vlan_id(layer3_switch_frame_filter_t const * const p_target_frame, + uint16_t * p_vlan_id); +static void r_layer3_switch_initialize_vlan_port(layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_forwarding_port_cfg_t const * const p_port_cfg, + uint8_t port); + +/* L3 forwarding. */ +static fsp_err_t r_layer3_switch_configure_stream_filter(layer3_switch_l3_stream_filter_cfg_t const * p_filter_cfg); +static uint16_t r_layer3_switch_calculate_l3_hash(layer3_switch_frame_filter_t const * p_frame); +static uint8_t r_layer3_switch_calculate_l3_format_code(layer3_switch_frame_filter_t const * p_frame); +static fsp_err_t r_layer3_switch_calculate_l3_stream_id(layer3_switch_frame_filter_t const * p_frame, + layer3_switch_stream_id_t * p_stream_id); +static uint32_t r_layer3_switch_convert_vlan_tag_to_int(layer3_switch_frame_vlan_tag_t const * p_vlan_s_tag, + layer3_switch_frame_vlan_tag_t const * p_vlan_c_tag); +static fsp_err_t r_layer3_switch_remapping_l3_update(layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t routing_number, + layer3_switch_l3_update_config_t * p_update_cfg); +static uint32_t r_layer3_switch_convert_array_to_int(uint8_t * array, uint8_t length); + +/* TSN feature. */ +static void r_layer3_switch_configure_cbs(layer3_switch_instance_ctrl_t const * const p_instance_ctrl, + uint8_t port, + layer3_switch_cbs_cfg_t const * const p_cbs_cfg); +static uint32_t r_layer3_switch_calculate_max_interference_size(uint8_t queue_number, + uint8_t const * const p_max_burst_num_list); + +/* FRER feature. 
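+ * (Frame Replication and Elimination for Reliability, IEEE 802.1CB.)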
*/ +static fsp_err_t r_layer3_switch_frer_init(layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frer_cfg_t const * p_frer_cfg); +static void r_layer3_switch_frer_table_reset(void); +static void r_layer3_switch_configure_sequence_number_generation(layer3_switch_instance_ctrl_t * p_instance_ctrl); +static fsp_err_t r_layer3_switch_learn_frer_entry(layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frer_entry_t * const p_frer_entry, + layer3_switch_frer_entry_t * const p_sequence_recovery, + uint32_t sequence_recovery_id); +static fsp_err_t r_layer3_switch_learn_frer_individual_recovery(layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frer_entry_cfg_t * const p_frer_entry_cfg); + +static void r_layer3_switch_call_callback_for_ports(layer3_switch_instance_ctrl_t * p_instance_ctrl, + ether_switch_callback_args_t * p_callback_args, + uint32_t ports); +static void r_layer3_switch_call_callback(void (* p_callback)( + ether_switch_callback_args_t *), + ether_switch_callback_args_t * p_callback_args, + ether_switch_callback_args_t * const p_callback_memory); + +/* Timestamp feature. */ +static fsp_err_t r_layer3_switch_create_tx_timestamp_queue(ether_switch_ctrl_t * const p_ctrl, + const layer3_switch_descriptor_queue_cfg_t * const p_queue_cfg, + uint32_t * const p_ts_descriptor_queue_index); + +/*********************************************************************************************************************** + * Private global variables + **********************************************************************************************************************/ + +/** ETHER SWITCH API mapping for layer3 switch module. */ +const ether_switch_api_t g_ether_switch_on_layer3_switch = +{ + .open = R_LAYER3_SWITCH_Open, + .close = R_LAYER3_SWITCH_Close, +}; + +/*******************************************************************************************************************//** + * @addtogroup LAYER3_SWITCH + * @{ + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Functions + **********************************************************************************************************************/ + +/********************************************************************************************************************//** + * Initializes the switch module and applies configurations. Implements @ref ether_switch_api_t::open. + * + * @retval FSP_SUCCESS Channel opened successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block, config structure or extended config structure is NULL. + * @retval FSP_ERR_ALREADY_OPEN Control block has already been opened or channel is being used by another + * instance. Call close() then open() to reconfigure. + * @retval FSP_ERR_ETHER_ERROR_PHY_COMMUNICATION Initialization of PHY-LSI failed. + * @retval FSP_ERR_INVALID_ARGUMENT Invalid configuration value. 
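+ *
+ * @par Example
+ * A minimal open sketch. g_switch0_ctrl and g_switch0_cfg are placeholder application objects;
+ * the extended configuration referenced by g_switch0_cfg supplies the port, PHY and queue settings.
+ * @code
+ * fsp_err_t err = R_LAYER3_SWITCH_Open(&g_switch0_ctrl, &g_switch0_cfg);
+ * if (FSP_SUCCESS == err)
+ * {
+ *     // Descriptor queues can now be created with R_LAYER3_SWITCH_CreateDescriptorQueue().
+ * }
+ * @endcode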
+ ************************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_Open (ether_switch_ctrl_t * const p_ctrl, ether_switch_cfg_t const * const p_cfg) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + layer3_switch_extended_cfg_t * p_extend; + ether_phy_instance_t const * p_ether_phy; + volatile uint32_t * p_mfwd_fwpbfc_reg; + volatile uint32_t * p_etha_eatdqdcn_reg; + R_ETHA0_Type * p_reg_etha; + R_RMAC0_Type * p_reg_rmac; + + fsp_err_t phy_err = FSP_SUCCESS; + +#if (LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE) + + /** Check parameters. */ + FSP_ASSERT(p_instance_ctrl); + FSP_ASSERT(p_cfg); + + p_extend = (layer3_switch_extended_cfg_t *) p_cfg->p_extend; + FSP_ASSERT(p_cfg->p_extend); + FSP_ERROR_RETURN((0 <= p_cfg->irq), FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN((LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM <= BSP_FEATURE_ESWM_MAX_QUEUE_NUM), + FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(LAYER3_SWITCH_L3_ENTRY_MAX_NUM > p_extend->l3_filter_list_length, FSP_ERR_INVALID_ARGUMENT); +#else + p_extend = (layer3_switch_extended_cfg_t *) p_cfg->p_extend; +#endif + + FSP_ERROR_RETURN((LAYER3_SWITCH_OPEN != p_instance_ctrl->open), FSP_ERR_ALREADY_OPEN); + + /* Initialize parameters. */ + p_instance_ctrl->p_cfg = p_cfg; + p_instance_ctrl->p_callback = p_cfg->p_callback; + p_instance_ctrl->p_gwca_reg = R_GWCA0; + p_instance_ctrl->table_status = LAYER3_SWITCH_TABLE_STATUS_UNINITIALIZED; + p_instance_ctrl->l3_entry_count = 0; + p_instance_ctrl->l3_routing_number = 0; + + for (uint8_t i = 0; i < BSP_FEATURE_ESWM_TS_DESCRIPTOR_QUEUE_MAX_NUM; i++) + { + p_instance_ctrl->ts_descriptor_queue_status_list[i] = LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_STATUS_UNUSED; + } + + /* Clear module stops. */ + r_layer3_switch_module_start(); + + /* Reset COMA IP. */ + r_layer3_switch_reset_coma(); + + /* When a r_gptp instance is set, initialize it. */ + if (NULL != p_extend->p_gptp_instance) + { + p_extend->p_gptp_instance->p_api->open(p_extend->p_gptp_instance->p_ctrl, p_extend->p_gptp_instance->p_cfg); + } + + /* Configure destination ports of forwarding feature. */ + for (uint32_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { + p_mfwd_fwpbfc_reg = + (uint32_t *) ((uintptr_t) &(R_MFWD->FWPBFC0) + (i * LAYER3_SWITCH_FWPBFC_REGISTER_OFFSET)); + *p_mfwd_fwpbfc_reg |= (R_MFWD_FWPBFC0_PBDV_Msk & p_extend->fowarding_target_port_masks[i]); + } + + /* Enable extended descriptor for each agents. */ + R_MFWD->FWPC10_b.DDE = 0x1; + R_MFWD->FWPC11_b.DDE = 0x1; + R_MFWD->FWPC12_b.DDE = 0x1; + + /* Set GWCA to CONFIG mode. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_CONFIG); + + /* Reset AXI RAM. */ + p_instance_ctrl->p_gwca_reg->GWARIRM_b.ARIOG = 1; + FSP_HARDWARE_REGISTER_WAIT(p_instance_ctrl->p_gwca_reg->GWARIRM_b.ARR, 1); + + /* Initialize LINKFIX table. */ + r_layer3_switch_initialize_linkfix_table(p_instance_ctrl); + + /* Set GWCA to OPERATION mode. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_OPERATION); + + /* ETHA ports initialization. 
*/ + for (uint8_t channel = 0; (channel < BSP_FEATURE_ETHER_NUM_CHANNELS) && (FSP_SUCCESS == phy_err); channel++) + { + if (NULL != p_extend->p_ether_phy_instances[channel]) + { + /* Set ETHA to CONFIG mode. */ + r_layer3_switch_update_etha_operation_mode(channel, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_etha_operation_mode(channel, LAYER3_SWITCH_AGENT_MODE_CONFIG); + + /* Configure the port specific feature. */ + if (NULL != p_extend->p_port_cfg_list[channel]) + { + r_layer3_switch_configure_port(p_instance_ctrl, channel, p_extend->p_port_cfg_list[channel]); + } + + /* Configure queue depth for each transmission IPV queue. */ + p_reg_etha = (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * channel)); + p_etha_eatdqdcn_reg = &p_reg_etha->EATDQDC0; + for (uint8_t j = 0; j < BSP_FEATURE_ESWM_ETHA_IPV_QUEUE_NUM; j++) + { + *p_etha_eatdqdcn_reg = p_extend->ipv_queue_depth_list[channel][j] & R_ETHA0_EATDQDC0_DQD_Msk; + p_etha_eatdqdcn_reg += 1; + } + + /* Enable Magic packet detection. */ + p_reg_rmac = + (R_RMAC0_Type *) (R_RMAC0_BASE + (channel * LAYER3_SWITCH_RMAC_REG_SIZE)); + p_reg_rmac->MRGC_b.MPDE = 1; + + if (NULL != p_extend->p_gptp_instance) + { + p_reg_rmac->MTRC_b.DTN = (uint8_t) (p_extend->gptp_timer_numbers[channel] & 0x1); + } + } + } + + /* Open all ETHER_PHY instances. */ + for (uint8_t channel = 0; + (channel < BSP_FEATURE_ETHER_NUM_CHANNELS) && ((FSP_SUCCESS == phy_err) | (FSP_ERR_ALREADY_OPEN == phy_err)); + channel++) + { + p_ether_phy = p_extend->p_ether_phy_instances[channel]; + if (NULL != p_ether_phy) + { + p_ether_phy->p_api->open(p_ether_phy->p_ctrl, p_ether_phy->p_cfg); + } + } + + /* Start operation on ETHA ports. */ + for (uint8_t channel = 0; (channel < BSP_FEATURE_ETHER_NUM_CHANNELS) && (FSP_SUCCESS == phy_err); channel++) + { + p_ether_phy = p_extend->p_ether_phy_instances[channel]; + if (NULL != p_ether_phy) + { + /* Set ETHA to OPERATION mode. */ + r_layer3_switch_update_etha_operation_mode(channel, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_etha_operation_mode(channel, LAYER3_SWITCH_AGENT_MODE_OPERATION); + + /* Initialize each PHY LSI. */ + R_RMAC_PHY_ChipSelect(p_ether_phy->p_ctrl, channel); + phy_err = p_ether_phy->p_api->chipInit(p_ether_phy->p_ctrl, p_ether_phy->p_cfg); + if (phy_err != FSP_SUCCESS) + { + break; + } + + phy_err = p_ether_phy->p_api->startAutoNegotiate(p_ether_phy->p_ctrl); + } + } + + /* If failed to open a PHY instance, revert all configurations and return error. */ + if (FSP_SUCCESS != phy_err) + { + /* Close ETHA IP and other PHY instances. */ + r_layer3_switch_close_etha_ports(p_instance_ctrl); + + /* Disable GWCA. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + + /* Reset destination ports of forwarding feature. */ + for (uint32_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { + p_mfwd_fwpbfc_reg = + (uint32_t *) ((uintptr_t) &(R_MFWD->FWPBFC0) + (i * LAYER3_SWITCH_FWPBFC_REGISTER_OFFSET)); + *p_mfwd_fwpbfc_reg = 0; + } + + /* Disable extended descriptor for each agents. */ + R_MFWD->FWPC10_b.DDE = 0; + R_MFWD->FWPC11_b.DDE = 0; + R_MFWD->FWPC12_b.DDE = 0; + + FSP_RETURN(FSP_ERR_ETHER_ERROR_PHY_COMMUNICATION); + } + + /* Enable GWCA Data Interrupt IRQ. It occurs when a descriptor completes RX/TX or receive frame for a full queue. 
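+     * The optional ETHA error interrupts for port 0 and port 1 are enabled below only when a
+     * valid (non-negative) IRQ number is configured.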
*/ + R_BSP_IrqCfgEnable(p_instance_ctrl->p_cfg->irq, p_instance_ctrl->p_cfg->ipl, p_instance_ctrl); + if (p_extend->etha_error_irq_port_0 >= 0) + { + R_BSP_IrqCfgEnable(p_extend->etha_error_irq_port_0, p_extend->etha_error_ipl_port_0, p_instance_ctrl); + } + + if (p_extend->etha_error_irq_port_1 >= 0) + { + R_BSP_IrqCfgEnable(p_extend->etha_error_irq_port_1, p_extend->etha_error_ipl_port_1, p_instance_ctrl); + } + + p_instance_ctrl->open = LAYER3_SWITCH_OPEN; + + return FSP_SUCCESS; +} /* End of function R_LAYER3_SWITCH_Open() */ + +/********************************************************************************************************************//** + * @brief Disables interrupts and stop module. Implements @ref ether_switch_api_t::close. + * + * @retval FSP_SUCCESS Channel successfully closed. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN Control block is not open. + ***********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_Close (ether_switch_ctrl_t * const p_ctrl) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + layer3_switch_extended_cfg_t * p_extend; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(NULL != p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + p_extend = (layer3_switch_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + + /* Disable GWCA Data Interrupt IRQ. */ + R_BSP_IrqDisable(p_instance_ctrl->p_cfg->irq); + R_FSP_IsrContextSet(p_instance_ctrl->p_cfg->irq, NULL); + + if (p_extend->etha_error_irq_port_0 >= 0) + { + R_BSP_IrqDisable(p_extend->etha_error_irq_port_0); + R_FSP_IsrContextSet(p_extend->etha_error_irq_port_0, NULL); + } + + if (p_extend->etha_error_irq_port_1 >= 0) + { + R_BSP_IrqDisable(p_extend->etha_error_irq_port_1); + R_FSP_IsrContextSet(p_extend->etha_error_irq_port_1, NULL); + } + + /* Close ETHA ports and PHY instances. */ + r_layer3_switch_close_etha_ports(p_instance_ctrl); + + /* When a r_gptp instance is set, close it. */ + if (NULL != p_extend->p_gptp_instance) + { + p_extend->p_gptp_instance->p_api->close(p_extend->p_gptp_instance->p_ctrl); + } + + /* Set GWCA to DISABLE mode. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + + /* Waiting for all pointers to be released. */ + FSP_HARDWARE_REGISTER_WAIT(R_COMA->CABPPCM_b.RPC, R_COMA->CABPPCM_b.TPC) + + /* Not set ESWM module stop feature because it shared with EtherCAT.*/ + + /* Reset instance ctrl members. */ + p_instance_ctrl->p_cfg = NULL; + memset(p_instance_ctrl->p_queues_status, 0, sizeof(p_instance_ctrl->p_queues_status)); + p_instance_ctrl->table_status = LAYER3_SWITCH_TABLE_STATUS_UNINITIALIZED; + + /* Mark the driver as closed. */ + p_instance_ctrl->open = 0U; + + return FSP_SUCCESS; +} /* End of function R_LAYER3_SWITCH_Close() */ + +/********************************************************************************************************************//** + * @brief Create a new descriptor queue and set it to LINKFIX table. + * This function must be called before calling @ref R_LAYER3_SWITCH_SetDescriptor and @ref R_LAYER3_SWITCH_GetDescriptor. + * + * @retval FSP_SUCCESS Descriptor created successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN Control block is not open. 
+ * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_OUT_OF_MEMORY Descriptor queue list is depleted. + * @retval FSP_ERR_OVERFLOW TS descriptor queue is used. + ***********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_CreateDescriptorQueue (ether_switch_ctrl_t * const p_ctrl, + uint32_t * const p_queue_index, + const layer3_switch_descriptor_queue_cfg_t * const p_queue_cfg) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + uint32_t queue_index; + fsp_err_t err = FSP_SUCCESS; + + volatile uint32_t * p_gwca_gwdcc_reg; + volatile uint32_t * p_gwca_gwdie_reg; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(NULL != p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(NULL != p_queue_index, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(NULL != p_queue_cfg, FSP_ERR_INVALID_POINTER); + if (LAYER3_SWITCH_DISCRIPTOR_FORMTAT_TX_TIMESTAMP == p_queue_cfg->descriptor_format) + { + FSP_ERROR_RETURN(NULL != p_queue_cfg->p_ts_descriptor_array, FSP_ERR_INVALID_POINTER); + } + else + { + FSP_ERROR_RETURN(NULL != p_queue_cfg->p_descriptor_array, FSP_ERR_INVALID_POINTER); + } +#endif + + if (LAYER3_SWITCH_DISCRIPTOR_FORMTAT_TX_TIMESTAMP == p_queue_cfg->descriptor_format) + { + err = r_layer3_switch_create_tx_timestamp_queue(p_instance_ctrl, p_queue_cfg, p_queue_index); + } + else + { + queue_index = p_instance_ctrl->allocated_descriptor_queue_index; + FSP_ERROR_RETURN(LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM > queue_index, FSP_ERR_OUT_OF_MEMORY); + + /* Set all descriptors of new queue as disable. */ + for (uint32_t i = 0; i < p_queue_cfg->array_length; i++) + { + p_queue_cfg->p_descriptor_array[i].basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY; + } + + /* Set the new queue to LINKFIX table. */ + p_instance_ctrl->p_descriptor_queue_list[queue_index].ptr_h = + (LAYER3_SWITCH_QUEUE_ADDRESS_UPPER_MASK & (uint64_t) (uintptr_t) p_queue_cfg->p_descriptor_array) >> + LAYER3_SWITCH_QUEUE_ADDRESS_UPPER_POSITION; + p_instance_ctrl->p_descriptor_queue_list[queue_index].ptr_l = + LAYER3_SWITCH_QUEUE_ADDRESS_LOWER_MASK & (uintptr_t) p_queue_cfg->p_descriptor_array; + p_instance_ctrl->p_descriptor_queue_list[queue_index].dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LINKFIX; + + /* Configure the new queue.*/ + p_gwca_gwdcc_reg = &(p_instance_ctrl->p_gwca_reg->GWDCC0) + queue_index; + *p_gwca_gwdcc_reg = + (uint32_t) ((p_queue_cfg->write_back_mode << R_GWCA0_GWDCC0_SM_Pos) | + (p_queue_cfg->descriptor_format << R_GWCA0_GWDCC0_EDE_Pos) | + (p_queue_cfg->rx_timestamp_storage << R_GWCA0_GWDCC0_ETS_Pos) | + (p_queue_cfg->type << R_GWCA0_GWDCC0_DQT_Pos)); + + /* Enable GWCA Data Interrupt of this queue. */ + /* Get register address. Use GWDIE0 for queue 0-31, GWDIE1 for queue 32-63. */ + p_gwca_gwdie_reg = (uint32_t *) ((uintptr_t) &(p_instance_ctrl->p_gwca_reg->GWDIE0) + + ((queue_index / LAYER3_SWITCH_REGISTER_SIZE) * + LAYER3_SWITCH_INTERRUPT_REGISTER_OFFSET)); + + /* Set bit field of this queue. */ + *p_gwca_gwdie_reg = (*p_gwca_gwdie_reg) | (1 << (queue_index % LAYER3_SWITCH_REGISTER_SIZE)); + + /* Enable also Descriptor Full Error Interrupt. 
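+           The enable bit lives in the GWEIE2n registers, which are indexed and bit-positioned per queue in
+           the same way as the GWDIEn registers used above.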
*/ + p_gwca_gwdie_reg = (uint32_t *) ((uintptr_t) &(p_instance_ctrl->p_gwca_reg->GWEIE20) + + ((queue_index / LAYER3_SWITCH_REGISTER_SIZE) * + LAYER3_SWITCH_INTERRUPT_REGISTER_OFFSET)); + *p_gwca_gwdie_reg = (*p_gwca_gwdie_reg) | (1 << (queue_index % LAYER3_SWITCH_REGISTER_SIZE)); + + /* Initialize software queue status. */ + p_instance_ctrl->p_queues_status[queue_index].created = true; + p_instance_ctrl->p_queues_status[queue_index].head = 0; + p_instance_ctrl->p_queues_status[queue_index].tail = 0; + p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg = p_queue_cfg; + + /* Store queue index. */ + *p_queue_index = queue_index; + p_instance_ctrl->allocated_descriptor_queue_index += 1; + } + + return err; +} /* End of function R_LAYER3_SWITCH_CreateDescriptorQueue() */ + +/********************************************************************************************************************//** + * @brief Set descriptor data to a target descriptor. + * + * @retval FSP_SUCCESS Descriptor set successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN Control block is not open. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Queue index is invalid. + * @retval FSP_ERR_NOT_INITIALIZED This descriptor queue is not created. + * @retval FSP_ERR_IN_USE Target descriptor is now running. + * @retval FSP_ERR_OVERFLOW Descriptor queue is full. + ***********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_SetDescriptor (ether_switch_ctrl_t * const p_ctrl, + uint32_t queue_index, + layer3_switch_descriptor_t const * const p_descriptor) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + layer3_switch_descriptor_t * p_target_descriptor; + layer3_switch_descriptor_type_t active_descriptor_type; + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(NULL != p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(NULL != p_descriptor, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM > queue_index, FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(p_instance_ctrl->p_queues_status[queue_index].created, FSP_ERR_NOT_INITIALIZED); +#endif + + /* The last descriptor of the queue cannot be rewritten. */ + FSP_ERROR_RETURN(p_instance_ctrl->p_queues_status[queue_index].tail < + p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->array_length - 1, + FSP_ERR_OVERFLOW); + + /* Get pointer to the target descriptor. */ + p_target_descriptor = + r_layer3_switch_get_descriptor(p_instance_ctrl, queue_index, + p_instance_ctrl->p_queues_status[queue_index].tail); + + /* Check the descriptor queue is in TX queue or RX queue. */ + if (LAYER3_SWITCH_QUEUE_TYPE_TX == p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->type) + { + /* In TX queue, FSINGLE descriptor is active. */ + active_descriptor_type = LAYER3_SWITCH_DESCRIPTOR_TYPE_FSINGLE; + } + else /* When RX descriptor queue. */ + { + /* In RX queue, FEMPTY descriptor is active. */ + active_descriptor_type = LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY; + } + + /* Check if the target descriptor is active. Set is only permitted to a stopped descriptor. 
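+       A TX descriptor still marked FSINGLE or an RX descriptor still marked FEMPTY is owned by hardware and
+       is rejected with FSP_ERR_IN_USE, unless the caller writes a LEMPTY descriptor to disable it.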
*/ + if ((active_descriptor_type == p_target_descriptor->basic.dt) && + (LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY != p_descriptor->basic.dt)) + { + err = FSP_ERR_IN_USE; + } + else + { + /* Copy all fields in the descriptor. */ + memcpy(p_target_descriptor, p_descriptor, sizeof(layer3_switch_descriptor_t)); + p_instance_ctrl->p_queues_status[queue_index].tail += 1; + + /* RX queue become available. */ + if (LAYER3_SWITCH_QUEUE_TYPE_RX == p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->type) + { + p_instance_ctrl->p_queues_status[queue_index].rx_available = true; + } + } + + return err; +} /* End of function R_LAYER3_SWITCH_SetDescriptor() */ + +/********************************************************************************************************************//** + * @brief Get descriptor data from a target descriptor. + * + * @retval FSP_SUCCESS Descriptor got successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN Control block is not open. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Queue index is invalid. + * @retval FSP_ERR_NOT_INITIALIZED This descriptor queue is not created or target descriptor is not set. + * @retval FSP_ERR_IN_USE Target descriptor is now running. + ***********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_GetDescriptor (ether_switch_ctrl_t * const p_ctrl, + uint32_t queue_index, + layer3_switch_descriptor_t * const p_descriptor) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + layer3_switch_descriptor_t * p_target_descriptor; + layer3_switch_descriptor_type_t active_descriptor_type; + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(NULL != p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(NULL != p_descriptor, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM > queue_index, FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(p_instance_ctrl->p_queues_status[queue_index].created, FSP_ERR_NOT_INITIALIZED); +#endif + + /* Get pointer to the target descriptor. */ + p_target_descriptor = + r_layer3_switch_get_descriptor(p_instance_ctrl, queue_index, + p_instance_ctrl->p_queues_status[queue_index].head); + + /* Check the descriptor queue is in TX queue or RX queue. */ + if (LAYER3_SWITCH_QUEUE_TYPE_TX == p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->type) + { + /* In TX queue, FSINGLE descriptor is active. */ + active_descriptor_type = LAYER3_SWITCH_DESCRIPTOR_TYPE_FSINGLE; + } + else /* When RX descriptor queue. */ + { + /* In RX queue, FEMPTY descriptor is active. */ + active_descriptor_type = LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY; + } + + /* Check if the target descriptor is active. Get is only permitted to a stopped descriptor. 
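+       A descriptor still marked active is being processed by hardware (FSP_ERR_IN_USE); one still marked
+       LEMPTY has never been written (FSP_ERR_NOT_INITIALIZED).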
*/ + if (active_descriptor_type == p_target_descriptor->basic.dt) + { + err = FSP_ERR_IN_USE; + } + else if (LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY == p_target_descriptor->basic.dt) + { + err = FSP_ERR_NOT_INITIALIZED; + } + else + { + /* Copy the all descriptor fields to a argument.*/ + memcpy(p_descriptor, p_target_descriptor, sizeof(layer3_switch_descriptor_t)); + + p_instance_ctrl->p_queues_status[queue_index].head += 1; + } + + return err; +} /* End of function R_LAYER3_SWITCH_GetDescriptor() */ + +/********************************************************************************************************************//** + * @brief Reload and enable a descriptor queue. + * In a TX descriptor queue, the queue start transmission. In a RX descriptor queue, the queue start reception. + * + * @retval FSP_SUCCESS Descriptor queue started successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN Control block is not open. + * @retval FSP_ERR_INVALID_ARGUMENT Queue index is invalid. + * @retval FSP_ERR_NOT_INITIALIZED Target descriptor queue is not created. + * @retval FSP_ERR_IN_USE Target descriptor queue is already running. + * @retval FSP_ERR_INVALID_DATA Target TX queue have no data. + ***********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_StartDescriptorQueue (ether_switch_ctrl_t * const p_ctrl, uint32_t queue_index) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + + volatile uint32_t * p_gwca_gwdcc_reg; + volatile uint32_t * p_gwca_gwtrc_reg; + volatile uint32_t * p_mfwd_fwpbfcsdc0_reg; + layer3_switch_descriptor_t * p_descriptor; + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(NULL != p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM > queue_index, FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(p_instance_ctrl->p_queues_status[queue_index].created, FSP_ERR_NOT_INITIALIZED); +#endif + + /* The target descriptor queue should be stopped. */ + if (LAYER3_SWITCH_QUEUE_TYPE_RX == p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->type) + { + /* RX queue should have been set a new buffer. */ + FSP_ERROR_RETURN(p_instance_ctrl->p_queues_status[queue_index].rx_available, FSP_ERR_IN_USE); + p_instance_ctrl->p_queues_status[queue_index].rx_available = false; + } + else + { + /* TX queue should have completed previous transmission. */ + FSP_ERROR_RETURN(!r_layer3_switch_is_descriptor_queue_active(p_instance_ctrl, queue_index), FSP_ERR_IN_USE); + + /* Reset descriptor index of this queue. */ + p_instance_ctrl->p_queues_status[queue_index].head = 0; + p_instance_ctrl->p_queues_status[queue_index].tail = 0; + + /* When the head of the queue has no data, do not start transmission. */ + p_descriptor = r_layer3_switch_get_descriptor(p_instance_ctrl, queue_index, 0); + FSP_ERROR_RETURN(LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY != p_descriptor->basic.dt, FSP_ERR_INVALID_DATA); + } + + /* Reload the queue. */ + p_gwca_gwdcc_reg = &(p_instance_ctrl->p_gwca_reg->GWDCC0) + queue_index; + *p_gwca_gwdcc_reg |= R_GWCA0_GWDCC0_BALR_Msk; + FSP_HARDWARE_REGISTER_WAIT((*p_gwca_gwdcc_reg & R_GWCA0_GWDCC0_BALR_Msk), 0); + + /* Check if target descriptor queue type is TX or RX. 
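+       For a TX queue, setting the queue bit in GWTRCi requests the start of transmission; for an RX queue,
+       reception on each selected port is steered to this queue through the FWPBFCSDC registers.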
*/ + if (LAYER3_SWITCH_QUEUE_TYPE_TX == p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->type) + { + /* Get a pointer to GWTRCi register. Use GWTRC0 register for queue 0-31, GWTRC1 register for queue 32-63. */ + p_gwca_gwtrc_reg = &(p_instance_ctrl->p_gwca_reg->GWTRC0) + (queue_index / LAYER3_SWITCH_REGISTER_SIZE); + + /* Request to start transmission. */ + *p_gwca_gwtrc_reg = (*p_gwca_gwtrc_reg) | (1 << (queue_index % LAYER3_SWITCH_REGISTER_SIZE)); + } + else + { + /* When RX queue. */ + for (uint32_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { + /* Get register address that depend on port number. */ + if (1 & (p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->ports >> i)) + { + p_mfwd_fwpbfcsdc0_reg = (uint32_t *) ((uintptr_t) &(R_MFWD->FWPBFCSDC00) + + (LAYER3_SWITCH_FWPBFCSDC0_REGISTER_OFFSET * i)); + + /* Configure reception frame forwarding to the queue. */ + *p_mfwd_fwpbfcsdc0_reg = R_MFWD_FWPBFCSDC00_PBCSD_Msk & queue_index; + } + } + } + + /* Reset descriptor index of this queue. */ + p_instance_ctrl->p_queues_status[queue_index].head = 0; + p_instance_ctrl->p_queues_status[queue_index].tail = 0; + + return err; +} /* End of function R_LAYER3_SWITCH_StartDescriptorQueue() */ + +/*******************************************************************************************************************//** + * Updates the user callback with the option to provide memory for the callback argument structure. + * + * @retval FSP_SUCCESS Callback updated successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_NO_CALLBACK_MEMORY p_callback is non-secure and p_callback_memory is either secure or NULL. + **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_CallbackSet (ether_switch_ctrl_t * const p_ctrl, + void ( * p_callback)(ether_switch_callback_args_t *), + void * const p_context, + ether_switch_callback_args_t * const p_callback_memory) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ASSERT(p_callback); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + +#if BSP_TZ_SECURE_BUILD && BSP_FEATURE_ETHER_SUPPORTS_TZ_SECURE + + /* Get security state of p_callback. */ + bool callback_is_secure = + (NULL == cmse_check_address_range((void *) p_callback, sizeof(void *), CMSE_AU_NONSECURE)); + + #if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + + /* In secure projects, p_callback_memory must be provided in non-secure space if p_callback is non-secure. */ + ether_switch_callback_args_t * const p_callback_memory_checked = cmse_check_pointed_object(p_callback_memory, + CMSE_AU_NONSECURE); + FSP_ERROR_RETURN(callback_is_secure || (NULL != p_callback_memory_checked), FSP_ERR_NO_CALLBACK_MEMORY); + #endif +#endif + + /* Store callback and context. */ +#if BSP_TZ_SECURE_BUILD && BSP_FEATURE_ETHER_SUPPORTS_TZ_SECURE + p_instance_ctrl->p_callback = callback_is_secure ? 
p_callback : + (void (*)(ether_switch_callback_args_t *))cmse_nsfptr_create(p_callback); +#else + p_instance_ctrl->p_callback = p_callback; +#endif + p_instance_ctrl->p_context = p_context; + p_instance_ctrl->p_callback_memory = p_callback_memory; + + return FSP_SUCCESS; +} /* End of function R_LAYER3_SWITCH_CallbackSet() */ + +/*******************************************************************************************************************//** + * Configure Ethernet port features, including callback function for each port. + * + * @retval FSP_SUCCESS Port configured successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Port number is invalid. + **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_ConfigurePort (ether_switch_ctrl_t * const p_ctrl, + uint8_t port, + layer3_switch_port_cfg_t * p_port_cfg) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(port < BSP_FEATURE_ETHER_NUM_CHANNELS, FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(p_port_cfg, FSP_ERR_INVALID_POINTER); +#endif + + /* Set ETHA to CONFIG mode. */ + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_CONFIG); + + r_layer3_switch_configure_port(p_instance_ctrl, port, p_port_cfg); + + /* Set ETHA to OPERATION mode. */ + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_OPERATION); + + return FSP_SUCCESS; +} /* End of function R_LAYER3_SWITCH_ConfigurePort() */ + +/*******************************************************************************************************************//** + * Add or update an entry of the forwarding table. + * + * @retval FSP_SUCCESS Successfully add/updated an entry into the table. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_OVERFLOW The number of unsecure entries exceeded the configured value. + * @retval FSP_ERR_WRITE_FAILED A hardware error occurred while learning the entry. + * @retval FSP_ERR_INVALID_ARGUMENT Target frame or entry type is invalid. + * @retval FSP_ERR_INVALID_MODE VLAN feature is disabled and a VLAN entry is passed. 
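+ *
+ * Illustrative sketch of adding a static MAC entry; the control structure name, MAC address and destination
+ * port mask below are placeholder values:
+ * @code
+ * uint8_t destination_mac[6] = {0x74, 0x90, 0x50, 0x00, 0x00, 0x01};
+ * layer3_switch_frame_filter_t    target_frame = {0};
+ * layer3_switch_table_entry_cfg_t entry_cfg    = {0};
+ *
+ * target_frame.entry_type                = LAYER3_SWITCH_TABLE_ENTRY_TYPE_MAC;
+ * target_frame.p_destination_mac_address = destination_mac;
+ * entry_cfg.entry_enable                 = true;
+ * entry_cfg.destination_ports            = 1U << 1;   // Forward matching frames to port 1.
+ * fsp_err_t err = R_LAYER3_SWITCH_AddTableEntry(&g_layer3_switch0_ctrl, &target_frame, &entry_cfg);
+ * @endcode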
+ **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_AddTableEntry (ether_switch_ctrl_t * const p_ctrl, + layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(p_target_frame, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(p_entry_cfg, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(LAYER3_SWITCH_TABLE_STATUS_INITIALIZED == p_instance_ctrl->table_status, FSP_ERR_NOT_INITIALIZED); + FSP_ERROR_RETURN(LAYER3_SWITCH_TABLE_ENTRY_TYPE_EMPTY != p_target_frame->entry_type, FSP_ERR_INVALID_ARGUMENT); +#endif + + switch (p_target_frame->entry_type) + { + case LAYER3_SWITCH_TABLE_ENTRY_TYPE_MAC: + { + err = r_layer3_switch_learn_mac_entry(p_target_frame, p_entry_cfg); + break; + } + + case LAYER3_SWITCH_TABLE_ENTRY_TYPE_VLAN: + { + err = r_layer3_switch_learn_vlan_entry(p_target_frame, p_entry_cfg); + break; + } + + case LAYER3_SWITCH_TABLE_ENTRY_TYPE_LAYER3: + { + err = r_layer3_switch_learn_l3_entry(p_instance_ctrl, p_target_frame, p_entry_cfg); + break; + } + + default: + { + break; + } + } + + return err; +} /* End of function R_LAYER3_SWITCH_AddTableEntry() */ + +/*******************************************************************************************************************//** + * Search an entry from the forwarding table. + * + * @retval FSP_SUCCESS The entry is found successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_NOT_FOUND The entry is not found in the table. + * @retval FSP_ERR_INVALID_ARGUMENT Target frame or entry type is invalid. + * @retval FSP_ERR_INVALID_MODE VLAN feature is disabled and a VLAN entry is passed. 
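+ *
+ * The same layer3_switch_frame_filter_t layout used with @ref R_LAYER3_SWITCH_AddTableEntry selects the entry
+ * to look up; when a match is found, its settings are copied back into p_entry_cfg.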
+ **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_SearchTableEntry (ether_switch_ctrl_t * const p_ctrl, + layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(p_target_frame, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(p_entry_cfg, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(LAYER3_SWITCH_TABLE_STATUS_INITIALIZED == p_instance_ctrl->table_status, FSP_ERR_NOT_INITIALIZED); + FSP_ERROR_RETURN(LAYER3_SWITCH_TABLE_ENTRY_TYPE_EMPTY != p_target_frame->entry_type, FSP_ERR_INVALID_ARGUMENT); +#else + FSP_PARAMETER_NOT_USED(p_instance_ctrl); +#endif + + switch (p_target_frame->entry_type) + { + case LAYER3_SWITCH_TABLE_ENTRY_TYPE_MAC: + { + err = r_layer3_switch_search_mac_entry(p_target_frame, p_entry_cfg); + break; + } + + case LAYER3_SWITCH_TABLE_ENTRY_TYPE_VLAN: + { + err = r_layer3_switch_search_vlan_entry(p_target_frame, p_entry_cfg); + break; + } + + case LAYER3_SWITCH_TABLE_ENTRY_TYPE_LAYER3: + { + err = r_layer3_switch_search_l3_entry(p_target_frame, p_entry_cfg); + break; + } + + default: + { + break; + } + } + + return err; +} /* End of function R_LAYER3_SWITCH_SearchTableEntry() */ + +/*******************************************************************************************************************//** + * Configure and initialize an forwarding table. + * + * @retval FSP_SUCCESS Table configured successfully. + * @retval FSP_ERR_ASSERTION Pointer to control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_WRITE_FAILED Failed to add entries. + **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_ConfigureTable (ether_switch_ctrl_t * const p_ctrl, + layer3_switch_table_cfg_t const * const p_table_cfg) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(p_table_cfg, FSP_ERR_INVALID_POINTER); +#endif + + /* Set VLAN mode. */ + R_MFWD->FWGC_b.SVM = p_table_cfg->vlan_mode; + + /* Reset table. */ + r_layer3_switch_reset_table(p_instance_ctrl); + p_instance_ctrl->table_status = LAYER3_SWITCH_TABLE_STATUS_INITIALIZED; + + /* Set maximum number of unsecure entries. */ + R_MFWD->FWMACHEC = (uint32_t) (R_MFWD_FWMACHEC_MACHMC_Msk & (7) << R_MFWD_FWMACHEC_MACHMC_Pos) | + (uint32_t) (R_MFWD_FWMACHEC_MACHMUE_Msk & + (p_table_cfg->unsecure_entry_maximum_num) << + R_MFWD_FWMACHEC_MACHMUE_Pos); + R_MFWD->FWVLANTEC = (p_table_cfg->unsecure_entry_maximum_num << R_MFWD_FWVLANTEC_VLANTMUE_Pos) & + (R_MFWD_FWVLANTEC_VLANTMUE_Msk); + R_MFWD->FWLTHHEC = (p_table_cfg->unsecure_entry_maximum_num << R_MFWD_FWLTHHEC_LTHHMUE_Pos) & + (R_MFWD_FWLTHHEC_LTHHMUE_Msk); + + /* Initialize FRER parameters. 
 */
+    err = r_layer3_switch_frer_init(p_instance_ctrl, &p_table_cfg->frer_cfg);
+    FSP_ASSERT(FSP_SUCCESS == err);
+
+    /* Configure each port. */
+    for (uint8_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS + 1; i++)
+    {
+        r_layer3_switch_configure_forwarding_port(&p_table_cfg->port_cfg_list[i], i);
+
+        /* Configure VLAN ingress/egress mode. */
+        r_layer3_switch_initialize_vlan_port(p_instance_ctrl, &p_table_cfg->port_cfg_list[i], i);
+    }
+
+    /* Configure MAC table aging. */
+    if (p_table_cfg->mac_entry_aging_enable)
+    {
+        r_layer3_switch_enable_mac_table_aging((uint16_t) p_table_cfg->mac_entry_aging_time_sec);
+    }
+
+    /* Configure Layer3 table stream filter. */
+    r_layer3_switch_configure_stream_filter(&p_table_cfg->l3_stream_filter_cfg);
+
+    /* When a table is passed, learn all of its entries. */
+    if (NULL != p_table_cfg->p_table)
+    {
+        /* Learn MAC entries. */
+        for (uint32_t i = 0; (i < p_table_cfg->p_table->mac_list_length) && (FSP_SUCCESS == err); i++)
+        {
+            err = R_LAYER3_SWITCH_AddTableEntry(p_instance_ctrl,
+                                                &p_table_cfg->p_table->p_mac_entry_list[i].target_frame,
+                                                &p_table_cfg->p_table->p_mac_entry_list[i].entry_cfg);
+        }
+
+        /* Learn VLAN entries. */
+        for (uint32_t i = 0; (i < p_table_cfg->p_table->vlan_list_length) && (FSP_SUCCESS == err); i++)
+        {
+            err = R_LAYER3_SWITCH_AddTableEntry(p_instance_ctrl,
+                                                &p_table_cfg->p_table->p_vlan_entry_list[i].target_frame,
+                                                &p_table_cfg->p_table->p_vlan_entry_list[i].entry_cfg);
+        }
+
+        /* Learn Layer3 entries. */
+        for (uint32_t i = 0; (i < p_table_cfg->p_table->l3_list_length) && (FSP_SUCCESS == err); i++)
+        {
+            err = R_LAYER3_SWITCH_AddTableEntry(p_instance_ctrl,
+                                                &p_table_cfg->p_table->p_l3_entry_list[i].target_frame,
+                                                &p_table_cfg->p_table->p_l3_entry_list[i].entry_cfg);
+        }
+    }
+
+    if (FSP_SUCCESS != err)
+    {
+        /* When an error occurs, remove the learned entries and return the error. */
+        r_layer3_switch_reset_table(p_instance_ctrl);
+        err = FSP_ERR_WRITE_FAILED;
+    }
+
+    return err;
+} /* End of function R_LAYER3_SWITCH_ConfigureTable() */
+
+/*******************************************************************************************************************//**
+ * Read the forwarding table entries into a caller-provided table structure.
+ *
+ * @retval  FSP_SUCCESS                 Table read successfully.
+ * @retval  FSP_ERR_ASSERTION           Pointer to control block is NULL.
+ * @retval  FSP_ERR_NOT_OPEN            The control block has not been opened.
+ * @retval  FSP_ERR_INVALID_POINTER     Pointer to an argument is NULL.
+ * @retval  FSP_ERR_NOT_INITIALIZED     The forwarding table has not been configured.
+ * @retval  FSP_ERR_INVALID_MODE        VLAN feature is disabled and a VLAN entry list is passed.
+ **********************************************************************************************************************/
+fsp_err_t R_LAYER3_SWITCH_GetTable (ether_switch_ctrl_t * const p_ctrl, layer3_switch_table_t * const p_table)
+{
+    layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl;
+    layer3_switch_extended_cfg_t * p_extend;
+    fsp_err_t err = FSP_SUCCESS;
+
+#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE
+    FSP_ASSERT(p_instance_ctrl);
+    FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    FSP_ERROR_RETURN(p_table, FSP_ERR_INVALID_POINTER);
+    FSP_ERROR_RETURN(LAYER3_SWITCH_TABLE_STATUS_INITIALIZED == p_instance_ctrl->table_status, FSP_ERR_NOT_INITIALIZED);
+#endif
+
+    /* Initialize lengths. */
+    p_table->mac_list_length  = 0;
+    p_table->vlan_list_length = 0;
+    p_table->l3_list_length   = 0;
+
+    /* Read all MAC entries. 
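+       Entries are read back by table index; an empty slot returns FSP_ERR_NOT_FOUND, which is tolerated so
+       the scan continues, and only valid entries are counted into mac_list_length.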
*/ + if (NULL != p_table->p_mac_entry_list) + { + for (uint16_t i = 0; + (i < LAYER3_SWITCH_MAC_ENTRY_MAX_NUM) && ((FSP_SUCCESS == err) || (FSP_ERR_NOT_FOUND == err)); + i++) + { + err = r_layer3_switch_read_mac_entry(i, &p_table->p_mac_entry_list[p_table->mac_list_length]); + if (FSP_SUCCESS == err) + { + p_table->mac_list_length += 1; + } + } + } + + /* Read all VLAN entries. */ + if ((NULL != p_table->p_vlan_entry_list) && ((FSP_SUCCESS == err) || (FSP_ERR_NOT_FOUND == err))) + { + for (uint16_t i = 1; + (i < LAYER3_SWITCH_VLAN_ENTRY_MAX_NUM) && ((FSP_SUCCESS == err) || (FSP_ERR_NOT_FOUND == err)); + i++) + { + err = r_layer3_switch_read_vlan_entry(i, &p_table->p_vlan_entry_list[p_table->vlan_list_length]); + if (FSP_SUCCESS == err) + { + p_table->vlan_list_length += 1; + } + } + } + + /* Read all L3 entries. */ + if ((NULL != p_table->p_l3_entry_list) && ((FSP_SUCCESS == err) || (FSP_ERR_NOT_FOUND == err))) + { + p_extend = (layer3_switch_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + for (uint16_t i = 0; + (i < p_extend->l3_filter_list_length) && ((FSP_SUCCESS == err) || (FSP_ERR_NOT_FOUND == err)); + i++) + { + err = + r_layer3_switch_search_l3_entry(&p_extend->l3_filter_list[i].frame, + &p_table->p_l3_entry_list[p_table->l3_list_length].entry_cfg); + if (FSP_SUCCESS == err) + { + p_table->p_l3_entry_list[p_table->l3_list_length].target_frame = p_extend->l3_filter_list[i].frame; + p_table->l3_list_length += 1; + } + } + } + + if (FSP_ERR_NOT_FOUND == err) + { + /* When entries are not found, it is acceptable. */ + err = FSP_SUCCESS; + } + + return err; +} /* End of function R_LAYER3_SWITCH_GetTable() */ + +/*******************************************************************************************************************//** + * Configure Time Aware Shaper feature. + * + * @retval FSP_SUCCESS TAS configure successfully. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER Pointer to a argument is NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Port number is invalid. + * @retval FSP_ERR_UNSUPPORTED TAS feature is not enabled in the configuration. + **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_ConfigureTAS (ether_switch_ctrl_t * const p_ctrl, + uint8_t port, + layer3_switch_tas_cfg_t * p_tas_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + +#if LAYER3_SWITCH_CFG_TAS_ENABLE + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + uint8_t tas_entry_addr; + uint8_t learn_count = 0; + R_ETHA0_Type * p_etha_reg; + volatile uint32_t * p_eatasenc_reg; + uint32_t initial_gate_state_bitmask = 0; + + #if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(port < BSP_FEATURE_ETHER_NUM_CHANNELS, FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(NULL != p_tas_cfg, FSP_ERR_INVALID_POINTER); + #else + FSP_PARAMETER_NOT_USED(p_instance_ctrl); + #endif + + p_etha_reg = (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * port)); + + /* Enable TAS gate error interrupt. */ + p_etha_reg->EAEIE1 |= (R_ETHA0_EAEIE1_TASGEE0_Msk | R_ETHA0_EAEIE1_TASGEE1_Msk | + R_ETHA0_EAEIE1_TASGEE2_Msk | R_ETHA0_EAEIE1_TASGEE3_Msk | R_ETHA0_EAEIE1_TASGEE4_Msk | + R_ETHA0_EAEIE1_TASGEE5_Msk | R_ETHA0_EAEIE1_TASGEE6_Msk | R_ETHA0_EAEIE1_TASGEE7_Msk); + + /* Initialize TAS RAM. 
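+       The reset is requested through EATASRIRM and the code waits for the reset-complete flag, then for
+       EATASC to report that configuration access is available, before any gate parameters are written.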
*/ + p_etha_reg->EATASRIRM_b.TASRIOG = 1U; + FSP_HARDWARE_REGISTER_WAIT(p_etha_reg->EATASRIRM_b.TASRR, 1); + + /* Wait until TAS Configuration becomes available. */ + FSP_HARDWARE_REGISTER_WAIT(p_etha_reg->EATASC_b.TASCI, 0); + + /* Set entry count and the initial state of each gate. */ + tas_entry_addr = p_etha_reg->EATASC_b.TASCA; + for (uint8_t i = 0; i < BSP_FEATURE_ESWM_ETHA_IPV_QUEUE_NUM; i++) + { + p_eatasenc_reg = &p_etha_reg->EATASENC0 + i; + *p_eatasenc_reg = ((p_tas_cfg->gate_cfg_list[i].tas_entry_num - 1) & R_ETHA0_EATASCTENC_TASCTAEN_Msk); + + initial_gate_state_bitmask |= (uint8_t) (p_tas_cfg->gate_cfg_list[i].initial_gate_state << i); + } + + p_etha_reg->EATASIGSC = initial_gate_state_bitmask & LAYER3_SWITCH_EATASIGSC_MASK; + + /* Set cycle start time and span. */ + p_etha_reg->EATASCSTC0_b.TASACSTP0 = (p_tas_cfg->cycle_time_start_low); + p_etha_reg->EATASCSTC1_b.TASACSTP1 = (p_tas_cfg->cycle_time_start_high); + p_etha_reg->EATASCTC_b.TASACT = (p_tas_cfg->cycle_time); + + /* Learning TAS gate entries. */ + for (uint8_t i = 0; i < BSP_FEATURE_ESWM_ETHA_IPV_QUEUE_NUM; i++) + { + for (uint8_t learn_port_cnt = 0; + learn_port_cnt < p_tas_cfg->gate_cfg_list[i].tas_entry_num; + learn_port_cnt++) + { + layer3_switch_tas_entry_t * p_learn_entry = + (p_tas_cfg->gate_cfg_list[i].p_tas_entry_list + learn_port_cnt); + + p_etha_reg->EATASGL0_b.TASGAL = (tas_entry_addr + learn_count); + p_etha_reg->EATASGL1 = + ((p_learn_entry->time << R_ETHA0_EATASGL1_TASGTL_Pos) & R_ETHA0_EATASGL1_TASGTL_Msk) | + ((uint32_t) (p_learn_entry->state << R_ETHA0_EATASGL1_TASGSL_Pos) & + R_ETHA0_EATASGL1_TASGSL_Msk); + FSP_HARDWARE_REGISTER_WAIT(p_etha_reg->EATASGLR_b.GL, 0); + + learn_count++; + } + } + + /* Set gPTP timer number. */ + p_etha_reg->EATASC_b.TASTS = (uint32_t) (p_tas_cfg->gptp_timer_number & 0x01); + + /* When the TAS operation is already enabled, apply the configuration. */ + if (p_etha_reg->EATASC_b.TASE == 1) + { + p_etha_reg->EATASC |= R_ETHA0_EATASC_TASE_Msk | R_ETHA0_EATASC_TASCC_Msk; + } + +#else + FSP_PARAMETER_NOT_USED(p_ctrl); + FSP_PARAMETER_NOT_USED(port); + FSP_PARAMETER_NOT_USED(p_tas_cfg); + err = FSP_ERR_UNSUPPORTED; +#endif + + return err; +} /* End of function R_LAYER3_SWITCH_ConfigureTAS() */ + +/*******************************************************************************************************************//** + * Enable Time Aware Shaper feature. + * + * @retval FSP_SUCCESS TAS enabled successfully. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_ARGUMENT Port number is invalid. + * @retval FSP_ERR_UNSUPPORTED TAS feature is not enabled in the configuration. 
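+ *
+ * A minimal usage sketch; the control structure, TAS configuration and port number below are placeholders:
+ * @code
+ * fsp_err_t err = R_LAYER3_SWITCH_ConfigureTAS(&g_layer3_switch0_ctrl, 1, &g_tas_cfg);
+ * if (FSP_SUCCESS == err)
+ * {
+ *     err = R_LAYER3_SWITCH_EnableTAS(&g_layer3_switch0_ctrl, 1);
+ * }
+ * @endcode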
+ **********************************************************************************************************************/ +fsp_err_t R_LAYER3_SWITCH_EnableTAS (ether_switch_ctrl_t * const p_ctrl, uint8_t port) +{ + fsp_err_t err = FSP_SUCCESS; +#if LAYER3_SWITCH_CFG_TAS_ENABLE + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + #if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(LAYER3_SWITCH_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(port < BSP_FEATURE_ETHER_NUM_CHANNELS, FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(LAYER3_SWITCH_CFG_TAS_ENABLE, FSP_ERR_UNSUPPORTED); + #else + FSP_PARAMETER_NOT_USED(p_instance_ctrl); + #endif + R_ETHA0_Type * p_etha_reg = (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * port)); + + /* Enable TAS feature if it is disabled. */ + p_etha_reg->EATASC |= R_ETHA0_EATASC_TASE_Msk; +#else + FSP_PARAMETER_NOT_USED(p_ctrl); + FSP_PARAMETER_NOT_USED(port); + err = FSP_ERR_UNSUPPORTED; +#endif + + return err; +} /* End of function R_LAYER3_SWITCH_EnableTAS() */ + +/*******************************************************************************************************************//** + * @} (end addtogroup LAYER3_SWITCH) + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Private Functions + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Clear module stop and turn on the domain power of the ESWM IP. + ***********************************************************************************************************************/ +static void r_layer3_switch_module_start (void) +{ + /* Disable protect of the power domain control. */ + R_BSP_RegisterProtectDisable(BSP_REG_PROTECT_OM_LPC_BATT); + + /* Turn on the domain power. */ + if ((0 == R_SYSTEM->PDCTRESWM_b.PDCSF) && (1 == R_SYSTEM->PDCTRESWM_b.PDPGSF)) + { + R_SYSTEM->PDCTRESWM_b.PDDE = 0; + FSP_HARDWARE_REGISTER_WAIT(R_SYSTEM->PDCTRESWM_b.PDCSF, 0); + FSP_HARDWARE_REGISTER_WAIT(R_SYSTEM->PDCTRESWM_b.PDPGSF, 0); + } + + R_BSP_RegisterProtectEnable(BSP_REG_PROTECT_OM_LPC_BATT); + + /* Clear Ether-PHY clock Module Stop. */ + R_MSTP->MSTPCRC_b.MSTPC28 = 0; + + /* Clear Layer 3 Ethernet Switch Module Module Stop. */ + R_MSTP->MSTPCRC_b.MSTPC30 = 0; + + /* Waiting for module start. */ + R_BSP_SoftwareDelay(1, BSP_DELAY_UNITS_MICROSECONDS); +} /* End of function r_layer3_switch_module_start() */ + +/*********************************************************************************************************************** + * Reset COMA IP. + ***********************************************************************************************************************/ +static void r_layer3_switch_reset_coma (void) +{ + /* Reset ESWM IP. */ + R_COMA->RRC_b.RR = 1; + R_COMA->RRC_b.RR = 0; + R_BSP_SoftwareDelay(1, BSP_DELAY_UNITS_MILLISECONDS); + + /* Enable switch clock. */ + R_COMA->RCEC_b.RCE = 1; + R_BSP_SoftwareDelay(1, BSP_DELAY_UNITS_MILLISECONDS); + + /* Reset COMA buffer pool. */ + R_COMA->CABPIRM_b.BPIOG = 1; + FSP_HARDWARE_REGISTER_WAIT(R_COMA->CABPIRM_b.BPR, 1); + + /* Waiting for COMA reset. 
 */
+    R_BSP_SoftwareDelay(1, BSP_DELAY_UNITS_MICROSECONDS);
+
+    /* Enable all agent clocks. */
+    R_COMA->RCEC = R_COMA_RCEC_RCE_Msk | R_COMA_RCEC_ACE_Msk;
+} /* End of function r_layer3_switch_reset_coma() */
+
+/***********************************************************************************************************************
+ * Close ETHA ports and their associated PHY instances.
+ *
+ * @param[in]  p_instance_ctrl        Pointer to an instance control block
+ ***********************************************************************************************************************/
+static void r_layer3_switch_close_etha_ports (layer3_switch_instance_ctrl_t * p_instance_ctrl)
+{
+    layer3_switch_extended_cfg_t * p_extend = (layer3_switch_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend;
+    ether_phy_instance_t const * p_ether_phy;
+
+    /* Disable ETHA ports. */
+    for (uint8_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++)
+    {
+        p_ether_phy = p_extend->p_ether_phy_instances[i];
+        if (NULL != p_ether_phy)
+        {
+            /* Set ETHA to DISABLE mode. */
+            r_layer3_switch_update_etha_operation_mode(i, LAYER3_SWITCH_AGENT_MODE_DISABLE);
+
+            /* Close the ETHER_PHY instance. */
+            p_ether_phy->p_api->close(p_ether_phy->p_ctrl);
+        }
+    }
+} /* End of function r_layer3_switch_close_etha_ports() */
+
+/***********************************************************************************************************************
+ * Initialize the LINKFIX table with every queue disabled.
+ *
+ * @param[in]  p_instance_ctrl    Pointer to an instance control block
+ ***********************************************************************************************************************/
+static void r_layer3_switch_initialize_linkfix_table (layer3_switch_instance_ctrl_t * p_instance_ctrl)
+{
+    layer3_switch_basic_descriptor_t * p_linkfix_descriptor;
+
+    /* Initialize descriptor queue index. */
+    p_instance_ctrl->allocated_descriptor_queue_index = 0;
+
+    /* Initialize all LINKFIX descriptors in the LINKFIX table. */
+    for (uint32_t i = 0; i < LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM; i++)
+    {
+        p_linkfix_descriptor = &p_instance_ctrl->p_descriptor_queue_list[i];
+
+        /* Zero-initialize the descriptor. */
+        memset(p_linkfix_descriptor, 0, sizeof(layer3_switch_basic_descriptor_t));
+
+        /* Set the descriptor type to LEMPTY, which marks the queue as disabled. */
+        p_linkfix_descriptor->dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY;
+    }
+
+    /* Set the LINKFIX table base address. */
+    p_instance_ctrl->p_gwca_reg->GWDCBAC0 =
+        (LAYER3_SWITCH_QUEUE_ADDRESS_UPPER_MASK & (uint64_t) (uintptr_t) p_instance_ctrl->p_descriptor_queue_list) >>
+        LAYER3_SWITCH_QUEUE_ADDRESS_UPPER_POSITION;
+    p_instance_ctrl->p_gwca_reg->GWDCBAC1 = R_GWCA0_GWDCBAC1_DCBADP_Msk &
+                                            (uintptr_t) p_instance_ctrl->p_descriptor_queue_list;
+} /* End of function r_layer3_switch_initialize_linkfix_table() */
+
+/***********************************************************************************************************************
+ * Return the descriptor at descriptor_index in the queue. 
+ * + * @param[in] p_instance_ctrl Pointer to a instance control + * @param[in] queue_index Index of the queue + * @param[in] descriptor_index Index of the descriptor + ***********************************************************************************************************************/ +static layer3_switch_descriptor_t * r_layer3_switch_get_descriptor (layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t queue_index, + uint32_t descriptor_index) +{ + layer3_switch_basic_descriptor_t * p_linkfix_descriptor; + layer3_switch_descriptor_t * p_queue_head_descriptor; + + p_linkfix_descriptor = &p_instance_ctrl->p_descriptor_queue_list[queue_index]; + p_queue_head_descriptor = (layer3_switch_descriptor_t *) p_linkfix_descriptor->ptr_l; + + return &p_queue_head_descriptor[descriptor_index]; +} /* End of function r_layer3_switch_get_descriptor() */ + +/*********************************************************************************************************************** + * Return a descriptor that now processed by hardware in the queue. + * + * @param[in] p_instance_ctrl Pointer to a instance control + * @param[in] queue_index Index of the queue + ***********************************************************************************************************************/ +static layer3_switch_descriptor_t * r_layer3_switch_get_current_descriptor ( + layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t queue_index) +{ + /* Set dummy value to clear AXI searching address. */ + p_instance_ctrl->p_gwca_reg->GWAARSS_b.AARA = R_GWCA0_GWAARSS_AARA_Msk & (queue_index + 1); + FSP_HARDWARE_REGISTER_WAIT(p_instance_ctrl->p_gwca_reg->GWAARSR0_b.AARS, 0); + + /* Get descriptor address that hardware use now. */ + p_instance_ctrl->p_gwca_reg->GWAARSS_b.AARA = R_GWCA0_GWAARSS_AARA_Msk & queue_index; + FSP_HARDWARE_REGISTER_WAIT(p_instance_ctrl->p_gwca_reg->GWAARSR0_b.AARS, 0); + + return (layer3_switch_descriptor_t *) p_instance_ctrl->p_gwca_reg->GWAARSR1; +} /* End of function r_layer3_switch_get_current_descriptor() */ + +/*********************************************************************************************************************** + * Check if the descriptor queue is active. If active, return true. + * + * @param[in] p_instance_ctrl Pointer to a instance control + * @param[in] queue_index Index of the queue + ***********************************************************************************************************************/ +static bool r_layer3_switch_is_descriptor_queue_active (layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t queue_index) +{ + volatile layer3_switch_descriptor_t * p_current_descriptor; + bool active = false; + + /* Check the target descriptor queue type is TX or RX.*/ + if (LAYER3_SWITCH_QUEUE_TYPE_TX == p_instance_ctrl->p_queues_status[queue_index].p_queue_cfg->type) + { + /* In TX queue, the GWTRCi register indicates whether the queue is active. Use GWTRC0 for queue 0-31, GWTRC1 for queue 32-63. */ + if (*(&(p_instance_ctrl->p_gwca_reg->GWTRC0) + (queue_index / LAYER3_SWITCH_REGISTER_SIZE)) & + (1 << (queue_index % LAYER3_SWITCH_REGISTER_SIZE))) + { + active = true; + } + } + else /* When RX descriptor queue. */ + { + p_current_descriptor = r_layer3_switch_get_current_descriptor(p_instance_ctrl, queue_index); + + if (NULL == p_current_descriptor) + { + /* When the pointer is NULL, this queue is not started. 
*/ + active = false; + } + else if (&p_instance_ctrl->p_descriptor_queue_list[queue_index] == (void *) p_current_descriptor) + { + active = false; + } + else + { + /* When the current descriptor is LINKFIX, checking the next descriptor. */ + while (LAYER3_SWITCH_DESCRIPTOR_TYPE_LINKFIX == p_current_descriptor->basic.dt) + { + p_current_descriptor = (layer3_switch_descriptor_t *) p_current_descriptor->basic.ptr_l; + } + + if ((LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY == p_current_descriptor->basic.dt) || + (LAYER3_SWITCH_DESCRIPTOR_TYPE_FSINGLE == p_current_descriptor->basic.dt)) + { + /* When the current descriptor is LEMPTY or FSINGLE, this queue has reached its end. */ + active = false; + } + else + { + active = true; + } + } + } + + return active; +} /* End of function r_layer3_switch_is_descriptor_queue_active() */ + +/*********************************************************************************************************************** + * Change operation mode of GWCA. + * + * @param[in] p_instance_ctrl Pointer to a instance control + * @param[in] mode New operation mode + ***********************************************************************************************************************/ +static void r_layer3_switch_update_gwca_operation_mode (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_agent_mode_t mode) +{ + if (NULL != p_instance_ctrl) + { + /* Mode transition. */ + p_instance_ctrl->p_gwca_reg->GWMC_b.OPC = R_GWCA0_GWMC_OPC_Msk & mode; + FSP_HARDWARE_REGISTER_WAIT(p_instance_ctrl->p_gwca_reg->GWMS_b.OPS, mode); + } +} /* End of function r_layer3_switch_update_gwca_operation_mode() */ + +/*********************************************************************************************************************** + * Change operation mode of ETHA. + * + * @param[in] mode New operation mode + ***********************************************************************************************************************/ +static void r_layer3_switch_update_etha_operation_mode (uint8_t port, layer3_switch_agent_mode_t mode) +{ + R_ETHA0_Type * p_etha_reg = (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * port)); + + /* Mode transition. */ + p_etha_reg->EAMC_b.OPC = R_ETHA0_EAMC_OPC_Msk & mode; + FSP_HARDWARE_REGISTER_WAIT(p_etha_reg->EAMS_b.OPS, mode); +} /* End of function r_layer3_switch_update_etha_operation_mode() */ + +/******************************************************************************************************************* + * Configure MAC address of a ETHA port. 
+ * + * @param[in] p_mac_address Pointer to a MAC address + * @param[in] port The target port + **********************************************************************************************************************/ +static void r_layer3_switch_configure_mac_address (uint8_t * p_mac_address, uint8_t port) +{ + R_RMAC0_Type * p_reg_rmac = (R_RMAC0_Type *) (R_RMAC0_BASE + port * LAYER3_SWITCH_RMAC_REG_SIZE); + uint32_t mac_h; + uint32_t mac_l; + + if (NULL != p_mac_address) + { + mac_h = + ((((uint32_t) p_mac_address[0] << 8)) | (uint32_t) p_mac_address[1]); + + mac_l = (((uint32_t) p_mac_address[2] << 24) | ((uint32_t) p_mac_address[3] << 16) | + ((uint32_t) p_mac_address[4] << 8) | (uint32_t) p_mac_address[5]); + + p_reg_rmac->MRMAC0 = mac_h; + p_reg_rmac->MRMAC1 = mac_l; + } +} /* End of function r_layer3_switch_configure_mac_address() */ + +/******************************************************************************************************************* + * Configure the port specific features. + **********************************************************************************************************************/ +static void r_layer3_switch_configure_port (layer3_switch_instance_ctrl_t * const p_instance_ctrl, + uint8_t port, + layer3_switch_port_cfg_t const * const p_port_cfg) +{ + volatile uint32_t * p_mfwd_fwpbfc_reg; + + /* Copy callback settings to the control member. */ + p_instance_ctrl->p_port_cfg_list[port].p_callback = p_port_cfg->p_callback; + p_instance_ctrl->p_port_cfg_list[port].p_context = p_port_cfg->p_context; + p_instance_ctrl->p_port_cfg_list[port].p_callback_memory = p_port_cfg->p_callback_memory; + + /* Configure MAC address. */ + r_layer3_switch_configure_mac_address(p_port_cfg->p_mac_address, port); + + p_mfwd_fwpbfc_reg = + (uint32_t *) ((uintptr_t) &(R_MFWD->FWPBFC0) + (port * LAYER3_SWITCH_FWPBFC_REGISTER_OFFSET)); + if (p_port_cfg->forwarding_to_cpu_enable) + { + *p_mfwd_fwpbfc_reg |= (R_MFWD_FWPBFC0_PBDV_Msk & (uint32_t) (LAYER3_SWITCH_PORT_CPU_BITMASK)); + } + else + { + /* Disable forwarding to CPU. But forwarding from the LAN port to the LAN port is still enabled. */ + *p_mfwd_fwpbfc_reg &= (R_MFWD_FWPBFC0_PBDV_Msk & (uint32_t) (~LAYER3_SWITCH_PORT_CPU_BITMASK)); + } + + if (NULL != p_port_cfg->p_cbs_cfg) + { + r_layer3_switch_configure_cbs(p_instance_ctrl, port, p_port_cfg->p_cbs_cfg); + } +} /* End of function r_layer3_switch_configure_port() */ + +/******************************************************************************************************************* + * Set forwarding configuration of the target port. This configuration includes enable forwarding and reject unknown frame. + * + * @param[in] p_port_cfg Pointer to a port dependent configuration + * @param[in] port The target port + **********************************************************************************************************************/ +static void r_layer3_switch_configure_forwarding_port (layer3_switch_forwarding_port_cfg_t const * const p_port_cfg, + uint8_t port) +{ + uint32_t * p_mfwd_fwpc0_reg = + (uint32_t *) ((uintptr_t) (&R_MFWD->FWPC00) + (port * LAYER3_SWITCH_PORT_CONFIG_REGISTER_OFFSET)); + uint32_t fwpc0_value = *p_mfwd_fwpc0_reg; + + /* Set VLAN configuration. */ + if (p_port_cfg->vlan_table_enable) + { + fwpc0_value |= R_MFWD_FWPC00_VLANSA_Msk; + } + + if (p_port_cfg->vlan_reject_unknown) + { + fwpc0_value |= (R_MFWD_FWPC00_VLANRU_Msk | R_MFWD_FWPC00_VLANRUS_Msk); + } + + /* Set MAC configuration. 
*/ + if (p_port_cfg->mac_table_enable) + { + fwpc0_value |= (R_MFWD_FWPC00_MACDSA_Msk | R_MFWD_FWPC00_MACSSA_Msk); + } + + if (p_port_cfg->mac_reject_unknown) + { + fwpc0_value |= + (R_MFWD_FWPC00_MACRUDA_Pos | R_MFWD_FWPC00_MACRUDSA_Pos | R_MFWD_FWPC00_MACRUSA_Pos | + R_MFWD_FWPC00_MACRUSSA_Pos); + } + + if (p_port_cfg->mac_hardware_learning_enable) + { + fwpc0_value |= (R_MFWD_FWPC00_MACHLA_Msk | R_MFWD_FWPC00_MACHMA_Msk); + } + + /* Set layer3 configuration. */ + if (p_port_cfg->layer3_table_enable) + { + fwpc0_value |= R_MFWD_FWPC00_LTHTA_Msk; + } + + if (p_port_cfg->layer3_ipv4_filter_enable) + { + fwpc0_value |= R_MFWD_FWPC00_IP4UE_Msk | R_MFWD_FWPC00_IP4TE_Msk | R_MFWD_FWPC00_IP4OE_Msk; + } + + if (p_port_cfg->layer3_ipv6_filter_enable) + { + fwpc0_value |= R_MFWD_FWPC00_IP6UE_Msk | R_MFWD_FWPC00_IP6TE_Msk | R_MFWD_FWPC00_IP6OE_Msk; + } + + if (p_port_cfg->layer3_l2_filter_enable) + { + fwpc0_value |= R_MFWD_FWPC00_L2SE_Msk; + } + + if (p_port_cfg->layer3_reject_unknown) + { + fwpc0_value |= (R_MFWD_FWPC00_VLANRU_Msk | R_MFWD_FWPC00_VLANRUS_Msk); + } + + /* Write to port configuration register. */ + *p_mfwd_fwpc0_reg = fwpc0_value; +} /* End of function r_layer3_switch_configure_forwarding_port() */ + +/******************************************************************************************************************* + * Reset each forwarding table. + **********************************************************************************************************************/ +static void r_layer3_switch_reset_table (layer3_switch_instance_ctrl_t * p_instance_ctrl) +{ + /* Reset MAC table. */ + R_MFWD->FWMACTIM_b.MACTIOG = 1; + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWMACTIM_b.MACTR, 1); + + /* Reset VLAN table. */ + R_MFWD->FWVLANTIM_b.VLANTIOG = 1; + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWVLANTIM_b.VLANTR, 1); + + /* Reset layer3 table. */ + R_MFWD->FWLTHTIM_b.LTHTIOG = 1; + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWLTHTIM_b.LTHTR, 1); + + /* Reset layer3 Update feature. */ + R_MFWD->FWL23UTIM_b.L23UTIOG = 1; + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWL23UTIM_b.L23UTR, 1); + + /* Initialize entry counts. */ + p_instance_ctrl->l3_entry_count = 0; + p_instance_ctrl->l3_routing_number = 0; + p_instance_ctrl->l3_remapping_number = 0; +} /* End of function r_layer3_switch_reset_table() */ + +/******************************************************************************************************************* + * Learning an entry of MAC table. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_learn_mac_entry (layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + + uint32_t mac_address_high; + uint32_t mac_address_low; + uint32_t fwmactl3_value = 0; + + /* Destination or source MAC address should be set. */ + FSP_ERROR_RETURN((NULL != p_target_frame->p_destination_mac_address) || + (NULL != p_target_frame->p_source_mac_address), + FSP_ERR_INVALID_ARGUMENT); + + if (NULL != p_target_frame->p_destination_mac_address) + { + /* Enable forwarding when destination MAC of a incoming frame is matched. 
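+           The first two MAC octets form the upper word and the remaining four the lower word, matching the
+           FWMACTL1/FWMACTL2 register layout written below.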
*/ + fwmactl3_value |= R_MFWD_FWMACTL3_MACDSLVL_Msk & (p_entry_cfg->source_ports << R_MFWD_FWMACTL3_MACDSLVL_Pos); + mac_address_high = r_layer3_switch_convert_array_to_int(&p_target_frame->p_destination_mac_address[0], 2); + mac_address_low = r_layer3_switch_convert_array_to_int(&p_target_frame->p_destination_mac_address[2], 4); + } + else + { + /* Enable forwarding when source MAC address of a incoming frame is matched. */ + fwmactl3_value |= R_MFWD_FWMACTL3_MACSSLVL_Msk & (p_entry_cfg->source_ports << R_MFWD_FWMACTL3_MACSSLVL_Pos); + mac_address_high = r_layer3_switch_convert_array_to_int(&p_target_frame->p_source_mac_address[0], 2); + mac_address_low = r_layer3_switch_convert_array_to_int(&p_target_frame->p_source_mac_address[2], 4); + } + + if (p_entry_cfg->entry_enable) + { + /* Set security and dynamic entry feature. */ + R_MFWD->FWMACTL0 = (uint32_t) (p_entry_cfg->mac.dinamic_entry << R_MFWD_FWMACTL0_MACDEL_Pos) | + (uint32_t) (p_entry_cfg->security_enable << R_MFWD_FWMACTL0_MACSLL_Pos); + + /* Set MAC address. */ + R_MFWD->FWMACTL1 = mac_address_high; + R_MFWD->FWMACTL2 = mac_address_low; + + /* Set enabled source port. */ + R_MFWD->FWMACTL3 = fwmactl3_value; + + /* Set destination queue index when forward to CPU. */ + R_MFWD->FWMACTL40 = R_MFWD_FWMACTL40_MACCSDL_Msk & p_entry_cfg->destination_queue_index; + + /* Set destination ports and internal priority. */ + R_MFWD->FWMACTL5 = (p_entry_cfg->internal_priority_update_enable << R_MFWD_FWMACTL5_MACIPUL_Pos) | + (R_MFWD_FWMACTL5_MACIPVL_Msk & p_entry_cfg->internal_priority_update_value << + R_MFWD_FWMACTL5_MACIPVL_Pos) | + (R_MFWD_FWMACTL5_MACDVL_Msk & p_entry_cfg->destination_ports); + } + else + { + /* If entry is disabled, delete the target entry. */ + /* Set to delete mode. */ + R_MFWD->FWMACTL0 = R_MFWD_FWMACTL0_MACED_Msk; + + /* Set MAC address. */ + R_MFWD->FWMACTL1 = R_MFWD_FWMACTL1_MACMALP0_Msk & mac_address_high; + R_MFWD->FWMACTL2 = mac_address_low; + + /* Write to FWMACTL5 register. It is need to start deleting process. */ + R_MFWD->FWMACTL5 = 0; + } + + /* Wait to complete learning. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWMACTLR_b.MACTL, 0); + + /* Check learning result. */ + if (0 != (R_MFWD->FWMACTLR & R_MFWD_FWMACTLR_MACLSF_Msk)) + { + /* Unsecure entry overflow. */ + err = FSP_ERR_OVERFLOW; + } + else if (0 != (R_MFWD->FWMACTLR & (R_MFWD_FWMACTLR_MACLF_Msk | R_MFWD_FWMACTLR_MACLEF_Msk))) + { + err = FSP_ERR_WRITE_FAILED; + } + else + { + /* Do nothing. */ + } + + return err; +} /* End of function r_layer3_switch_learn_mac_entry() */ + +/******************************************************************************************************************* + * Search an entry of MAC table. + * + * @param[in] p_port_cfg Pointer to a target frame + * @param[in] port Pointer to an entry + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_search_mac_entry (layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + uint32_t mac_address_high; + uint32_t mac_address_low; + + /* Destination or source MAC address should be set. */ + FSP_ERROR_RETURN((NULL != p_target_frame->p_destination_mac_address) || + (NULL != p_target_frame->p_source_mac_address), + FSP_ERR_INVALID_POINTER); + + if (NULL != p_target_frame->p_destination_mac_address) + { + /* Use destination MAC address to search. 
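+ * Illustrative lookup sketch (the buffer and local variable names are
+ * hypothetical):
+ *
+ *   uint8_t dest_mac[6] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
+ *   layer3_switch_frame_filter_t    frame = {.p_destination_mac_address = dest_mac};
+ *   layer3_switch_table_entry_cfg_t entry;
+ *   fsp_err_t err = r_layer3_switch_search_mac_entry(&frame, &entry);
+ *
+ * FSP_ERR_NOT_FOUND is returned when no entry matches the given address.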
*/ + mac_address_high = r_layer3_switch_convert_array_to_int(&p_target_frame->p_destination_mac_address[0], 2); + mac_address_low = r_layer3_switch_convert_array_to_int(&p_target_frame->p_destination_mac_address[2], 4); + } + else + { + /* If destination is not passed, use source MAC address to search. */ + mac_address_high = r_layer3_switch_convert_array_to_int(&p_target_frame->p_source_mac_address[0], 2); + mac_address_low = r_layer3_switch_convert_array_to_int(&p_target_frame->p_source_mac_address[2], 4); + } + + /* Set MAC address and start searching. */ + R_MFWD->FWMACTS0 = mac_address_high; + R_MFWD->FWMACTS1 = mac_address_low; + + /* Wait to complete searching. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWMACTSR0_b.MACTS, 0); + + if (0 == R_MFWD->FWMACTSR0_b.MACSNF) + { + /* If an entry is found successfully, copy the entry fields. */ + p_entry_cfg->security_enable = R_MFWD->FWMACTSR0_b.MACSLS; + p_entry_cfg->mac.dinamic_entry = R_MFWD->FWMACTSR0_b.MACDES; + p_entry_cfg->source_ports = R_MFWD->FWMACTSR1_b.MACDSLVS | R_MFWD->FWMACTSR1_b.MACSSLVS; + p_entry_cfg->destination_queue_index = R_MFWD->FWMACTSR20_b.MACCSDS; + p_entry_cfg->destination_ports = R_MFWD->FWMACTSR3_b.MACDVS; + p_entry_cfg->internal_priority_update_enable = R_MFWD->FWMACTSR3_b.MACIPUS; + p_entry_cfg->internal_priority_update_value = R_MFWD->FWMACTSR3_b.MACIPVS; + } + else + { + /* Any entry is not found. */ + err = FSP_ERR_NOT_FOUND; + } + + return err; +} /* End of function r_layer3_switch_search_mac_entry() */ + +/******************************************************************************************************************* + * Read an entry of MAC table. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_read_mac_entry (uint16_t offset, layer3_switch_table_entry_t * p_entry) +{ + fsp_err_t err = FSP_SUCCESS; + uint8_t * p_mac_address; + + R_MFWD->FWMACTR = LAYER3_SWITCH_MAC_ENTRY_MAX_NUM & offset; + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWMACTRR0_b.MACTR, 0); + + if (R_MFWD->FWMACTRR0_b.MACEVR) + { + p_entry->entry_cfg.security_enable = R_MFWD->FWMACTRR1_b.MACSLR; + p_entry->entry_cfg.mac.dinamic_entry = R_MFWD->FWMACTRR1_b.MACDER; + p_entry->entry_cfg.source_ports = R_MFWD->FWMACTRR4_b.MACDSLVR | + R_MFWD->FWMACTRR4_b.MACSSLVR; + p_entry->entry_cfg.destination_queue_index = R_MFWD->FWMACTRR50_b.MACCSDR; + p_entry->entry_cfg.destination_ports = R_MFWD->FWMACTRR6_b.MACDVR; + p_entry->entry_cfg.internal_priority_update_enable = R_MFWD->FWMACTRR6_b.MACIPUR; + p_entry->entry_cfg.internal_priority_update_value = R_MFWD->FWMACTRR6_b.MACIPVR; + + if (R_MFWD->FWMACTRR4_b.MACDSLVR) + { + /* When destination MAC address matching is enabled. */ + p_mac_address = p_entry->target_frame.p_destination_mac_address; + } + else if (R_MFWD->FWMACTRR4_b.MACSSLVR) + { + /* When source MAC address matching is enabled. */ + p_mac_address = p_entry->target_frame.p_source_mac_address; + } + else + { + p_mac_address = NULL; + } + + if (NULL != p_mac_address) + { + p_mac_address[0] = (uint8_t) (R_MFWD->FWMACTRR2_b.MACMARP0 >> 8); + p_mac_address[1] = (uint8_t) (R_MFWD->FWMACTRR2_b.MACMARP0); + p_mac_address[2] = (uint8_t) (R_MFWD->FWMACTRR3_b.MACMARP1 >> 24); + p_mac_address[3] = (uint8_t) (R_MFWD->FWMACTRR3_b.MACMARP1 >> 16); + p_mac_address[4] = (uint8_t) (R_MFWD->FWMACTRR3_b.MACMARP1 >> 8); + p_mac_address[5] = (uint8_t) (R_MFWD->FWMACTRR3_b.MACMARP1); + } + else + { + /* An array for MAC address should be given by the argument. 
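+ * The address read back from FWMACTRR2/FWMACTRR3 is copied into the
+ * caller-supplied buffer selected above (destination or source, depending on
+ * which match type the entry uses); when that pointer is NULL the entry cannot
+ * be returned and FSP_ERR_INVALID_ARGUMENT is reported instead.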
*/ + err = FSP_ERR_INVALID_ARGUMENT; + } + } + else + { + err = FSP_ERR_NOT_FOUND; + } + + return err; +} /* End of function r_layer3_switch_read_mac_entry() */ + +/******************************************************************************************************************* + * Learning an entry of VLAN table. + * + * @param[in] p_entry Pointer to an entry + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_learn_vlan_entry (layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + uint16_t vlan_id; + + /* Extract VLAN ID. */ + err = r_layer3_switch_extract_vlan_id(p_target_frame, &vlan_id); + FSP_ERROR_RETURN(FSP_SUCCESS == err, err); + + if (p_entry_cfg->entry_enable) + { + /* Set entry is secure or not. */ + R_MFWD->FWVLANTL0 = (uint32_t) (p_entry_cfg->security_enable << R_MFWD_FWVLANTL0_VLANSLL_Pos); + + /* Set VLAN ID. */ + R_MFWD->FWVLANTL1 = vlan_id; + + /* Set forwarding ports. */ + R_MFWD->FWVLANTL2 = p_entry_cfg->source_ports; + R_MFWD->FWVLANTL30 = p_entry_cfg->destination_queue_index; + R_MFWD->FWVLANTL4 = (p_entry_cfg->destination_ports); + } + else + { + /* Set to delete mode. */ + R_MFWD->FWVLANTL0 = R_MFWD_FWVLANTL0_VLANED_Msk; + R_MFWD->FWVLANTL1 = vlan_id; + + /* Write to following register to start learning process. */ + R_MFWD->FWVLANTL4 = 0x0; + } + + /* Wait to complete learning */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWVLANTLR_b.VLANTL, 0); + + if (0 != (R_MFWD->FWVLANTLR & R_MFWD_FWVLANTLR_VLANLSF_Msk)) + { + /* Unsecure entry overflow. */ + err = FSP_ERR_OVERFLOW; + } + else if (0 != (R_MFWD->FWVLANTLR & (R_MFWD_FWVLANTLR_VLANLF_Msk | R_MFWD_FWVLANTLR_VLANLEF_Msk))) + { + err = FSP_ERR_WRITE_FAILED; + } + else + { + err = FSP_SUCCESS; + + /* Do nothing. */ + } + + return err; +} /* End of function r_layer3_switch_learn_vlan_entry() */ + +/******************************************************************************************************************* + * Search an entry of VLAN table. + * + * @param[in] p_port_cfg Pointer to a target frame + * @param[in] port Pointer to an entry + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_search_vlan_entry (layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg) +{ + uint16_t vlan_id; + fsp_err_t err; + + /* Extract VLAN ID from the target frame. */ + err = r_layer3_switch_extract_vlan_id(p_target_frame, &vlan_id); + FSP_ERROR_RETURN(FSP_SUCCESS == err, err); + + /* Start searching. */ + R_MFWD->FWVLANTS = vlan_id; + + /* Wait to complete searching. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWVLANTSR0_b.VLANTS, 0); + + if (0 == R_MFWD->FWVLANTSR0_b.VLANSNF) + { + /* An entry with the VLAN ID is found. Copy entry fields. */ + p_entry_cfg->security_enable = R_MFWD->FWVLANTSR0_b.VLANSLS; + p_entry_cfg->source_ports = R_MFWD->FWVLANTSR1_b.VLANSLVS; + p_entry_cfg->destination_queue_index = R_MFWD->FWVLANTSR20_b.VLANCSDS; + p_entry_cfg->destination_ports = R_MFWD->FWVLANTSR3_b.VLANDVS; + } + else + { + /* Any entry is not found. 
*/ + err = FSP_ERR_NOT_FOUND; + } + + return err; +} /* End of function r_layer3_switch_search_vlan_entry() */ + +/******************************************************************************************************************* + * Read an entry of VLAN table. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_read_vlan_entry (uint16_t offset, layer3_switch_table_entry_t * p_entry) +{ + fsp_err_t err; + FSP_ERROR_RETURN(LAYER3_SWITCH_VLAN_MODE_NO_VLAN != R_MFWD->FWGC_b.SVM, FSP_ERR_INVALID_MODE); + + /* Set offset as VLAN ID. */ + p_entry->target_frame.vlan_c_tag.id = LAYER3_SWITCH_VLAN_ENTRY_MAX_NUM & offset; + + /* Search an entry with this VLAN ID. */ + err = r_layer3_switch_search_vlan_entry(&p_entry->target_frame, &p_entry->entry_cfg); + + return err; +} /* End of function r_layer3_switch_read_vlan_entry() */ + +/******************************************************************************************************************* + * Learn an entry of layer3 forwarding table. + * + * @param[in] p_entry Pointer to an entry + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_learn_l3_entry (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t const * const p_entry_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + layer3_switch_extended_cfg_t * p_extend = (layer3_switch_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_stream_id_t stream_id = {0}; + uint32_t frer_number = 0; + uint32_t routing_number; + + FSP_ERROR_RETURN(p_extend->l3_filter_list_length > p_instance_ctrl->l3_entry_count, FSP_ERR_OVERFLOW); + + /* Calculate new stream ID. */ + r_layer3_switch_calculate_l3_stream_id(p_target_frame, &stream_id); + + /* Set stream ID. */ + R_MFWD->FWLTHTL1 = stream_id.words[0]; + R_MFWD->FWLTHTL2 = stream_id.words[1]; + R_MFWD->FWLTHTL3 = stream_id.words[2]; + R_MFWD->FWLTHTL4 = stream_id.words[3]; + + /* Calculate FRER number. */ + if (NULL != p_entry_cfg->p_frer_entry_cfg) + { + /* If sequence recovery is passed and has not been learned, two FRER entries will be learned. */ + if ((NULL != p_entry_cfg->p_frer_entry_cfg->p_sequence_recovery) && + (false == + p_instance_ctrl->frer_sequence_recovery_status[p_entry_cfg->p_frer_entry_cfg->sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK].learned)) + { + frer_number = (p_instance_ctrl->valid_frer_entry_num + 1) & LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK; + } + else + { + /* In other case, one FRER entry will be learned. */ + frer_number = p_instance_ctrl->valid_frer_entry_num & LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK; + } + } + + if (p_entry_cfg->entry_enable) + { + /* Set stream ID and security level. */ + R_MFWD->FWLTHTL0 = + (uint32_t) ((R_MFWD_FWLTHTL0_LTHSLP0_Msk & + (uint32_t) (stream_id.frame_format_code << R_MFWD_FWLTHTL0_LTHSLP0_Pos)) | + (uint32_t) (p_entry_cfg->security_enable << R_MFWD_FWLTHTL0_LTHSLL_Pos)); + + /* Configure FRER feature. */ + R_MFWD->FWLTHTL6 = ((R_MFWD_FWLTHTL6_LTHFRERVL_Msk & + (uint32_t) ((NULL != p_entry_cfg->p_frer_entry_cfg) << R_MFWD_FWLTHTL6_LTHFRERVL_Pos)) | + (R_MFWD_FWLTHTL6_LTHFRERNL_Msk & + (frer_number << R_MFWD_FWLTHTL6_LTHFRERNL_Pos))); + + /* Configure routing number and source ports. 
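+ * LTHRVL flags that a layer2/3 update rule is attached to this entry, LTHRNL
+ * holds the routing number that r_layer3_switch_learn_l3_update() consumes
+ * below, and LTHSLVL is the bitmap of source ports the entry applies to.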
*/ + R_MFWD->FWLTHTL7 = + (uint32_t) ((uint32_t) ((NULL != p_entry_cfg->layer3.p_update_configs) << R_MFWD_FWLTHTL7_LTHRVL_Pos) | + (R_MFWD_FWLTHTL7_LTHRNL_Msk & + (uint32_t) (p_instance_ctrl->l3_routing_number << + R_MFWD_FWLTHTL7_LTHRNL_Pos)) | + (R_MFWD_FWLTHTL7_LTHSLVL_Msk & + (p_entry_cfg->source_ports << R_MFWD_FWLTHTL7_LTHSLVL_Pos))); + + /* Configure queue index that frame forwarding to. */ + R_MFWD->FWLTHTL80 = + (uint32_t) (R_MFWD_FWLTHTL80_LTHCSDL_Msk & + (p_entry_cfg->destination_queue_index << R_MFWD_FWLTHTL80_LTHCSDL_Pos)); + + /* Configure destination ports, mirroring and internal priority. After writing this register, hardware start learning. */ + R_MFWD->FWLTHTL9 = + (p_entry_cfg->destination_ports << R_MFWD_FWLTHTL9_LTHDVL_Pos); + } + else + { + /* Set to delete mode. */ + R_MFWD->FWLTHTL0 = + (uint32_t) ((R_MFWD_FWLTHTL0_LTHSLP0_Msk & + (uint32_t) (stream_id.frame_format_code << R_MFWD_FWLTHTL0_LTHSLP0_Pos)) | + (1 << R_MFWD_FWLTHTL0_LTHED_Pos)); + + /* Start removing entry. */ + R_MFWD->FWLTHTL9 = 0; + } + + /* Wait reset. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWLTHTLR_b.LTHTL, 0); + + /* Check learning result. */ + if (0 == (R_MFWD->FWLTHTLR & (R_MFWD_FWLTHTLR_LTHLF_Msk | R_MFWD_FWLTHTLR_LTHLSF_Msk | R_MFWD_FWLTHTLR_LTHLEF_Msk))) + { + err = FSP_SUCCESS; + } + else + { + err = FSP_ERR_WRITE_FAILED; + } + + if (err == FSP_SUCCESS) + { + /* Configure L3 update feature. */ + if (NULL != p_entry_cfg->layer3.p_update_configs) + { + if (0 < p_entry_cfg->layer3.number_of_configs) + { + err = + r_layer3_switch_learn_l3_update(p_instance_ctrl, &p_entry_cfg->layer3.p_update_configs[0]); + routing_number = p_instance_ctrl->l3_routing_number - 1; + } + + /* When two or more config are passed, set it using remapping feature. */ + for (uint32_t i = 1; (i < p_entry_cfg->layer3.number_of_configs) && (FSP_SUCCESS == err); i++) + { + err = r_layer3_switch_remapping_l3_update(p_instance_ctrl, + routing_number, + &p_entry_cfg->layer3.p_update_configs[i]); + } + } + + /* When the FRER individual recovery is passed, learn the FRER entry. */ + if ((FSP_SUCCESS == err) && (NULL != p_entry_cfg->p_frer_entry_cfg)) + { + err = r_layer3_switch_learn_frer_individual_recovery(p_instance_ctrl, p_entry_cfg->p_frer_entry_cfg); + } + + if ((FSP_SUCCESS == err) && p_entry_cfg->entry_enable) + { + /* Save routing number. */ + if (0 != stream_id.frame_format_code) + { + /* This stream ID is generated by stream filter. Save frame with routing number.*/ + p_extend->l3_filter_list[p_instance_ctrl->l3_entry_count].frame = *p_target_frame; + } + + /* Count the valid Layer 3 entry. */ + p_instance_ctrl->l3_entry_count += 1; + } + } + + return err; +} /* End of function r_layer3_switch_learn_l3_entry() */ + +/******************************************************************************************************************* + * Search an entry of layer3 forwarding table. + * + * @param[in] p_port_cfg Pointer to a target frame + * @param[in] port Pointer to an entry + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_search_l3_entry (layer3_switch_frame_filter_t const * const p_target_frame, + layer3_switch_table_entry_cfg_t * const p_entry_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + layer3_switch_stream_id_t stream_id; + uint8_t routing_number; + bool l3_update_enable; + + r_layer3_switch_calculate_l3_stream_id(p_target_frame, &stream_id); + + /* Set stream ID of a entry. 
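+ * The frame format code is written to FWLTHTS0 and the four stream ID words,
+ * produced by r_layer3_switch_calculate_l3_stream_id(), to FWLTHTS1-FWLTHTS4.
+ * The driver then polls LTHTS until the hardware search completes.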
*/ + R_MFWD->FWLTHTS0_b.LTHSSP0 = R_MFWD_FWLTHTS0_LTHSSP0_Msk & stream_id.frame_format_code; + R_MFWD->FWLTHTS1_b.LTHSSP1 = stream_id.words[0]; + R_MFWD->FWLTHTS2_b.LTHSSP2 = stream_id.words[1]; + R_MFWD->FWLTHTS3_b.LTHSSP3 = stream_id.words[2]; + R_MFWD->FWLTHTS4_b.LTHSSP4 = stream_id.words[3]; + + /* Wait completing search. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWLTHTSR0_b.LTHTS, 0); + + if (0 == R_MFWD->FWLTHTSR0_b.LTHSNF) + { + /* Copy entry values. */ + routing_number = R_MFWD->FWLTHTSR3_b.LTHRNS; + p_entry_cfg->source_ports = R_MFWD->FWLTHTSR3_b.LTHSLVS; + p_entry_cfg->destination_ports = R_MFWD->FWLTHTSR5_b.LTHDVS; + + /* Copy enable/disable setting bits. */ + p_entry_cfg->security_enable = R_MFWD->FWLTHTSR0_b.LTHSLS; + l3_update_enable = R_MFWD->FWLTHTSR3_b.LTHRVS; + } + else + { + /* Any entry for the stream ID is not found. */ + err = FSP_ERR_NOT_FOUND; + } + + if (err == FSP_SUCCESS) + { + /* Search and get update config. */ + if (l3_update_enable && (NULL != p_entry_cfg->layer3.p_update_configs)) + { + err = r_layer3_switch_search_l3_update(routing_number, &p_entry_cfg->layer3.p_update_configs[0]); + } + } + + return err; +} /* End of function r_layer3_switch_search_l3_entry() */ + +/******************************************************************************************************************* + * Learn an entry of layer3 update table. + * + * @param[in] routing_number Index of a target layer3 entry. + * @param[in] p_config Pointer to an configuration of update entry. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_learn_l3_update (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_l3_update_config_t * const p_config) +{ + fsp_err_t err = FSP_SUCCESS; + + /* Set the target routing number. */ + R_MFWD->FWL23URL0 = + (R_MFWD_FWL23URL0_L23URNL_Msk & + ((uint32_t) (p_instance_ctrl->l3_routing_number << R_MFWD_FWL23URL0_L23URNL_Pos))) | + (R_MFWD_FWL23URL0_L23URPVL_Msk & + (p_config->enable_destination_ports << R_MFWD_FWL23URL0_L23URPVL_Pos)); + + R_MFWD->FWL23URL1 = + (R_MFWD_FWL23URL1_L23UMDALP0_Msk & + (r_layer3_switch_convert_array_to_int(&p_config->p_mac_destination_address[0], + 2) << R_MFWD_FWL23URL1_L23UMDALP0_Pos)) | + (p_config->update_field_bitmask << R_MFWD_FWL23URL1_L23UTTLUL_Pos) | + (R_MFWD_FWL23URL1_L23URTUL_Msk & ((uint32_t) (p_config->r_tag_update_mode << R_MFWD_FWL23URL1_L23URTUL_Pos))); + + R_MFWD->FWL23URL2 = + (R_MFWD_FWL23URL2_L23UMDALP1_Msk & + (r_layer3_switch_convert_array_to_int(&p_config->p_mac_destination_address[2], + 4) << R_MFWD_FWL23URL2_L23UMDALP1_Pos)); + + R_MFWD->FWL23URL3 = + (R_MFWD_FWL23URL3_L23UCVIDL_Msk & ((uint32_t) (p_config->vlan_c_tag.id << R_MFWD_FWL23URL3_L23UCVIDL_Pos))) | + (R_MFWD_FWL23URL3_L23UCPCPL_Msk & ((uint32_t) (p_config->vlan_c_tag.pcp << R_MFWD_FWL23URL3_L23UCPCPL_Pos))) | + (R_MFWD_FWL23URL3_L23UCDEIL_Msk & ((uint32_t) (p_config->vlan_c_tag.dei << R_MFWD_FWL23URL3_L23UCDEIL_Pos))) | + (R_MFWD_FWL23URL3_L23USVIDL_Msk & ((uint32_t) (p_config->vlan_s_tag.id << R_MFWD_FWL23URL3_L23USVIDL_Pos))) | + (R_MFWD_FWL23URL3_L23USPCPL_Msk & ((uint32_t) (p_config->vlan_s_tag.pcp << R_MFWD_FWL23URL3_L23USPCPL_Pos))) | + (R_MFWD_FWL23URL3_L23USDEIL_Msk & ((uint32_t) (p_config->vlan_s_tag.dei << R_MFWD_FWL23URL3_L23USDEIL_Pos))); + + /* Wait reset. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWL23URLR_b.L23URL, 0); + + if (0 != R_MFWD->FWL23URLR_b.L23ULF) + { + /* Layer3 update learning fail. 
*/ + err = FSP_ERR_WRITE_FAILED; + } + else + { + if (true == p_config->sequence_number_generation_enable) + { + /* Enable sequence_number_generation for this L23U entry. */ + if (LAYER3_SWITCH_SEQ_REG_MAX_NUM > + (p_instance_ctrl->used_frer_sequence_generator_num & LAYER3_SWITCH_FRER_SEQ_GENERATOR_NUM_BITMASK)) + { + r_layer3_switch_configure_sequence_number_generation(p_instance_ctrl); + + p_instance_ctrl->used_frer_sequence_generator_num = + (p_instance_ctrl->used_frer_sequence_generator_num + 1) & + LAYER3_SWITCH_FRER_SEQ_GENERATOR_NUM_BITMASK; + } + } + + p_instance_ctrl->l3_routing_number += 1; + } + + return err; +} /* End of function r_layer3_switch_learn_l3_update() */ + +/******************************************************************************************************************* + * Search an entry of layer3 update table. + * + * @param[in] routing_number Index of a target layer3 entry. + * @param[out] p_config Pointer to an configuration of update entry. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_search_l3_update (uint8_t routing_number, layer3_switch_l3_update_config_t * p_config) +{ + fsp_err_t err = FSP_SUCCESS; + + /* Search for entries that match the routing number. */ + R_MFWD->FWL23URR_b.L23RNR = routing_number; + + /* Wait reset. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWL23URRR0_b.L23URR, 0); + + if (0 == R_MFWD->FWL23URRR0_b.L23UREF) + { + /* Layer3 update entry searching success. Copy fields.*/ + p_config->enable_destination_ports = R_MFWD->FWL23URRR0_b.L23URPVR; + p_config->update_field_bitmask = + ((R_MFWD_FWL23URRR1_L23UTTLUR_Msk | R_MFWD_FWL23URRR1_L23UMDAUR_Msk | R_MFWD_FWL23URRR1_L23UMSAUR_Msk | + R_MFWD_FWL23URRR1_L23UCVIDUR_Msk | R_MFWD_FWL23URRR1_L23UCPCPUR_Msk | R_MFWD_FWL23URRR1_L23UCDEIUR_Msk | + R_MFWD_FWL23URRR1_L23USVIDUR_Msk | R_MFWD_FWL23URRR1_L23USPCPUR_Msk | R_MFWD_FWL23URRR1_L23USDEIUR_Msk) & + R_MFWD->FWL23URRR1) >> R_MFWD_FWL23URRR1_L23UTTLUR_Pos; + p_config->r_tag_update_mode = (layer3_switch_forwarding_r_tag_t) R_MFWD->FWL23URRR1_b.L23URTUR; + p_config->vlan_c_tag.id = R_MFWD->FWL23URRR3_b.L23UCVIDR; + p_config->vlan_c_tag.pcp = R_MFWD->FWL23URRR3_b.L23UCPCPR; + p_config->vlan_c_tag.dei = R_MFWD->FWL23URRR3_b.L23UCDEIR; + p_config->vlan_s_tag.id = R_MFWD->FWL23URRR3_b.L23USVIDR; + p_config->vlan_s_tag.pcp = R_MFWD->FWL23URRR3_b.L23USPCPR; + p_config->vlan_s_tag.dei = R_MFWD->FWL23URRR3_b.L23USDEIR; + if (NULL != p_config->p_mac_destination_address) + { + /* Copy MAC address. */ + p_config->p_mac_destination_address[0] = (uint8_t) (R_MFWD->FWL23URRR1_b.L23UMDARP0 >> 8); + p_config->p_mac_destination_address[1] = (uint8_t) R_MFWD->FWL23URRR1_b.L23UMDARP0; + p_config->p_mac_destination_address[2] = (uint8_t) (R_MFWD->FWL23URRR2_b.L23UMDARP1 >> 24); + p_config->p_mac_destination_address[3] = (uint8_t) (R_MFWD->FWL23URRR2_b.L23UMDARP1 >> 16); + p_config->p_mac_destination_address[4] = (uint8_t) (R_MFWD->FWL23URRR2_b.L23UMDARP1 >> 8); + p_config->p_mac_destination_address[5] = (uint8_t) R_MFWD->FWL23URRR2_b.L23UMDARP1; + } + } + else + { + /* Layer3 update entry is not found. */ + err = FSP_ERR_NOT_FOUND; + } + + return err; +} /* End of function r_layer3_switch_search_l3_update() */ + +/******************************************************************************************************************* + * Enable and configure MAC aging feature. 
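+ *
+ * The prescaler register is loaded with LAYER3_SWITCH_CLOCK_100MHZ and aging is
+ * enabled with the requested cycle time, so dynamic MAC table entries that are
+ * not refreshed are eventually aged out by hardware. Illustrative call (the
+ * argument is an example cycle time only):
+ *
+ *   r_layer3_switch_enable_mac_table_aging(300);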
+ * + * @param[in] aging_time Aging cycle time + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_enable_mac_table_aging (uint32_t aging_time) +{ + /* Configure aging clock prescaler. */ + R_MFWD->FWMACAGUSPC = R_MFWD_FWMACAGUSPC_MACAGUSP_Msk & LAYER3_SWITCH_CLOCK_100MHZ; + + /* Enable aging and set aging time. */ + R_MFWD->FWMACAGC = R_MFWD_FWMACAGC_MACAGE_Msk | (R_MFWD_FWMACAGC_MACAGT_Msk & aging_time); + + return FSP_SUCCESS; +} /* End of function r_layer3_switch_enable_mac_table_aging() */ + +/******************************************************************************************************************* + * Extract VLAN ID from a target frame based on hardware VLAN mode. + * + * @param[in] p_target_frame Pointer to a target frame + * @param[out] p_vlan_id Extracted VLAN ID + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_extract_vlan_id (layer3_switch_frame_filter_t const * const p_target_frame, + uint16_t * p_vlan_id) +{ + fsp_err_t err = FSP_SUCCESS; + + /* Check VLAN mode of this switch. */ + if (R_MFWD->FWGC_b.SVM == LAYER3_SWITCH_VLAN_MODE_NO_VLAN) + { + /* When No VLAN mode, can't use VLAN feature. */ + err = FSP_ERR_INVALID_MODE; + } + else if ((R_MFWD->FWGC_b.SVM == LAYER3_SWITCH_VLAN_MODE_SC_TAG) && (0 < p_target_frame->vlan_s_tag.id)) + { + /* When the switch is SC-TAG mode and passed S-TAG is valid, use S-TAG. */ + *p_vlan_id = p_target_frame->vlan_s_tag.id; + } + else if (0 < p_target_frame->vlan_c_tag.id) + { + /* If passed C-TAG is valid, use C-TAG. */ + *p_vlan_id = p_target_frame->vlan_c_tag.id; + } + else + { + /* If S-TAG and C-TAG is invalid, return error. */ + err = FSP_ERR_INVALID_ARGUMENT; + } + + return err; +} /* End of function r_layer3_switch_extract_vlan_id() */ + +/******************************************************************************************************************* + * Configure VLAN ingress and egress mode of the target port. + * + * @param[in] p_instance_ctrl Pointer to a instance control + * @param[in] p_port_cfg Pointer to a port dependent configuration + * @param[in] port The target port + **********************************************************************************************************************/ +static void r_layer3_switch_initialize_vlan_port (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_forwarding_port_cfg_t const * const p_port_cfg, + uint8_t port) +{ + R_ETHA0_Type * p_reg_etha; + uint32_t vcc_reg_value; + uint32_t vtc_reg_value; + + /* Set ingress/egress mode. */ + vcc_reg_value = (uint32_t) ((p_port_cfg->vlan_ingress_mode << R_ETHA0_EAVCC_VIM_Pos) | + (p_port_cfg->vlan_egress_mode << R_ETHA0_EAVCC_VEM_Pos)); + + /* Set VLAN ID, PCP, DEI of the port. */ + vtc_reg_value = (uint32_t) ((p_port_cfg->vlan_c_tag.id << R_ETHA0_EAVTC_CTV_Pos) | + (p_port_cfg->vlan_c_tag.pcp << R_ETHA0_EAVTC_CTP_Pos) | + (p_port_cfg->vlan_c_tag.dei << R_ETHA0_EAVTC_CTD_Pos) | + (p_port_cfg->vlan_s_tag.id << R_ETHA0_EAVTC_STV_Pos) | + (p_port_cfg->vlan_s_tag.pcp << R_ETHA0_EAVTC_STP_Pos) | + (p_port_cfg->vlan_s_tag.dei << R_ETHA0_EAVTC_STD_Pos)); + + if (port < BSP_FEATURE_ETHER_NUM_CHANNELS) + { + /* When port is external(ETHA). */ + /* Set ETHA to CONFIG mode. 
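+ * The EAVCC/EAVTC writes below are bracketed by a DISABLE -> CONFIG transition
+ * of the port agent beforehand and a DISABLE -> OPERATION transition afterwards,
+ * mirroring the GWCA sequence used for the CPU port in the else branch.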
*/ + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_CONFIG); + + /* Write register to configuration value. */ + p_reg_etha = (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * port)); + p_reg_etha->EAVCC = vcc_reg_value; + p_reg_etha->EAVTC = vtc_reg_value; + + /* Set ETHA to OPERATION mode. */ + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_etha_operation_mode(port, LAYER3_SWITCH_AGENT_MODE_OPERATION); + } + else + { + /* When port is the CPU (GWCA). */ + /* Set GWCA to CONFIG mode. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_CONFIG); + + /* Write register to configuration value. */ + p_instance_ctrl->p_gwca_reg->GWVCC = vcc_reg_value; + p_instance_ctrl->p_gwca_reg->GWVTC = vtc_reg_value; + + /* Set GWCA to OPERATION mode. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_OPERATION); + } +} /* End of function r_layer3_switch_initialize_vlan_port() */ + +/******************************************************************************************************************* + * Configure IPv4/IPv6/L2 stream filter to which frame fields used to stream ID creation. + * + * @param[in] p_filter_cfg Pointer to a stream filter configuration + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_configure_stream_filter (layer3_switch_l3_stream_filter_cfg_t const * p_filter_cfg) +{ + FSP_ERROR_RETURN(NULL != p_filter_cfg, FSP_ERR_INVALID_POINTER); + uint32_t filter_reg_value = 0; + + /* Configure IPv4 filter. */ + /* Set fields included to hash calculation. */ + filter_reg_value = p_filter_cfg->filter_field_bitmask; + + /* Set fields included to stream ID creation. */ + filter_reg_value |= + ((R_MFWD_FWIP4SC_IP4ISVS_Msk | R_MFWD_FWIP4SC_IP4ISPS_Msk | R_MFWD_FWIP4SC_IP4ISDS_Msk | + R_MFWD_FWIP4SC_IP4ICVS_Msk | R_MFWD_FWIP4SC_IP4ICPS_Msk | R_MFWD_FWIP4SC_IP4ICDS_Msk | + R_MFWD_FWIP4SC_IP4IISS_Msk | + R_MFWD_FWIP4SC_IP4IIDS_Msk) & + (uint32_t) (p_filter_cfg->filter_field_bitmask << (R_MFWD_FWIP4SC_IP4ISVS_Pos - 2))) | + ((uint32_t) ((p_filter_cfg->filter_field_bitmask & + LAYER3_SWITCH_L3_FILTER_BITMASK_IP_DESTINATION_PORT) != 0) << R_MFWD_FWIP4SC_IP4IDPTS_Pos); + + R_MFWD->FWIP4SC = filter_reg_value; + + /* Configure IPv6 filter. */ + /* Set fields included to hash calculation. */ + filter_reg_value = p_filter_cfg->filter_field_bitmask; + + /* Set fields included to stream ID creation. */ + filter_reg_value |= + ((R_MFWD_FWIP6SC_IP6ISVS_Msk | R_MFWD_FWIP6SC_IP6ISPS_Msk | R_MFWD_FWIP6SC_IP6ISDS_Msk | + R_MFWD_FWIP6SC_IP6ICVS_Msk | R_MFWD_FWIP6SC_IP6ICPS_Msk | R_MFWD_FWIP6SC_IP6ICDS_Msk | + R_MFWD_FWIP6SC_IP6II0S_Msk | + R_MFWD_FWIP6SC_IP6II1S_Msk) & + (uint32_t) (p_filter_cfg->filter_field_bitmask << (R_MFWD_FWIP6SC_IP6ISVS_Pos - 2))) | + ((uint32_t) ((p_filter_cfg->filter_field_bitmask & + LAYER3_SWITCH_L3_FILTER_BITMASK_IP_DESTINATION_PORT) != 0) << R_MFWD_FWIP6SC_IP6IDPTS_Pos); + + R_MFWD->FWIP6SC = filter_reg_value; + + /* Configure L2 filter. */ + /* Set fields included to stream ID creation. 
*/ + filter_reg_value = + ((R_MFWD_FWL2SC_L2ISVS_Msk | R_MFWD_FWL2SC_L2ISPS_Msk | R_MFWD_FWL2SC_L2ISDS_Msk | R_MFWD_FWL2SC_L2ICVS_Msk | + R_MFWD_FWL2SC_L2ICPS_Msk | R_MFWD_FWL2SC_L2ICDS_Msk | R_MFWD_FWL2SC_L2IMDS_Msk | R_MFWD_FWL2SC_L2IMSS_Msk) & + p_filter_cfg->filter_field_bitmask); + R_MFWD->FWL2SC = filter_reg_value; + + /* Set IPv6 address offset. */ + R_MFWD->FWIP6OC = + (uint32_t) ((R_MFWD_FWIP6OC_IP6IPO1_Msk & + (uint32_t) (p_filter_cfg->ipv6_address1.offset << R_MFWD_FWIP6OC_IP6IPO1_Pos)) | + (R_MFWD_FWIP6OC_IP6IPOM1_Msk & + (uint32_t) (p_filter_cfg->ipv6_address1.direction << R_MFWD_FWIP6OC_IP6IPOM1_Pos)) | + (R_MFWD_FWIP6OC_IP6IPO_Msk & + (uint32_t) (p_filter_cfg->ipv6_address0.offset << R_MFWD_FWIP6OC_IP6IPO_Pos)) | + (R_MFWD_FWIP6OC_IP6IPOM_Msk & + (uint32_t) (p_filter_cfg->ipv6_address0.direction << R_MFWD_FWIP6OC_IP6IPOM_Pos))); + + /* Set IPv4/IPv6 hash equation. */ + R_MFWD->FWSFHEC = (uint32_t) ((1 << R_MFWD_FWSFHEC_IP4HE_Pos) | (1 << R_MFWD_FWSFHEC_IP6HE_Pos)); + + return FSP_SUCCESS; +} /* End of function r_layer3_switch_configure_stream_filter() */ + +/******************************************************************************************************************* + * Calculate a layer3 hash value of the target frame. + * + * @param[in] p_frame Pointer to the target frame. + **********************************************************************************************************************/ +uint16_t r_layer3_switch_calculate_l3_hash (layer3_switch_frame_filter_t const * p_frame) +{ + uint16_t hash_value; + + /* If target is not IP packet, hash value is 0. */ + if (LAYER3_SWITCH_IP_VERSION_NONE == p_frame->ip_version) + { + hash_value = 0; + } + else + { + /* Set destination MAC address. */ + if (NULL != p_frame->p_destination_mac_address) + { + R_MFWD->FWSHCR0 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_mac_address[0], 4); + R_MFWD->FWSHCR1_b.SHCMDP1 = (uint16_t) r_layer3_switch_convert_array_to_int( + &p_frame->p_destination_mac_address[4], + 2); + } + else + { + R_MFWD->FWSHCR0 = 0; + R_MFWD->FWSHCR1_b.SHCMDP1 = 0; + } + + if (NULL != p_frame->p_source_mac_address) + { + /* Source address should be input as 2 byte higher part and 4 byte lower part. */ + R_MFWD->FWSHCR1_b.SHCMSP0 = (uint16_t) r_layer3_switch_convert_array_to_int( + &p_frame->p_source_mac_address[0], + 2); + R_MFWD->FWSHCR2 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_mac_address[2], 4); + } + else + { + R_MFWD->FWSHCR1_b.SHCMSP0 = 0; + R_MFWD->FWSHCR2 = 0; + } + + /* Set VLANs. */ + R_MFWD->FWSHCR3 = r_layer3_switch_convert_vlan_tag_to_int(&p_frame->vlan_s_tag, &p_frame->vlan_c_tag); + + /* Set IP addresses. */ + if (LAYER3_SWITCH_IP_VERSION_IPV4 == p_frame->ip_version) + { + /* Set IP protocol. */ + R_MFWD->FWSHCR4 = p_frame->protocol; + + /* IPv4 source. */ + R_MFWD->FWSHCR5 = 0; + R_MFWD->FWSHCR6 = 0; + R_MFWD->FWSHCR7 = 0; + R_MFWD->FWSHCR8 = r_layer3_switch_convert_array_to_int(p_frame->p_source_ip_address, 4); + + /* IPv4 destination. */ + R_MFWD->FWSHCR9 = 0; + R_MFWD->FWSHCR10 = 0; + R_MFWD->FWSHCR11 = 0; + R_MFWD->FWSHCR12 = r_layer3_switch_convert_array_to_int(p_frame->p_destination_ip_address, 4); + } + else + { + /* Set IP protocol and IPv6 bit. */ + R_MFWD->FWSHCR4 = (1 << R_MFWD_FWSHCR4_SHCFF_Pos) | p_frame->protocol; + + /* IPv6 source. 
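+ * The 16-byte IPv6 source and destination addresses are loaded four bytes at a
+ * time into FWSHCR5-FWSHCR8 and FWSHCR9-FWSHCR12; in the IPv4 branch above only
+ * the last register of each group carries the address and the others are
+ * written as zero.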
*/ + if (NULL != p_frame->p_source_ip_address) + { + R_MFWD->FWSHCR5 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_ip_address[0], 4); + R_MFWD->FWSHCR6 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_ip_address[4], 4); + R_MFWD->FWSHCR7 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_ip_address[8], 4); + R_MFWD->FWSHCR8 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_ip_address[12], 4); + } + else + { + R_MFWD->FWSHCR5 = 0; + R_MFWD->FWSHCR6 = 0; + R_MFWD->FWSHCR7 = 0; + R_MFWD->FWSHCR8 = 0; + } + + /* IPv6 destination. */ + if (NULL != p_frame->p_destination_ip_address) + { + R_MFWD->FWSHCR9 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_ip_address[0], 4); + R_MFWD->FWSHCR10 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_ip_address[4], 4); + R_MFWD->FWSHCR11 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_ip_address[8], 4); + R_MFWD->FWSHCR12 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_ip_address[12], 4); + } + else + { + R_MFWD->FWSHCR9 = 0; + R_MFWD->FWSHCR10 = 0; + R_MFWD->FWSHCR11 = 0; + R_MFWD->FWSHCR12 = 0; + } + } + + /* Set layer4 destination/source ports. That used for TCP and UDP. */ + R_MFWD->FWSHCR13 = + (R_MFWD_FWSHCR13_SHCDP_Msk & + (uint32_t) (p_frame->layer4_destination_port << R_MFWD_FWSHCR13_SHCDP_Pos)) | + (R_MFWD_FWSHCR13_SHCSP_Msk & + (uint32_t) (p_frame->layer4_source_port << R_MFWD_FWSHCR13_SHCSP_Pos)); + + /* After writing FWSHCR13 register, IP start calculation. */ + /* Wait for completing calculation. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWSHCRR_b.SHC, 0); + + /* Read hash value. */ + hash_value = R_MFWD->FWSHCRR_b.SHCR; + } + + return hash_value; +} /* End of function r_layer3_switch_calculate_l3_hash() */ + +/******************************************************************************************************************* + * Calculate a layer3 format code of the target frame. + * + * @param[in] p_frame Pointer to the target frame. + **********************************************************************************************************************/ +uint8_t r_layer3_switch_calculate_l3_format_code (layer3_switch_frame_filter_t const * p_frame) +{ + uint8_t ip_version_offset = 0; + uint8_t ip_protocol_offset = 0; + + /* Get the offset value of format code based on the IP version. */ + switch (p_frame->ip_version) + { + case LAYER3_SWITCH_IP_VERSION_IPV4: + { + ip_version_offset = 1; + break; + } + + case LAYER3_SWITCH_IP_VERSION_IPV6: + { + ip_version_offset = 4; + break; + } + + case LAYER3_SWITCH_IP_VERSION_NONE: + { + /* When not IP packet. */ + ip_version_offset = 7; + break; + } + + default: + { + /* Not reach here. */ + break; + } + } + + if (LAYER3_SWITCH_IP_VERSION_NONE != p_frame->ip_version) + { + switch (p_frame->protocol) + { + case LAYER3_SWITCH_IP_PROTOCOL_UDP: + { + ip_protocol_offset = 1; + break; + } + + case LAYER3_SWITCH_IP_PROTOCOL_TCP: + { + ip_protocol_offset = 2; + break; + } + + default: + { + ip_protocol_offset = 0; + break; + } + } + } + + /* Return frame format code. */ + return ip_version_offset + ip_protocol_offset; +} /* End of function r_layer3_switch_calculate_l3_format_code() */ + +/******************************************************************************************************************* + * Calculate a layer3 stream ID of the target frame. + * + * @param[in] p_frame Pointer to the target frame. + * @param[out] p_stream_id Pointer to the stream ID. 
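+ *
+ * The resulting stream ID is a frame format code plus four 32-bit words:
+ * words[0] packs the S-TAG and C-TAG, and for IPv4/IPv6 frames words[1]
+ * combines the layer4 destination port with the calculated hash while
+ * words[2]/words[3] hold the extracted IP address words. For non-IP (layer2)
+ * frames the remaining words carry the destination and source MAC addresses
+ * instead.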
+ **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_calculate_l3_stream_id (layer3_switch_frame_filter_t const * p_frame, + layer3_switch_stream_id_t * p_stream_id) +{ + uint32_t ip_address0 = 0; + uint32_t ip_address1 = 0; + uint32_t ipv6_offset; + + switch (p_frame->ip_version) + { + case LAYER3_SWITCH_IP_VERSION_IPV4: + { + ip_address0 = r_layer3_switch_convert_array_to_int(p_frame->p_source_ip_address, 4); + ip_address1 = r_layer3_switch_convert_array_to_int(p_frame->p_destination_ip_address, 4); + + break; + } + + case LAYER3_SWITCH_IP_VERSION_IPV6: + { + /* Extract 4 bytes from the IPv6 address according to the offset configuration. */ + ipv6_offset = R_MFWD->FWIP6OC_b.IP6IPO0; + if ((0 == R_MFWD->FWIP6OC_b.IP6IPOM0) && (NULL != p_frame->p_source_ip_address)) + { + /* Use source IP address. */ + ip_address0 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_ip_address[ipv6_offset], 4); + } + else if ((1 == R_MFWD->FWIP6OC_b.IP6IPOM0) && (NULL != p_frame->p_destination_ip_address)) + { + /* Use destination IP address. */ + ip_address0 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_ip_address[ipv6_offset], 4); + } + else + { + ip_address0 = 0; + } + + /* Extract additional 4 bytes according to another offset configuration. */ + ipv6_offset = R_MFWD->FWIP6OC_b.IP6IPO1; + if ((0 == R_MFWD->FWIP6OC_b.IP6IPOM1) && (NULL != p_frame->p_source_ip_address)) + { + ip_address1 = r_layer3_switch_convert_array_to_int(&p_frame->p_source_ip_address[ipv6_offset], 4); + } + else if ((1 == R_MFWD->FWIP6OC_b.IP6IPOM1) && (NULL != p_frame->p_destination_ip_address)) + { + ip_address1 = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_ip_address[ipv6_offset], 4); + } + else + { + ip_address1 = 0; + } + + break; + } + + default: + { + break; + } + } + + /* Common for stream filters. */ + p_stream_id->frame_format_code = + r_layer3_switch_calculate_l3_format_code(p_frame); + p_stream_id->words[0] = r_layer3_switch_convert_vlan_tag_to_int(&p_frame->vlan_s_tag, &p_frame->vlan_c_tag); + + if (LAYER3_SWITCH_IP_VERSION_NONE == p_frame->ip_version) + { + /* When Layer2 stream filter. */ + + /* Set destination MAC address. */ + if (NULL != p_frame->p_destination_mac_address) + { + p_stream_id->words[1] = r_layer3_switch_convert_array_to_int(&p_frame->p_destination_mac_address[0], 4); + p_stream_id->words[2] = + r_layer3_switch_convert_array_to_int(&p_frame->p_destination_mac_address[4], 2) << 16; + } + + /* Source address should be input as 2 byte higher part and 4 byte lower part. */ + if (NULL != p_frame->p_source_mac_address) + { + p_stream_id->words[2] |= r_layer3_switch_convert_array_to_int(&p_frame->p_source_mac_address[0], 2); + p_stream_id->words[3] = r_layer3_switch_convert_array_to_int(&p_frame->p_source_mac_address[2], 4); + } + } + else + { + /* When IPv4/IPv6 stream filter. */ + p_stream_id->words[1] = (uint32_t) ((p_frame->layer4_destination_port << 16) | + r_layer3_switch_calculate_l3_hash(p_frame)); + p_stream_id->words[2] = ip_address0; + p_stream_id->words[3] = ip_address1; + } + + return FSP_SUCCESS; +} /* End of function r_layer3_switch_calculate_l3_stream_id() */ + +/******************************************************************************************************************* + * Configure remapping feature for L3 update. 
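+ *
+ * Remapping lets a single layer3 entry reference more than one update
+ * configuration: a new update rule is learned first, then an FWL23URMC register
+ * maps the original routing number and the selected destination port onto the
+ * routing number of that new rule.
+ *
+ * @param[in] p_instance_ctrl Pointer to the instance control.
+ * @param[in] routing_number  Routing number of the existing update rule to remap.
+ * @param[in] p_update_cfg    Pointer to the additional update configuration.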
+ **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_remapping_l3_update (layer3_switch_instance_ctrl_t * p_instance_ctrl, + uint32_t routing_number, + layer3_switch_l3_update_config_t * p_update_cfg) +{ + uint8_t dest_port = (uint8_t) (31 - __CLZ(p_update_cfg->enable_destination_ports)); + volatile uint32_t * p_mfwd_fwl23urmc_reg = &R_MFWD->FWL23URMC0 + p_instance_ctrl->l3_remapping_number; + fsp_err_t err; + + FSP_ERROR_RETURN(LAYER3_SWITCH_L3_UPDATE_REMAPPING_MAX_NUM >= p_instance_ctrl->l3_remapping_number, + FSP_ERR_WRITE_FAILED); + + /* Learn new l3 update rule. */ + err = r_layer3_switch_learn_l3_update(p_instance_ctrl, p_update_cfg); + + if (FSP_SUCCESS == err) + { + /* Set remapping feature. */ + *p_mfwd_fwl23urmc_reg = (routing_number << R_MFWD_FWL23URMC0_RMRN_Pos) | + (uint32_t) (dest_port << R_MFWD_FWL23URMC0_RMDPN_Pos) | + (uint32_t) ((p_instance_ctrl->l3_routing_number - 1) << R_MFWD_FWL23URMC0_RMNRN_Pos) | + R_MFWD_FWL23URMC0_RME_Msk; + + /* Increment indexes. */ + p_instance_ctrl->l3_routing_number += 1; + p_instance_ctrl->l3_remapping_number += 1; + } + + return err; +} /* End of function r_layer3_switch_remapping_l3_update() */ + +/******************************************************************************************************************* + * Convert VLAN SC-TAG to uint32_t. + * + * @param[in] p_vlan_s_tag Pointer to a VLAN S-TAG. + * @param[in] p_vlan_c_tag Pointer to a VLAN C-TAG. + **********************************************************************************************************************/ +static uint32_t r_layer3_switch_convert_vlan_tag_to_int (layer3_switch_frame_vlan_tag_t const * p_vlan_s_tag, + layer3_switch_frame_vlan_tag_t const * p_vlan_c_tag) +{ + return (uint32_t) ( + (((p_vlan_s_tag->pcp << LAYER3_SWITCH_VLAN_TAG_PCP_POSITION) | + (p_vlan_s_tag->dei << LAYER3_SWITCH_VLAN_TAG_DEI_POSITION) | p_vlan_s_tag->id) << 16) | + ((p_vlan_c_tag->pcp << LAYER3_SWITCH_VLAN_TAG_PCP_POSITION) | + (p_vlan_c_tag->dei << LAYER3_SWITCH_VLAN_TAG_DEI_POSITION) | p_vlan_c_tag->id)); +} /* End of function r_layer3_switch_convert_vlan_tag_to_int() */ + +/******************************************************************************************************************* + * Convert a uint8_t array to int32_t. + * + * @param[in] array Pointer to a array. + **********************************************************************************************************************/ +static uint32_t r_layer3_switch_convert_array_to_int (uint8_t * array, uint8_t length) +{ + uint32_t result = 0; + if (NULL != array) + { + for (int i = 0; i < length; i++) + { + result |= (uint32_t) (array[i] << (8 * (length - 1 - i))); + } + } + + return result; +} /* End of function r_layer3_switch_convert_array_to_int() */ + +/******************************************************************************************************************* + * Configure CBS feature for a port. + **********************************************************************************************************************/ +static void r_layer3_switch_configure_cbs (layer3_switch_instance_ctrl_t const * p_instance_ctrl, + uint8_t port, + layer3_switch_cbs_cfg_t const * p_cbs_cfg) +{ + R_ETHA0_Type * p_etha_reg = + (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * port)); + + /* CBS registers for the target queue. 
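+ * The credit increment value (CIV) written to EACAIVCn below is
+ * (link_speed / LAYER3_SWITCH_CBS_BITS_PER_BYTE) * bandwidth_fraction /
+ * eswclk_frequency, stored as 8.16 fixed point (integer part from bit 16 up,
+ * fraction scaled by 65536). Worked example, assuming LAYER3_SWITCH_LINK_SPEED_1G
+ * is 1e9 bit/s and a hypothetical 200 MHz ESWCLK with a 20 % entry in
+ * band_width_list: (1e9 / 8) * 0.2 / 200e6 = 0.125, which encodes to 0x00002000.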
*/ + volatile uint32_t * p_eacaivc_reg; + volatile uint32_t * p_eacaulc_reg; + + double bandwidth_fraction; + double civ_raw_value; + uint32_t civ_int_value; + uint32_t max_interference_size; + uint8_t enable_cbs_bitmask = 0; + uint32_t eswclk_frequency; + uint32_t link_speed = 0; + ether_phy_instance_t const * p_phy_instance = + ((layer3_switch_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend)->p_ether_phy_instances[port]; + + /*Get frequency of ESWCLK. */ + eswclk_frequency = R_BSP_SourceClockHzGet((fsp_priv_source_clock_t) (R_SYSTEM->ESWCKCR_b.CKSEL)) / + R_FSP_ClockDividerGet(R_SYSTEM->ESWCKDIVCR_b.CKDIV); + + /* Get link speed. */ + switch (p_phy_instance->p_cfg->mii_type) + { + /* 100 Mbps. */ + case ETHER_PHY_MII_TYPE_MII: + case ETHER_PHY_MII_TYPE_RMII: + { + link_speed = LAYER3_SWITCH_LINK_SPEED_100M; + break; + } + + /* 1000 Mbps. */ + case ETHER_PHY_MII_TYPE_GMII: + case ETHER_PHY_MII_TYPE_RGMII: + { + link_speed = LAYER3_SWITCH_LINK_SPEED_1G; + break; + } + + default: + { + break; + } + } + + for (uint8_t i = 0; i < BSP_FEATURE_ESWM_ETHA_IPV_QUEUE_NUM; i++) + { + if (0 != p_cbs_cfg->band_width_list[i]) + { + p_eacaivc_reg = (volatile uint32_t *) ((uint8_t *) &p_etha_reg->EACAIVC0 + (4 * i)); + p_eacaulc_reg = (volatile uint32_t *) ((uint8_t *) &p_etha_reg->EACAULC0 + (4 * i)); + + /* Calculate CIV (Credit Increment Value). */ + bandwidth_fraction = p_cbs_cfg->band_width_list[i] / 100.0; + civ_raw_value = (link_speed / LAYER3_SWITCH_CBS_BITS_PER_BYTE) * bandwidth_fraction / eswclk_frequency; + civ_int_value = + (uint32_t) (((uint8_t) civ_raw_value << 16) | (uint16_t) (civ_raw_value * (UINT16_MAX + 1))); + + /* Set CIV value to the register. */ + *p_eacaivc_reg = civ_int_value; + + /* Calculate credit upper limit. */ + max_interference_size = r_layer3_switch_calculate_max_interference_size(i, p_cbs_cfg->max_burst_num_list); + *p_eacaulc_reg = 0 * + max_interference_size * + (eswclk_frequency / (link_speed + LAYER3_SWITCH_CBS_REQUEST_DELAY)) * civ_int_value; + enable_cbs_bitmask |= (1 << i); + } + } + + /* Enable and apply CBS configuration. */ + p_etha_reg->EACAEC = enable_cbs_bitmask; + p_etha_reg->EACC = enable_cbs_bitmask; +} /* End of function r_layer3_switch_configure_cbs() */ + +/******************************************************************************************************************* + * Calculate max interference size which used to configure CBS. + **********************************************************************************************************************/ +static uint32_t r_layer3_switch_calculate_max_interference_size (uint8_t queue_number, + uint8_t const * p_max_burst_num_list) +{ + uint32_t queue_interference_size = (LAYER3_SWITCH_MAXIMUM_FRAME_SIZE + LAYER3_SWITCH_CBS_INTERFERENCE_SIZE_OFFSET) * + LAYER3_SWITCH_CBS_BITS_PER_BYTE; + uint32_t queue_interference_size_low = 0; + uint32_t queue_interference_size_high = 0; + + /* Get the interference size of the queues with lower priority than the target queue. */ + if (queue_number > 0) + { + queue_interference_size_low = (LAYER3_SWITCH_MAXIMUM_FRAME_SIZE + LAYER3_SWITCH_CBS_INTERFERENCE_SIZE_OFFSET) * + LAYER3_SWITCH_CBS_BITS_PER_BYTE; + } + + /* Get the interference size of the queues with higher priority than the target queue. 
*/ + for (uint8_t i = queue_number + 1; i < BSP_FEATURE_ESWM_ETHA_IPV_QUEUE_NUM; i++) + { + if (0 != p_max_burst_num_list[i]) + { + queue_interference_size_high += + (LAYER3_SWITCH_MAXIMUM_FRAME_SIZE * 2 + LAYER3_SWITCH_CBS_INTERFERENCE_SIZE_OFFSET * 2) * + LAYER3_SWITCH_CBS_BITS_PER_BYTE; + } + else + { + queue_interference_size_high += + ((p_max_burst_num_list[i] + 1) * LAYER3_SWITCH_MAXIMUM_FRAME_SIZE + + LAYER3_SWITCH_CBS_INTERFERENCE_SIZE_OFFSET * 2) * LAYER3_SWITCH_CBS_BITS_PER_BYTE; + } + } + + return queue_interference_size_low + queue_interference_size + queue_interference_size_high; +} /* End of function r_layer3_switch_calculate_max_interference_size() */ + +/*********************************************************************************************************************** + * Create queue of transmission timestamp. + ***********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_create_tx_timestamp_queue (ether_switch_ctrl_t * const p_ctrl, + const layer3_switch_descriptor_queue_cfg_t * const p_queue_cfg, + uint32_t * const p_ts_descriptor_queue_index) +{ + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) p_ctrl; + layer3_switch_extended_cfg_t * p_extend; + layer3_switch_ts_reception_process_descriptor_t * p_ts_descriptor; + + volatile uint32_t * p_gwtsdcc_reg; /* Timestamp Descriptor Chain Configuration */ + volatile uint32_t * p_gwtdcac0_reg; /* Timestamp Descriptor Chain Address Configuration 0 */ + volatile uint32_t * p_gwtdcac1_reg; /* Timestamp Descriptor Chain Address Configuration 1 */ + + uint8_t port = (uint8_t) (1 & (p_queue_cfg->ports >> 1)); + + p_extend = (layer3_switch_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + + /* Search TS descriptor queue can use. */ + uint32_t ts_descriptor_queue_index = 0; + for (ts_descriptor_queue_index = 0; + ts_descriptor_queue_index < BSP_FEATURE_ESWM_TS_DESCRIPTOR_QUEUE_MAX_NUM; + ts_descriptor_queue_index++) + { + if (port == ts_descriptor_queue_index) + { + if (LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_STATUS_UNUSED == + p_instance_ctrl->ts_descriptor_queue_status_list[ts_descriptor_queue_index]) + { + break; + } + } + } + + FSP_ERROR_RETURN(BSP_FEATURE_ESWM_TS_DESCRIPTOR_QUEUE_MAX_NUM > ts_descriptor_queue_index, FSP_ERR_OVERFLOW); + + p_ts_descriptor = p_queue_cfg->p_ts_descriptor_array; + + /* Initialize TS descriptor in queue. */ + for (uint8_t i = 0; i < (p_queue_cfg->array_length - 1); i++) + { + p_ts_descriptor[i].ts_reception_descriptor_basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_ND; + p_ts_descriptor[i].ts_reception_descriptor_basic.die = 1; + } + + p_ts_descriptor[(p_queue_cfg->array_length - + 1)].ts_reception_descriptor_basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LINKFIX; + p_ts_descriptor[(p_queue_cfg->array_length - + 1)].ts_reception_descriptor_basic.ptr_l = (uintptr_t) p_ts_descriptor; + + /* Set GWCA to CONFIG mode. 
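+ * The descriptor chain base address is programmed into GWTDCAC1n/GWTDCAC0n and
+ * the queue is bound to the port's gPTP timer through GWTSDCCn while the GWCA
+ * agent is held in CONFIG mode; the agent is returned to OPERATION once the
+ * registers have been written.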
*/ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_CONFIG); + + p_gwtdcac0_reg = (volatile uint32_t *) ((uint8_t *) &p_instance_ctrl->p_gwca_reg->GWTDCAC00 + + (LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_REGISTOR_OFFSET * + ts_descriptor_queue_index)); + + p_gwtdcac1_reg = (volatile uint32_t *) ((uint8_t *) &p_instance_ctrl->p_gwca_reg->GWTDCAC10 + + (LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_REGISTOR_OFFSET * + ts_descriptor_queue_index)); + + *p_gwtdcac1_reg = (uintptr_t) p_ts_descriptor; + *p_gwtdcac0_reg = 0; + + p_gwtsdcc_reg = (volatile uint32_t *) ((uint8_t *) &p_instance_ctrl->p_gwca_reg->GWTSDCC0 + + (LAYER3_SWITCH_TS_DESCRIPTOR_TIMER_REGISTOR_OFFSET * + p_extend->gptp_timer_numbers[port])); + + *p_gwtsdcc_reg = ((1U << R_GWCA0_GWTSDCC0_TE_Pos) | + (ts_descriptor_queue_index << R_GWCA0_GWTSDCC0_DCS_Pos)); + + /* Set GWCA to OPERATION mode. */ + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_DISABLE); + r_layer3_switch_update_gwca_operation_mode(p_instance_ctrl, LAYER3_SWITCH_AGENT_MODE_OPERATION); + + /* Output TS Descriptor queue index and change state used. */ + *p_ts_descriptor_queue_index = ts_descriptor_queue_index; + p_instance_ctrl->ts_descriptor_queue_status_list[ts_descriptor_queue_index] = + LAYER3_SWITCH_TS_DESCRIPTOR_QUEUE_STATUS_USED; + + return FSP_SUCCESS; +} /* End of function r_layer3_switch_create_tx_timestamp_queue() */ + +/******************************************************************************************************************* + * FRER initialization. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_frer_init (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frer_cfg_t const * p_frer_cfg) +{ + volatile uint32_t * p_fwseqngc_reg; + +#if LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE + FSP_ERROR_RETURN(p_frer_cfg, FSP_ERR_INVALID_POINTER); +#endif + + r_layer3_switch_frer_table_reset(); + + /* Set FRER timeout us prescaler 1 MHz to derive the timeout 1 kHz clock. */ + R_MFWD->FWFTOPC_b.USP = p_frer_cfg->sys_clock & LAYER3_SWITCH_FRER_SYSTEM_CLOCK_BITMASK; + R_MFWD->FWFTOC_b.TOCE = p_frer_cfg->timeout_enable & 0x1U; + R_MFWD->FWFTOC_b.TOT = p_frer_cfg->check_period & LAYER3_SWITCH_FRER_CHECK_PERIOD_BITMASK; + + /* Sequence number generation reset n (n = 0 to 31). */ + R_MFWD->FWSEQNRC_b.SEQNR |= R_MFWD_FWSEQNRC_SEQNR_Msk; + + /* FWSEQNGCi(i = 0~31) 0 write. */ + for (uint32_t i = 0; i < LAYER3_SWITCH_SEQ_REG_MAX_NUM; i++) + { + p_fwseqngc_reg = + (uint32_t *) ((uintptr_t) (&R_MFWD->FWSEQNGC0) + (i * LAYER3_SWITCH_FWSEQNGC_REGISTER_OFFSET)); + *p_fwseqngc_reg = 0; + } + + /*SEQNR 0 write. */ + R_MFWD->FWSEQNRC_b.SEQNR &= ~R_MFWD_FWSEQNRC_SEQNR_Msk; + + /* Reset current register index. 
*/ + p_instance_ctrl->valid_frer_entry_num = 0; + + R_MFWD->FWSEQNRC |= R_MFWD_FWSEQNRC_SEQNR_Msk; + for (uint32_t i = 0; i < LAYER3_SWITCH_SEQ_REG_MAX_NUM; i++) + { + p_fwseqngc_reg = + (uint32_t *) ((uintptr_t) (&R_MFWD->FWSEQNGC0) + (i * LAYER3_SWITCH_FWSEQNGC_REGISTER_OFFSET)); + *p_fwseqngc_reg = 0; + } + + R_MFWD->FWSEQNRC &= ~R_MFWD_FWSEQNRC_SEQNR_Msk; + + p_instance_ctrl->used_frer_sequence_generator_num = 0; + + return FSP_SUCCESS; +} /* End of function r_layer3_switch_frer_init() */ + +/******************************************************************************************************************* + * Reset FRER table. + **********************************************************************************************************************/ +static void r_layer3_switch_frer_table_reset (void) +{ + R_MFWD->FWFTIM_b.FTIOG = R_MFWD_FWFTIM_FTIOG_Msk; /* FRER Table Initialization Ongoing */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWFTIM_b.FTR, 1); /* FRER Table Ready */ +} /* End of function r_layer3_switch_frer_table_reset() */ + +/******************************************************************************************************************* + * Configure sequence number generate. + **********************************************************************************************************************/ +static void r_layer3_switch_configure_sequence_number_generation (layer3_switch_instance_ctrl_t * p_instance_ctrl) +{ + volatile uint32_t * p_fwseqngc_reg; + uint32_t fwseqngc_reg_value; + + /* Get FWSEQNGCi register address. */ + p_fwseqngc_reg = + (uint32_t *) ((uintptr_t) (&R_MFWD->FWSEQNGC0) + + ((p_instance_ctrl->used_frer_sequence_generator_num & + LAYER3_SWITCH_FRER_SEQ_GENERATOR_NUM_BITMASK) * + LAYER3_SWITCH_FWSEQNGC_REGISTER_OFFSET)); + fwseqngc_reg_value = *p_fwseqngc_reg; + + /* Sequence generation enable. Routing number set. Sequence number generate valid. */ + fwseqngc_reg_value |= (p_instance_ctrl->l3_routing_number & R_MFWD_FWSEQNGC0_SEQNGRN_Msk) | + (1U << R_MFWD_FWSEQNGC0_SEQNGE_Pos); + *p_fwseqngc_reg = fwseqngc_reg_value; +} /* End of function r_layer3_switch_configure_sequence_number_generation() */ + +/******************************************************************************************************************* + * Learn an entry of FRER table. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_learn_frer_entry (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frer_entry_t * const p_frer_entry, + layer3_switch_frer_entry_t * const p_sequence_recovery, + uint32_t sequence_recovery_id) +{ + fsp_err_t err = FSP_SUCCESS; + uint32_t sequence_recovery_addr = 0; + + if (NULL != p_sequence_recovery) + { + sequence_recovery_addr = + p_instance_ctrl->frer_sequence_recovery_status[sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK].frer_entry_index & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK; + } + + /* Set entry address and sequence recovery pointer. */ + R_MFWD->FWFTL0 = (R_MFWD_FWFTL0_FSRPL_Msk & (sequence_recovery_addr << R_MFWD_FWFTL0_FSRPL_Pos)) | + (R_MFWD_FWFTL0_FEAL_Msk & (p_instance_ctrl->valid_frer_entry_num << R_MFWD_FWFTL0_FEAL_Pos)); + + /* Set sequence recovery settings. 
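+ * FWFTL1 carries the recovery parameters: sequence history length, the
+ * take-no-sequence flag, whether a sequence recovery entry is linked (FSRPVL),
+ * and the recovery remaining-tick value. The driver then polls FWFTLR.FTL until
+ * the hardware finishes learning the entry.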
*/ + R_MFWD->FWFTL1 = + (R_MFWD_FWFTL1_FSHLL_Msk & (p_frer_entry->sequence_history_len << R_MFWD_FWFTL1_FSHLL_Pos)) | + (R_MFWD_FWFTL1_FTNSL_Msk & (p_frer_entry->take_no_sequence << R_MFWD_FWFTL1_FTNSL_Pos)) | + (R_MFWD_FWFTL1_FSRPVL_Msk & + (uint32_t) ((NULL != p_sequence_recovery) << R_MFWD_FWFTL1_FSRPVL_Pos)) | + (R_MFWD_FWFTL1_FSRRTL_Msk & (p_frer_entry->set_recovery_remaining_tick << R_MFWD_FWFTL1_FSRRTL_Pos)); + + /* Wait for learning complete. */ + FSP_HARDWARE_REGISTER_WAIT(R_MFWD->FWFTLR_b.FTL, 0); + + /* Check learning error. */ + if (1 == R_MFWD->FWFTLR_b.FLF) + { + err = FSP_ERR_WRITE_FAILED; + } + else + { + /* Store the entry information to sequence recovery table. */ + if (NULL == p_sequence_recovery) + { + p_instance_ctrl->frer_sequence_recovery_status[sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK].learned = + true; + p_instance_ctrl->frer_sequence_recovery_status[sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK].frer_entry_index = + p_instance_ctrl->valid_frer_entry_num & LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK; + } + + /* Update the current index of the FRER table. */ + p_instance_ctrl->valid_frer_entry_num = (p_instance_ctrl->valid_frer_entry_num + 1) & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK; + } + + return err; +} /* End of function r_layer3_switch_learn_frer_entry() */ + +/******************************************************************************************************************* + * Learn a individual recovery entry of FRER entry. + **********************************************************************************************************************/ +static fsp_err_t r_layer3_switch_learn_frer_individual_recovery (layer3_switch_instance_ctrl_t * p_instance_ctrl, + layer3_switch_frer_entry_cfg_t * const p_frer_entry_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + + /* When the target sequence recovery has not been learned, first learn the sequence recovery entry. */ + if ((NULL != p_frer_entry_cfg->p_sequence_recovery) && + (false == + p_instance_ctrl->frer_sequence_recovery_status[p_frer_entry_cfg->sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK].learned)) + { + r_layer3_switch_learn_frer_entry(p_instance_ctrl, + p_frer_entry_cfg->p_sequence_recovery, + NULL, + p_frer_entry_cfg->sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK); + } + + /* Add the FRER individual recovery entry. */ + err = r_layer3_switch_learn_frer_entry(p_instance_ctrl, + &p_frer_entry_cfg->individual_recovery, + p_frer_entry_cfg->p_sequence_recovery, + p_frer_entry_cfg->sequence_recovery_id & + LAYER3_SWITCH_FRER_ENTRY_NUM_BITMASK); + + return err; +} /* End of function r_layer3_switch_learn_frer_individual_recovery() */ + +/******************************************************************************************************************* + * Calls user callback for each ports. + * + * @param[in] p_instance_ctrl Pointer to a instance control. + * @param[in] p_callback_args Pointer to callback args. + * @param[in] ports Ports that should call user callback. + **********************************************************************************************************************/ +static void r_layer3_switch_call_callback_for_ports (layer3_switch_instance_ctrl_t * p_instance_ctrl, + ether_switch_callback_args_t * p_callback_args, + uint32_t ports) +{ + /* Call port-specific callback for each port. */ + for (uint8_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { + /* Check if event occurred on this port. 
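+             * The ports argument is a bitmap in which bit i selects port i, so a value of
+             * 0x5, for example, invokes the callbacks registered for ports 0 and 2.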
*/ + if (0 != (ports & (1 << i))) + { + if (NULL != p_instance_ctrl->p_port_cfg_list[i].p_callback) + { + p_callback_args->p_context = p_instance_ctrl->p_port_cfg_list[i].p_context; + r_layer3_switch_call_callback(p_instance_ctrl->p_port_cfg_list[i].p_callback, + p_callback_args, + p_instance_ctrl->p_port_cfg_list[i].p_callback_memory); + } + } + } +} /* End of function r_layer3_switch_call_callback_for_ports() */ + +/*******************************************************************************************************************//** + * Calls user callback. + * + * @param[in] p_callback Function pointer to target callback + * @param[in] p_callback_args Pointer to callback args + * @param[in] p_callback_memory Pointer to memory allocated for callback arguments + **********************************************************************************************************************/ +static void r_layer3_switch_call_callback (void (* p_callback)( + ether_switch_callback_args_t *), + ether_switch_callback_args_t * p_callback_args, + ether_switch_callback_args_t * const p_callback_memory) +{ + ether_switch_callback_args_t args; + + /* Store callback arguments in memory provided by user if available. This allows callback arguments to be + * stored in non-secure memory so they can be accessed by a non-secure callback function. */ + ether_switch_callback_args_t * p_args = p_callback_memory; + + if (NULL == p_args) + { + /* Store on stack. */ + p_args = &args; + } + else + { + /* Save current arguments on the stack in case this is a nested interrupt. */ + args = *p_args; + } + + memcpy(p_args, p_callback_args, sizeof(ether_switch_callback_args_t)); + +#if BSP_TZ_SECURE_BUILD && BSP_FEATURE_ETHER_SUPPORTS_TZ_SECURE + + /* p_callback can point to a secure function or a non-secure function. */ + if (!cmse_is_nsfptr(p_callback)) + { + /* If p_callback is secure, then the project does not need to change security state. */ + p_callback(p_args); + } + else + { + /* If p_callback is Non-secure, then the project must change to Non-secure state in order to call the callback. */ + layer3_switch_prv_ns_callback p_ns_callback = (layer3_switch_prv_ns_callback) (p_callback); + p_ns_callback(p_args); + } + +#else + + /* If the project is not TrustZone Secure, then it will never need to change security state in order to call the callback. */ + p_callback(p_args); +#endif + + if (NULL != p_callback_memory) + { + /* Restore callback memory in case this is a nested interrupt. */ + *p_callback_memory = args; + } +} /* End of function r_layer3_switch_call_callback() */ + +/*********************************************************************************************************************** + * Function Name: layer3_switch_gwdi_isr + * Description : Interrupt handler for GWCA data interrupts that includes RX/TX complete and buffer full error events. + * Arguments : none + * Return Value : none + ***********************************************************************************************************************/ +void layer3_switch_gwdi_isr (void) +{ + /* Save context if RTOS is used. 
*/ + FSP_CONTEXT_SAVE + + IRQn_Type irq = R_FSP_CurrentIrqGet(); + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) R_FSP_IsrContextGet(irq); + ether_switch_callback_args_t callback_arg = {}; + bool port_callback_exists = false; + + uint32_t gwdi_status; + uint32_t gwei2_status; + volatile uint32_t * p_gwca_gwdi_reg; + volatile uint32_t * p_gwca_gwei2_reg; + + /* Check if port-specific callbacks are set. */ + for (uint8_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { + if (NULL != p_instance_ctrl->p_port_cfg_list[i].p_callback) + { + port_callback_exists = true; + break; + } + } + + /* Get queue ISR happened. */ + for (uint32_t i = 0; i < (LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM / LAYER3_SWITCH_REGISTER_SIZE) + 1; i++) + { + /* Get status register address. */ + p_gwca_gwdi_reg = + (uint32_t *) ((uintptr_t) &(p_instance_ctrl->p_gwca_reg->GWDIS0) + + (i * LAYER3_SWITCH_INTERRUPT_REGISTER_OFFSET)); + p_gwca_gwei2_reg = + (uint32_t *) ((uintptr_t) &(p_instance_ctrl->p_gwca_reg->GWEIS20) + + (i * LAYER3_SWITCH_INTERRUPT_REGISTER_OFFSET)); + + /* Copy status bits. GWDI occur when a descriptor complete RX/TX. GWEI2 occur when receive frame for full RX queue. */ + gwdi_status = *p_gwca_gwdi_reg; + gwei2_status = *p_gwca_gwei2_reg; + + /* Clear status bits. */ + *p_gwca_gwdi_reg = gwdi_status; + *p_gwca_gwei2_reg = gwei2_status; + + /* If a callback is provided, then call it with callback argument. */ + if ((NULL != p_instance_ctrl->p_callback) || port_callback_exists) + { + callback_arg.channel = p_instance_ctrl->p_cfg->channel; + + for (uint32_t j = 0; j < (LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM % LAYER3_SWITCH_REGISTER_SIZE); j++) + { + /* Check RX/TX complete event. */ + if (gwdi_status & (1 << j)) + { + /* Store index of queue. */ + callback_arg.queue_index = j + (i * LAYER3_SWITCH_REGISTER_SIZE); + callback_arg.ports = + p_instance_ctrl->p_queues_status[callback_arg.queue_index].p_queue_cfg->ports; + + /* Store that the event is RX or TX. */ + if (LAYER3_SWITCH_QUEUE_TYPE_RX == + p_instance_ctrl->p_queues_status[callback_arg.queue_index].p_queue_cfg->type) + { + /* Check if queue is active. */ + if (r_layer3_switch_is_descriptor_queue_active(p_instance_ctrl, callback_arg.queue_index)) + { + callback_arg.event = ETHER_SWITCH_EVENT_RX_COMPLETE; + } + else + { + callback_arg.event = ETHER_SWITCH_EVENT_RX_QUEUE_FULL; + + /* Clear the status bit. */ + *p_gwca_gwdi_reg = (1 << j); + } + } + else + { + callback_arg.event = ETHER_SWITCH_EVENT_TX_COMPLETE; + } + + r_layer3_switch_call_callback_for_ports(p_instance_ctrl, &callback_arg, callback_arg.ports); + + /* Do not callback other event for the same queue. */ + continue; + } + + /* Check RX descriptor queue full error event. */ + if (gwei2_status & (1 << j)) + { + /* Store index of queue. */ + callback_arg.queue_index = j + (i * LAYER3_SWITCH_REGISTER_SIZE); + callback_arg.ports = + p_instance_ctrl->p_queues_status[callback_arg.queue_index].p_queue_cfg->ports; + callback_arg.event = ETHER_SWITCH_EVENT_RX_MESSAGE_LOST; + + r_layer3_switch_call_callback_for_ports(p_instance_ctrl, &callback_arg, callback_arg.ports); + } + } + } + } + + /* Clear pending interrupt flag to make sure it doesn't fire again after exiting. */ + R_BSP_IrqStatusClear(R_FSP_CurrentIrqGet()); + + /* Restore context if RTOS is used. 
*/ + FSP_CONTEXT_RESTORE +} /* End of function layer3_switch_gwdi_isr() */ + +/*********************************************************************************************************************** + * Interrupt handler for ETHA error interrupts that includes TAS error events. + ***********************************************************************************************************************/ +void layer3_switch_eaei_isr (void) +{ + /* Save context if RTOS is used. */ + FSP_CONTEXT_SAVE + + IRQn_Type irq = R_FSP_CurrentIrqGet(); + layer3_switch_instance_ctrl_t * p_instance_ctrl = (layer3_switch_instance_ctrl_t *) R_FSP_IsrContextGet(irq); + ether_switch_callback_args_t callback_arg = {0}; + + R_ETHA0_Type * p_etha_reg; + uint32_t eaei_status; + + callback_arg.channel = p_instance_ctrl->p_cfg->channel; + callback_arg.p_context = p_instance_ctrl->p_context; + + for (uint32_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { + /* Get status register address. */ + p_etha_reg = (R_ETHA0_Type *) (R_ETHA0_BASE + (LAYER3_SWITCH_ETHA_REG_SIZE * i)); + eaei_status = p_etha_reg->EAEIS1; + + /* Clear status register. */ + p_etha_reg->EAEIS1 = eaei_status; + + /* Check TAS gate error events. */ + if (eaei_status & (R_ETHA0_EAEIS1_TASGES0_Msk | R_ETHA0_EAEIS1_TASGES1_Msk | + R_ETHA0_EAEIS1_TASGES2_Msk | R_ETHA0_EAEIS1_TASGES3_Msk | R_ETHA0_EAEIS1_TASGES4_Msk | + R_ETHA0_EAEIS1_TASGES5_Msk | R_ETHA0_EAEIS1_TASGES6_Msk | R_ETHA0_EAEIS1_TASGES7_Msk)) + { + /* Call callback function for the port. */ + callback_arg.ports |= (1 << i); + callback_arg.event = ETHER_SWITCH_EVENT_TAS_ERROR; + } + } + + /* Call callback of the switch. */ + if (0 != callback_arg.ports) + { + r_layer3_switch_call_callback(p_instance_ctrl->p_callback, &callback_arg, p_instance_ctrl->p_callback_memory); + } + + /* Clear pending interrupt flag to make sure it doesn't fire again after exiting. */ + R_BSP_IrqStatusClear(R_FSP_CurrentIrqGet()); + + /* Restore context if RTOS is used. 
*/ + FSP_CONTEXT_RESTORE +} /* End of function layer3_switch_eaei_isr() */ diff --git a/drivers/ra/fsp/src/r_rmac/r_rmac.c b/drivers/ra/fsp/src/r_rmac/r_rmac.c new file mode 100644 index 00000000..f0e3299f --- /dev/null +++ b/drivers/ra/fsp/src/r_rmac/r_rmac.c @@ -0,0 +1,2175 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*********************************************************************************************************************** + * Includes + **********************************************************************************************************************/ +#include "r_rmac_cfg.h" +#include "r_rmac.h" +#include "r_layer3_switch.h" +#include "r_rmac_phy.h" + +/*********************************************************************************************************************** + * Macro definitions + **********************************************************************************************************************/ + +#define RMAC_OPEN (('R' << 24U) | ('M' << 16U) | ('A' << 8U) | ('C' << 0U)) + +#define RMAC_ETHA_REG_SIZE (R_ETHA1_BASE - R_ETHA0_BASE) +#define RMAC_RMAC_REG_SIZE (R_RMAC1_BASE - R_RMAC0_BASE) + +/* Register values */ +#define RMAC_REG_MRAFC_PROMISCUOUS_VALUE (R_RMAC0_MRAFC_UCENE_Msk | R_RMAC0_MRAFC_MCENE_Msk | \ + R_RMAC0_MRAFC_BCENE_Msk | R_RMAC0_MRAFC_BCACE_Msk | \ + R_RMAC0_MRAFC_NDAREE_Msk | R_RMAC0_MRAFC_SDSFREE_Msk | \ + R_RMAC0_MRAFC_NSAREE_Msk | R_RMAC0_MRAFC_UCENP_Msk | \ + R_RMAC0_MRAFC_MCENP_Msk | R_RMAC0_MRAFC_BCENP_Msk | \ + R_RMAC0_MRAFC_BCACP_Msk | R_RMAC0_MRAFC_NDAREP_Msk | \ + R_RMAC0_MRAFC_SDSFREP_Msk | R_RMAC0_MRAFC_NSAREP_Msk) + +/* For descriptor fields */ +#define RMAC_DESCRIPTOR_FIELD_DS_UPPER_MASK (0xF00) +#define RMAC_DESCRIPTOR_FIELD_DS_LOWER_MASK (0x00FF) +#define RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION (0x8) +#define RMAC_DESCRIPTOR_FIELD_DV_MASK (0x7F) +#define RMAC_DESCRIPTOR_FIELD_PTR_UPPER_MASK (0xFF00000000) +#define RMAC_DESCRIPTOR_FIELD_PTR_LOWER_MASK (0xFFFFFFFF) +#define RMAC_DESCRIPTOR_FIELD_PTR_UPPER_POSITION (32) + +/* Definition of the maximum / minimum number of data that can be sent at one time in the Ethernet */ +#define RMAC_MAXIMUM_FRAME_SIZE (1514U) /* Maximum number of transmitted data */ +#define RMAC_MINIMUM_FRAME_SIZE (60U) /* Minimum number of transmitted data */ +#define RMAC_NO_DATA (0) + +/* Invalid value that mean all queue is not running or not available. */ +#define RMAC_INVALID_QUEUE_INDEX (0xFFFFFFFF) + +/* Increments a index and wraps around to 0 if it exceeds the maximum value */ +#define RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(x, max) (x = (x + 1) % max) + +/* TX Timestamp feature. */ +#define RMAC_WRITE_CFG_TX_TIMESTAMP_ENABLE (1) +#define RMAC_GET_TX_TIMESTAMP_WAIT_TIME (10000) + +/* Timestamp sequence number mask. 
*/ +#define RMAC_TS_SEQUENCE_NUMBER_MASK (0x7F) + +/*********************************************************************************************************************** + * Typedef definitions + ***********************************************************************************************************************/ +#if defined(__ARMCC_VERSION) || defined(__ICCARM__) +typedef void (BSP_CMSE_NONSECURE_CALL * ether_prv_ns_callback)(ether_callback_args_t * p_args); +#elif defined(__GNUC__) +typedef BSP_CMSE_NONSECURE_CALL void (*volatile ether_prv_ns_callback)(ether_callback_args_t * p_args); +#endif + +/*********************************************************************************************************************** + * Private function prototypes + **********************************************************************************************************************/ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) +static fsp_err_t rmac_open_param_check(rmac_instance_ctrl_t const * const p_instance_ctrl, + ether_cfg_t const * const p_cfg); + +#endif + +void rmac_init_descriptors(rmac_instance_ctrl_t * const p_instance_ctrl); +static fsp_err_t rmac_init_descriptor_queues(rmac_instance_ctrl_t * const p_instance_ctrl); +void rmac_init_buffers(rmac_instance_ctrl_t * const p_instance_ctrl); + +void rmac_configure_reception_filter(rmac_instance_ctrl_t const * const p_instance_ctrl); +fsp_err_t rmac_do_link(rmac_instance_ctrl_t * const p_instance_ctrl, + const layer3_switch_magic_packet_detection_t mode); +static fsp_err_t rmac_link_status_check(rmac_instance_ctrl_t const * const p_instance_ctrl); + +static void rmac_call_callback(rmac_instance_ctrl_t * p_instance_ctrl, + ether_callback_args_t * p_callback_args); +static void r_rmac_switch_interrupt_callback(ether_switch_callback_args_t * p_args); +static fsp_err_t r_rmac_set_rx_buffer(rmac_instance_ctrl_t * p_instance_ctrl, + rmac_buffer_node_t * p_buffer_node); +static fsp_err_t r_rmac_set_tx_buffer(rmac_instance_ctrl_t * p_instance_ctrl, + void * p_write_buffer, + uint32_t frame_length, + uint32_t queue_index); +static fsp_err_t r_rmac_start_tx_queue(rmac_instance_ctrl_t * p_instance_ctrl, uint32_t queue_index); +void r_rmac_disable_reception(rmac_instance_ctrl_t * p_instance_ctrl); +static rmac_buffer_node_t * r_rmac_buffer_dequeue(rmac_buffer_queue_t * p_queue); +static void r_rmac_buffer_enqueue(rmac_buffer_queue_t * p_queue, rmac_buffer_node_t * p_node); +static fsp_err_t r_rmac_get_rx_queue(rmac_instance_ctrl_t * p_instance_ctrl, uint32_t queue_index); +static fsp_err_t r_rmac_set_rx_queue(rmac_instance_ctrl_t * p_instance_ctrl, uint32_t queue_index); +static fsp_err_t r_rmac_get_tx_timestamp(rmac_instance_ctrl_t * p_instance_ctrl); + +/*********************************************************************************************************************** + * ISR prototypes + **********************************************************************************************************************/ +void rmac_rmpi_isr(void); + +/*********************************************************************************************************************** + * Private global variables + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Global Variables + 
**********************************************************************************************************************/ + +/* RMAC implementation of ether interface */ +const ether_api_t g_ether_on_rmac = +{ + .open = R_RMAC_Open, + .close = R_RMAC_Close, + .bufferRelease = R_RMAC_BufferRelease, + .rxBufferUpdate = R_RMAC_RxBufferUpdate, + .linkProcess = R_RMAC_LinkProcess, + .wakeOnLANEnable = R_RMAC_WakeOnLANEnable, + .read = R_RMAC_Read, + .write = R_RMAC_Write, + .txStatusGet = R_RMAC_TxStatusGet, + .callbackSet = R_RMAC_CallbackSet, +}; + +/*******************************************************************************************************************//** + * @addtogroup RMAC + * @{ + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Functions + **********************************************************************************************************************/ + +/*******************************************************************************************************************//** + * Initializes the ether module and applies configurations. Implements @ref ether_api_t::open. + * + * @brief After RMAC, Switch and PHY-LSI are reset in software, an auto negotiation of PHY-LSI is begun. + * Afterwards, the link signal change interrupt is permitted. Implements @ref ether_api_t::open. + * + * @retval FSP_SUCCESS Channel opened successfully. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block or configuration structure is NULL. + * @retval FSP_ERR_ALREADY_OPEN Control block has already been opened or channel is being used by another + * instance. Call close() then open() to reconfigure. + * @retval FSP_ERR_ETHER_ERROR_PHY_COMMUNICATION Initialization of PHY-LSI failed. + * @retval FSP_ERR_INVALID_CHANNEL Invalid channel number is given. + * @retval FSP_ERR_INVALID_POINTER Pointer to extend config structure or MAC address is NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Interrupt is not enabled. + * @retval FSP_ERR_ETHER_PHY_ERROR_LINK Initialization of PHY-LSI failed. + **********************************************************************************************************************/ +fsp_err_t R_RMAC_Open (ether_ctrl_t * const p_ctrl, ether_cfg_t const * const p_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + rmac_extended_cfg_t * p_rmac_extended_cfg; + const ether_switch_instance_t * p_ether_switch; + +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + + /** Check parameters. */ + err = rmac_open_param_check(p_instance_ctrl, p_cfg); /** check arguments */ + FSP_ERROR_RETURN((FSP_SUCCESS == err), err); +#endif + FSP_ERROR_RETURN((RMAC_OPEN != p_instance_ctrl->open), FSP_ERR_ALREADY_OPEN); + + p_rmac_extended_cfg = (rmac_extended_cfg_t *) p_cfg->p_extend; + p_ether_switch = p_rmac_extended_cfg->p_ether_switch; + + /* Store register addresses. 
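+     * The per-channel base address is the channel 0 base plus the channel number multiplied by the
+     * register block stride, e.g. channel 1 resolves to R_RMAC0_BASE + RMAC_RMAC_REG_SIZE.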
*/ + p_instance_ctrl->p_reg_rmac = (R_RMAC0_Type *) (R_RMAC0_BASE + (RMAC_RMAC_REG_SIZE * p_cfg->channel)); + p_instance_ctrl->p_reg_etha = + (R_ETHA0_Type *) (R_ETHA0_BASE + (RMAC_ETHA_REG_SIZE * p_cfg->channel)); + + /* Initialize the flags */ + p_instance_ctrl->link_establish_status = ETHER_LINK_ESTABLISH_STATUS_DOWN; + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_NO_CHANGE; + p_instance_ctrl->previous_link_status = ETHER_PREVIOUS_LINK_STATUS_DOWN; + p_instance_ctrl->wake_on_lan = ETHER_WAKE_ON_LAN_DISABLE; + p_instance_ctrl->p_last_sent_buffer = NULL; + + /* Initialize configuration of Ethernet module. */ + p_instance_ctrl->p_cfg = p_cfg; + + /* Initialize configuration of timestamp. */ + p_instance_ctrl->p_rx_timestamp = NULL; + p_instance_ctrl->tx_timestamp.ns = 0; + p_instance_ctrl->tx_timestamp.sec_lower = 0; + p_instance_ctrl->tx_timestamp.sec_upper = 0; + p_instance_ctrl->tx_timestamp_seq_num = 0; + p_instance_ctrl->write_cfg.tx_timestamp_enable = 0; + + /* Set callback and context pointers, if configured */ + p_instance_ctrl->p_callback = p_cfg->p_callback; + p_instance_ctrl->p_context = p_cfg->p_context; + p_instance_ctrl->p_callback_memory = NULL; + + /* Open ethernet switch module. */ + err = p_ether_switch->p_api->open(p_ether_switch->p_ctrl, p_ether_switch->p_cfg); + + /* Already open is acceptable because switch module is common for each RMAC port. */ + if ((FSP_SUCCESS == err) || (FSP_ERR_ALREADY_OPEN == err)) + { + /* Initialize the Ethernet buffer */ + rmac_init_buffers(p_instance_ctrl); + + /* Create descriptor queues. */ + err = rmac_init_descriptor_queues(p_instance_ctrl); + } + + if (FSP_SUCCESS == err) + { + p_instance_ctrl->open = RMAC_OPEN; + } + + return err; +} + +/********************************************************************************************************************//** + * @brief Disables interrupts. Removes power and releases hardware lock. Implements @ref ether_api_t::close. + * + * @retval FSP_SUCCESS Channel successfully closed. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened + * + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_Close (ether_ctrl_t * const p_ctrl) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + rmac_extended_cfg_t * p_extend; + layer3_switch_port_cfg_t port_cfg = {0}; + +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + + /* Reset port configuration to disable IRQ on the switch. */ + port_cfg.p_callback = NULL; + R_LAYER3_SWITCH_ConfigurePort(p_extend->p_ether_switch->p_ctrl, p_instance_ctrl->p_cfg->channel, &port_cfg); + + /* Disable IRQ. */ + p_instance_ctrl->p_reg_rmac->MMID2_b.MPDID = 1; + NVIC_DisableIRQ(p_extend->rmpi_irq); + R_FSP_IsrContextSet(p_extend->rmpi_irq, NULL); + + /* Initialize the flags */ + p_instance_ctrl->link_establish_status = ETHER_LINK_ESTABLISH_STATUS_DOWN; + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_NO_CHANGE; + p_instance_ctrl->previous_link_status = ETHER_PREVIOUS_LINK_STATUS_DOWN; + + /** Clear configure block parameters. */ + p_instance_ctrl->p_cfg = NULL; + + /** Mark the channel not open so other APIs cannot use it. 
*/ + p_instance_ctrl->open = 0U; + + return err; +} /* End of function R_RMAC_Close() */ + +/********************************************************************************************************************//** + * @brief Move to the next buffer in the receive buffer list. Implements @ref ether_api_t::bufferRelease. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block or internal buffers is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened + * @retval FSP_ERR_ETHER_ERROR_LINK Auto-negotiation is not completed, and reception is not enabled. + * @retval FSP_ERR_BUFFER_EMPTY There is no available internal RX buffer. + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_BufferRelease (ether_ctrl_t * const p_ctrl) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + + rmac_buffer_node_t * p_read_buffer_node; /* Buffer location controlled by the Ethernet driver */ + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(p_instance_ctrl->p_cfg->pp_ether_buffers != NULL, FSP_ERR_ASSERTION) +#endif + + /* When the Link up processing is not completed, return error */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, + FSP_ERR_ETHER_ERROR_LINK); + + /* When the zerocopy mode, dequeue a unreleased buffer. */ + p_read_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->rx_unreleased_buffer_queue); + + if (NULL != p_read_buffer_node) + { + /* Try to set to the buffer */ + r_rmac_set_rx_buffer(p_instance_ctrl, p_read_buffer_node); + } + else + { + err = FSP_ERR_BUFFER_EMPTY; + } + + return err; +} /* End of function R_RMAC_BufferRelease() */ + +/********************************************************************************************************************//** + * @brief Change the buffer pointer of the current rx buffer descriptor. Implements @ref ether_api_t::rxBufferUpdate. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION A pointer argument is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_INVALID_POINTER The pointer of buffer is NULL or not aligned on a 32-bit boundary. + * @retval FSP_ERR_INVALID_MODE Driver is configured to non zero copy mode. + * @retval FSP_ERR_BUFFER_EMPTY There is no available internal RX buffer. 
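+ *
+ * @par Example
+ * A minimal zero copy sketch; the control structure and buffer names are illustrative and the buffer
+ * must be 32-bit aligned and large enough for a full frame:
+ * @code
+ * static uint8_t g_app_rx_buffer[1536] __attribute__((aligned(4))); // GCC alignment syntax shown
+ *
+ * // Hand a fresh receive buffer to the driver after the previous one has been consumed.
+ * fsp_err_t err = R_RMAC_RxBufferUpdate(&g_rmac0_ctrl, (void *) g_app_rx_buffer);
+ * @endcode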
+ ***********************************************************************************************************************/ +fsp_err_t R_RMAC_RxBufferUpdate (ether_ctrl_t * const p_ctrl, void * const p_buffer) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + rmac_buffer_node_t * p_read_buffer_node; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(NULL != p_buffer, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(ETHER_ZEROCOPY_ENABLE == p_instance_ctrl->p_cfg->zerocopy, FSP_ERR_INVALID_MODE); +#endif + + if (p_instance_ctrl->rx_initialized_buffer_num < p_instance_ctrl->p_cfg->num_rx_descriptors) + { + p_instance_ctrl->rx_initialized_buffer_num++; + } + + /* Discard unreleased buffer and set the passed new buffer. */ + p_read_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->rx_unreleased_buffer_queue); + + /* When the unreleased buffer queue is empty, use the buffer node pool. */ + if (NULL == p_read_buffer_node) + { + p_read_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->buffer_node_pool); + } + + if (NULL != p_read_buffer_node) + { + p_read_buffer_node->p_buffer = p_buffer; + r_rmac_set_rx_buffer(p_instance_ctrl, p_read_buffer_node); + } + else + { + err = FSP_ERR_BUFFER_EMPTY; + } + + return err; +} + +/********************************************************************************************************************//** + * @brief The Link up processing, the Link down processing, and the magic packet detection processing are executed. + * Implements @ref ether_api_t::linkProcess. + * + * @retval FSP_SUCCESS Link is up. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_ETHER_ERROR_LINK Link is down. + * @retval FSP_ERR_ETHER_ERROR_PHY_COMMUNICATION When reopening the PHY interface initialization of the PHY-LSI failed. + * @retval FSP_ERR_ALREADY_OPEN When reopening the PHY interface it was already opened. + * @retval FSP_ERR_INVALID_CHANNEL When reopening the PHY interface an invalid channel was passed. + * @retval FSP_ERR_INVALID_POINTER When reopening the PHY interface the MAC address pointer was NULL. + * @retval FSP_ERR_INVALID_ARGUMENT When reopening the PHY interface the interrupt was not enabled. + * @retval FSP_ERR_ETHER_PHY_ERROR_LINK Initialization of the PHY-LSI failed. + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_LinkProcess (ether_ctrl_t * const p_ctrl) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + ether_callback_args_t callback_arg; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + err = rmac_link_status_check(p_instance_ctrl); + + /* The state of the link status in PHY-LSI is confirmed and Link Up/Down is judged. 
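+     * Only a transition is recorded here: down-to-up sets link_change to ETHER_LINK_CHANGE_LINK_UP,
+     * up-to-down sets it to ETHER_LINK_CHANGE_LINK_DOWN, and an unchanged link leaves it at
+     * ETHER_LINK_CHANGE_NO_CHANGE so that neither branch below runs.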
*/ + if (FSP_SUCCESS == err) + { + /* When becoming Link up */ + if (ETHER_PREVIOUS_LINK_STATUS_DOWN == p_instance_ctrl->previous_link_status) + { + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_LINK_UP; + + /* Update Link status */ + p_instance_ctrl->previous_link_status = ETHER_PREVIOUS_LINK_STATUS_UP; + } + } + else + { + /* When becoming Link down */ + if (ETHER_PREVIOUS_LINK_STATUS_UP == p_instance_ctrl->previous_link_status) + { + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_LINK_DOWN; + + /* Update Link status */ + p_instance_ctrl->previous_link_status = ETHER_PREVIOUS_LINK_STATUS_DOWN; + } + } + + /* When the link is up */ + if (ETHER_LINK_CHANGE_LINK_UP == p_instance_ctrl->link_change) + { + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_NO_CHANGE; + p_instance_ctrl->link_establish_status = ETHER_LINK_ESTABLISH_STATUS_UP; + + /* Initialize the Ethernet buffer */ + rmac_init_buffers(p_instance_ctrl); + + /* Initialize receive and transmit descriptors */ + rmac_init_descriptors(p_instance_ctrl); + + /* Configure reception filters. */ + rmac_configure_reception_filter(p_instance_ctrl); + + err = rmac_do_link(p_instance_ctrl, LAYER3_SWITCH_MAGIC_PACKET_DETECTION_DISABLE); + + if (FSP_SUCCESS == err) + { + /* If a callback is provided, then call it with callback argument. */ + if (NULL != p_instance_ctrl->p_callback) + { + callback_arg.channel = p_instance_ctrl->p_cfg->channel; + callback_arg.event = ETHER_EVENT_LINK_ON; + callback_arg.p_context = p_instance_ctrl->p_cfg->p_context; + rmac_call_callback(p_instance_ctrl, &callback_arg); + } + } + else + { + /* When PHY auto-negotiation is not completed */ + p_instance_ctrl->link_establish_status = ETHER_LINK_ESTABLISH_STATUS_DOWN; + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_LINK_UP; + } + } + /* When the link is down */ + else if (ETHER_LINK_CHANGE_LINK_DOWN == p_instance_ctrl->link_change) + { + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_NO_CHANGE; + p_instance_ctrl->link_establish_status = ETHER_LINK_ESTABLISH_STATUS_DOWN; + + /* Disable reception. */ + r_rmac_disable_reception(p_instance_ctrl); + + /* If a callback is provided, then call it with callback argument. */ + if (NULL != p_instance_ctrl->p_callback) + { + callback_arg.channel = p_instance_ctrl->p_cfg->channel; + callback_arg.event = ETHER_EVENT_LINK_OFF; + callback_arg.p_context = p_instance_ctrl->p_cfg->p_context; + rmac_call_callback(p_instance_ctrl, &callback_arg); + } + } + else + { + /* no operation */ + } + + return err; +} /* End of function R_RMAC_LinkProcess() */ + +/********************************************************************************************************************//** + * @brief The setting of RMAC is changed from normal sending and receiving mode to magic packet detection mode. + * Implements @ref ether_api_t::wakeOnLANEnable. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_ETHER_ERROR_LINK Auto-negotiation is not completed, and reception is not enabled. + * @retval FSP_ERR_ETHER_PHY_ERROR_LINK Initialization of PHY-LSI failed. 
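+ *
+ * @par Example
+ * A minimal sketch assuming an opened instance named g_rmac0_ctrl (the name is illustrative); the link
+ * must already be up before switching to magic packet detection:
+ * @code
+ * if (FSP_SUCCESS == R_RMAC_LinkProcess(&g_rmac0_ctrl))
+ * {
+ *     // A detected magic packet is reported later through the user callback.
+ *     fsp_err_t err = R_RMAC_WakeOnLANEnable(&g_rmac0_ctrl);
+ * }
+ * @endcode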
+ ***********************************************************************************************************************/ +fsp_err_t R_RMAC_WakeOnLANEnable (ether_ctrl_t * const p_ctrl) +{ + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + fsp_err_t err = FSP_SUCCESS; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + /* When the Link up processing is not completed, return error */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, + FSP_ERR_ETHER_ERROR_LINK); + + /* When the Link up processing is completed */ + /* Change to the magic packet detection mode. */ + rmac_do_link(p_instance_ctrl, LAYER3_SWITCH_MAGIC_PACKET_DETECTION_ENABLE); + + /* It is confirmed not to become Link down while changing the setting. */ + err = rmac_link_status_check(p_instance_ctrl); + + if (FSP_SUCCESS == err) + { + p_instance_ctrl->wake_on_lan = ETHER_WAKE_ON_LAN_ENABLE; + } + else + { + err = FSP_ERR_ETHER_ERROR_LINK; + } + + return err; +} /* End of function R_RMAC_WakeOnLANEnable() */ + +/********************************************************************************************************************//** + * @brief Receive Ethernet frame. Receives data to the location specified by the pointer to the receive buffer. + * In zero copy mode, the address of the receive buffer is returned. + * In non zero copy mode, the received data in the internal buffer is copied to the pointer passed by the argument. + * Implements @ref ether_api_t::read. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_ETHER_ERROR_NO_DATA There is no data in receive buffer. + * @retval FSP_ERR_ETHER_ERROR_LINK Auto-negotiation is not completed, and reception is not enabled. + * @retval FSP_ERR_ETHER_ERROR_FILTERING Multicast Frame filter is enable, and Multicast Address Frame is + * received. + * @retval FSP_ERR_INVALID_POINTER Value of the pointer is NULL. + * @retval FSP_ERR_BUFFER_EMPTY There is no available internal RX buffer. + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_Read (ether_ctrl_t * const p_ctrl, void * const p_buffer, uint32_t * const length_bytes) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + rmac_buffer_node_t * p_read_buffer_node = NULL; /* Buffer location controlled by the Ethernet driver */ + uint32_t received_size = RMAC_NO_DATA; + uint8_t * p_read_buffer = NULL; + uint8_t ** pp_read_buffer = (uint8_t **) p_buffer; + rmac_extended_cfg_t * p_extend; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(NULL != p_buffer, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(NULL != length_bytes, FSP_ERR_INVALID_POINTER); +#endif + + /* (1) Retrieve the receive buffer location controlled by the descriptor. 
*/ + /* When the Link up processing is not completed, return error */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, + FSP_ERR_ETHER_ERROR_LINK); + + p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + + p_read_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->rx_completed_buffer_queue); + if (NULL != p_read_buffer_node) + { + p_read_buffer = p_read_buffer_node->p_buffer; + received_size = p_read_buffer_node->size; + } + + /* When there is data to receive */ + if (received_size > RMAC_NO_DATA) + { +#if LAYER3_SWITCH_CFG_GPTP_ENABLE + + /* Get timestamp. */ + if (NULL != p_instance_ctrl->p_rx_timestamp) + { + p_instance_ctrl->p_rx_timestamp->ns = p_read_buffer_node->timestamp.ns; + p_instance_ctrl->p_rx_timestamp->sec_lower = p_read_buffer_node->timestamp.sec_lower; + + /* Clear for next read. */ + p_instance_ctrl->p_rx_timestamp = NULL; + } +#endif + + if (ETHER_ZEROCOPY_DISABLE == p_instance_ctrl->p_cfg->zerocopy) + { + /* (2) Copy the data read from the receive buffer which is controlled + * by the descriptor to the buffer which is specified by the user (up to 1024 bytes). */ + memcpy(p_buffer, p_read_buffer, received_size); + + /* (3) Read the receive data from the receive buffer controlled by the descriptor, + * and then release the receive buffer. */ + + /* Read a pending buffer, try to set this buffer to the descriptor queue. If failed, it will be enqueued to the buffer pool. */ + r_rmac_set_rx_buffer(p_instance_ctrl, p_read_buffer_node); + } + else + { + *pp_read_buffer = p_read_buffer; + + /* Add this buffer to the buffer pool. It becomes reusable after being released via the BufferRelease API. */ + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_unreleased_buffer_queue, p_read_buffer_node); + } + + *length_bytes = received_size; + } + /* When there is no data to receive */ + else + { + err = FSP_ERR_ETHER_ERROR_NO_DATA; + + if (RMAC_INVALID_QUEUE_INDEX == p_instance_ctrl->rx_running_queue_index) + { + /*Try to set a new empty buffer and restart reception. */ + fsp_err_t serr = R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[p_instance_ctrl-> + read_queue_index].index); + if (FSP_SUCCESS == serr) + { + p_instance_ctrl->rx_running_queue_index = p_instance_ctrl->read_queue_index; + } + + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(p_instance_ctrl->read_queue_index, p_extend->rx_queue_num); + } + } + + return err; +} /* End of function R_RMAC_Read() */ + +/********************************************************************************************************************//** + * @brief Transmit Ethernet frame. Transmits data from the location specified by the pointer to the transmit + * buffer, with the data size equal to the specified frame length. + * In the non zero copy mode, transmits data after being copied to the internal buffer. + * Implements @ref ether_api_t::write. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_ETHER_ERROR_LINK Auto-negotiation is not completed, and reception is not enabled. + * @retval FSP_ERR_ETHER_ERROR_TRANSMIT_BUFFER_FULL Transmit buffer is not empty. + * @retval FSP_ERR_INVALID_POINTER Value of the pointer is NULL. + * @retval FSP_ERR_INVALID_ARGUMENT Value of the send frame size is out of range. + * @retval FSP_ERR_BUFFER_EMPTY There is no available internal TX buffer. 
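+ *
+ * @par Example
+ * A minimal non zero copy sketch (instance name and frame contents are illustrative); the frame length
+ * must be between 60 and 1514 bytes and the link must already be up:
+ * @code
+ * uint8_t frame[60] = {0}; // destination MAC, source MAC, EtherType and payload go here
+ *
+ * fsp_err_t err = R_RMAC_Write(&g_rmac0_ctrl, frame, sizeof(frame));
+ * @endcode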
+ ***********************************************************************************************************************/ +fsp_err_t R_RMAC_Write (ether_ctrl_t * const p_ctrl, void * const p_buffer, uint32_t const frame_length) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + rmac_extended_cfg_t * p_extend; + + void * p_write_buffer; + rmac_buffer_node_t * p_write_buffer_node = NULL; + uint32_t next_write_queue; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + FSP_ERROR_RETURN(NULL != p_buffer, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN((RMAC_MINIMUM_FRAME_SIZE <= frame_length) && (RMAC_MAXIMUM_FRAME_SIZE >= frame_length), + FSP_ERR_INVALID_ARGUMENT); + FSP_ERROR_RETURN(p_instance_ctrl->p_cfg->ether_buffer_size >= frame_length, FSP_ERR_OVERFLOW); +#endif + + p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + next_write_queue = p_instance_ctrl->write_queue_index; + + /* When the Link up processing is not completed, return error */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, + FSP_ERR_ETHER_ERROR_LINK); + + /* Get TX buffer. */ + if (ETHER_ZEROCOPY_DISABLE == p_instance_ctrl->p_cfg->zerocopy) + { + p_write_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->tx_empty_buffer_queue); + FSP_ERROR_RETURN(NULL != p_write_buffer_node, FSP_ERR_ETHER_ERROR_TRANSMIT_BUFFER_FULL); + p_write_buffer_node->size = frame_length; + + /* Get a buffer from internal buffers. */ + p_write_buffer = p_write_buffer_node->p_buffer; + + /* Copy data to the transmit buffer. */ + memcpy(p_write_buffer, p_buffer, frame_length); + } + else + { + /* In zerocopy mode, use a passed buffer. */ + p_write_buffer = p_buffer; + } + + FSP_CRITICAL_SECTION_DEFINE; + FSP_CRITICAL_SECTION_ENTER; + + /* If a pending buffer exists, this buffer will be also pending. */ + if (NULL != p_instance_ctrl->tx_pending_buffer_queue.p_head) + { + err = FSP_ERR_ETHER_ERROR_TRANSMIT_BUFFER_FULL; + } + else + { + /* If the write target is the running queue, don't write to the descriptor queue. */ + if (RMAC_INVALID_QUEUE_INDEX != p_instance_ctrl->tx_running_queue_index) + { + next_write_queue = p_instance_ctrl->tx_running_queue_index; + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(next_write_queue, p_extend->tx_queue_num); + } + else + { + next_write_queue = p_instance_ctrl->write_queue_index; + } + + err = + r_rmac_set_tx_buffer(p_instance_ctrl, p_write_buffer, frame_length, + p_extend->p_tx_queue_list[next_write_queue].index); + + /* Try to write to the next queue. */ + if (FSP_ERR_OVERFLOW == err) + { + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(next_write_queue, p_extend->tx_queue_num); + if (p_instance_ctrl->tx_running_queue_index != next_write_queue) + { + err = + r_rmac_set_tx_buffer(p_instance_ctrl, p_write_buffer, frame_length, + p_extend->p_tx_queue_list[next_write_queue].index); + } + } + } + + FSP_CRITICAL_SECTION_EXIT; + + if (FSP_SUCCESS == err) + { + /* If non-zerocoy mode, move the internal buffer node to the pool. */ + if (ETHER_ZEROCOPY_DISABLE == p_instance_ctrl->p_cfg->zerocopy) + { + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_write_buffer_node); + } + } + else + { + /* Copy the zerocopy buffer to a new buffer node. 
*/
+        if (NULL == p_write_buffer_node)
+        {
+            p_write_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->buffer_node_pool);
+            FSP_ERROR_RETURN(NULL != p_write_buffer_node, FSP_ERR_ETHER_ERROR_TRANSMIT_BUFFER_FULL);
+            p_write_buffer_node->p_buffer = p_write_buffer;
+            p_write_buffer_node->size     = frame_length;
+        }
+
+        /* If the write process fails, add the buffer to the pending queue. */
+        r_rmac_buffer_enqueue(&p_instance_ctrl->tx_pending_buffer_queue, p_write_buffer_node);
+        err = FSP_SUCCESS;
+    }
+
+    /* Check if all TX queues are stopped. */
+    if (p_instance_ctrl->tx_running_queue_index == RMAC_INVALID_QUEUE_INDEX)
+    {
+        FSP_CRITICAL_SECTION_ENTER;
+
+        /* When all TX queues are stopped, start transmission */
+        if (FSP_SUCCESS ==
+            r_rmac_start_tx_queue(p_instance_ctrl, p_extend->p_tx_queue_list[next_write_queue].index))
+        {
+            p_instance_ctrl->tx_running_queue_index = next_write_queue;
+        }
+        else
+        {
+            p_instance_ctrl->tx_running_queue_index = RMAC_INVALID_QUEUE_INDEX;
+        }
+
+        RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(next_write_queue, p_extend->tx_queue_num);
+
+        FSP_CRITICAL_SECTION_EXIT;
+    }
+
+    p_instance_ctrl->write_queue_index = next_write_queue;
+
+    return err;
+}                                      /* End of function R_RMAC_Write() */
+
+/**********************************************************************************************************************//**
+ * Provides the address of the most recently transmitted buffer in the user provided pointer.
+ * Implements @ref ether_api_t::txStatusGet.
+ *
+ * @retval  FSP_SUCCESS              Transmit buffer address is stored in the provided p_buffer_address.
+ * @retval  FSP_ERR_ASSERTION        Pointer to ETHER control block is NULL.
+ * @retval  FSP_ERR_NOT_OPEN         The control block has not been opened.
+ * @retval  FSP_ERR_INVALID_POINTER  p_buffer_address is NULL.
+ * @retval  FSP_ERR_NOT_FOUND        No transmitted buffer address is available.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_TxStatusGet (ether_ctrl_t * const p_ctrl, void * const p_buffer_address)
+{
+    rmac_instance_ctrl_t * p_instance_ctrl       = (rmac_instance_ctrl_t *) p_ctrl;
+    uint8_t             ** p_sent_buffer_address = (uint8_t **) p_buffer_address;
+    fsp_err_t              err;
+
+#if (RMAC_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(p_instance_ctrl);
+    FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    FSP_ERROR_RETURN(NULL != p_buffer_address, FSP_ERR_INVALID_POINTER);
+#endif
+
+    FSP_CRITICAL_SECTION_DEFINE;
+    FSP_CRITICAL_SECTION_ENTER;
+
+    if (NULL != p_instance_ctrl->p_last_sent_buffer)
+    {
+        *p_sent_buffer_address              = p_instance_ctrl->p_last_sent_buffer;
+        p_instance_ctrl->p_last_sent_buffer = NULL;
+        err = FSP_SUCCESS;
+    }
+    else
+    {
+        /* No descriptors have been sent. */
+        err = FSP_ERR_NOT_FOUND;
+    }
+
+    FSP_CRITICAL_SECTION_EXIT;
+
+    return err;
+}                                      /* End of function R_RMAC_TxStatusGet() */
+
+/*******************************************************************************************************************//**
+ * Updates the user callback with the option to provide memory for the callback argument structure.
+ * Implements @ref ether_api_t::callbackSet.
+ *
+ * @retval  FSP_SUCCESS                  Callback updated successfully.
+ * @retval  FSP_ERR_ASSERTION            A required pointer is NULL.
+ * @retval  FSP_ERR_NOT_OPEN             The control block has not been opened.
+ * @retval  FSP_ERR_NO_CALLBACK_MEMORY   p_callback is non-secure and p_callback_memory is either secure or NULL.
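+ *
+ * @par Example
+ * A minimal sketch; the callback and instance names are illustrative:
+ * @code
+ * static void app_ether_callback (ether_callback_args_t * p_args)
+ * {
+ *     // React to ETHER_EVENT_LINK_ON / ETHER_EVENT_LINK_OFF here.
+ *     FSP_PARAMETER_NOT_USED(p_args);
+ * }
+ *
+ * // From application code, after R_RMAC_Open():
+ * fsp_err_t err = R_RMAC_CallbackSet(&g_rmac0_ctrl, app_ether_callback, NULL, NULL);
+ * @endcode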
+ **********************************************************************************************************************/ +fsp_err_t R_RMAC_CallbackSet (ether_ctrl_t * const p_api_ctrl, + void ( * p_callback)(ether_callback_args_t *), + void * const p_context, + ether_callback_args_t * const p_callback_memory) +{ + rmac_instance_ctrl_t * p_ctrl = (rmac_instance_ctrl_t *) p_api_ctrl; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_ctrl); + FSP_ASSERT(p_callback); + FSP_ERROR_RETURN(RMAC_OPEN == p_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + +#if BSP_TZ_SECURE_BUILD && BSP_FEATURE_ETHER_SUPPORTS_TZ_SECURE + + /* Get security state of p_callback */ + bool callback_is_secure = + (NULL == cmse_check_address_range((void *) p_callback, sizeof(void *), CMSE_AU_NONSECURE)); + + #if RMAC_CFG_PARAM_CHECKING_ENABLE + + /* In secure projects, p_callback_memory must be provided in non-secure space if p_callback is non-secure */ + ether_callback_args_t * const p_callback_memory_checked = cmse_check_pointed_object(p_callback_memory, + CMSE_AU_NONSECURE); + FSP_ERROR_RETURN(callback_is_secure || (NULL != p_callback_memory_checked), FSP_ERR_NO_CALLBACK_MEMORY); + #endif +#endif + + /* Store callback and context */ +#if BSP_TZ_SECURE_BUILD && BSP_FEATURE_ETHER_SUPPORTS_TZ_SECURE + p_ctrl->p_callback = callback_is_secure ? p_callback : + (void (*)(ether_callback_args_t *))cmse_nsfptr_create(p_callback); +#else + p_ctrl->p_callback = p_callback; +#endif + p_ctrl->p_context = p_context; + p_ctrl->p_callback_memory = p_callback_memory; + + return FSP_SUCCESS; +} + +/********************************************************************************************************************//** + * @brief Set configuration for tx frame. This API must call before @ref ether_api_t::write. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION A pointer argument is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_SetWriteConfig (ether_ctrl_t * const p_ctrl, rmac_write_cfg_t * const p_write_cfg) +{ + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl; + + /* Check argument */ +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(NULL != p_write_cfg, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + p_instance_ctrl->write_cfg.tx_timestamp_enable = p_write_cfg->tx_timestamp_enable; + + return FSP_SUCCESS; +} + +/********************************************************************************************************************//** + * @brief Get timestamp of transmitted frame. This API must call after @ref ether_api_t::write. + * + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION A pointer argument is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened. 
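+ *
+ * @par Example
+ * A minimal sketch (names are illustrative): enable timestamp capture before the write, then read the
+ * captured value back once the frame has been transmitted:
+ * @code
+ * uint8_t          frame[60] = {0};
+ * rmac_write_cfg_t write_cfg = {.tx_timestamp_enable = 1};
+ * rmac_timestamp_t timestamp;
+ *
+ * (void) R_RMAC_SetWriteConfig(&g_rmac0_ctrl, &write_cfg);
+ * (void) R_RMAC_Write(&g_rmac0_ctrl, frame, sizeof(frame));
+ * // ... after the frame has been transmitted ...
+ * fsp_err_t err = R_RMAC_GetTxTimestamp(&g_rmac0_ctrl, &timestamp);
+ * @endcode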
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_GetTxTimestamp (ether_ctrl_t * const p_ctrl, rmac_timestamp_t * const p_timestamp)
+{
+    rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl;
+
+    /* Check argument */
+#if (RMAC_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(p_instance_ctrl);
+    FSP_ERROR_RETURN(NULL != p_timestamp, FSP_ERR_INVALID_POINTER);
+    FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+#endif
+
+    p_timestamp->ns        = p_instance_ctrl->tx_timestamp.ns;
+    p_timestamp->sec_lower = p_instance_ctrl->tx_timestamp.sec_lower;
+
+    /* Clear timestamp */
+    p_instance_ctrl->tx_timestamp.ns        = 0;
+    p_instance_ctrl->tx_timestamp.sec_lower = 0;
+
+    return FSP_SUCCESS;
+}
+
+/********************************************************************************************************************//**
+ * @brief Register a buffer to receive the timestamp of the next received frame. This API must be called before
+ * @ref ether_api_t::read.
+ *
+ * @retval  FSP_SUCCESS              Processing completed successfully.
+ * @retval  FSP_ERR_ASSERTION        A pointer argument is NULL.
+ * @retval  FSP_ERR_NOT_OPEN         The control block has not been opened.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_GetRxTimestamp (ether_ctrl_t * const p_ctrl, rmac_timestamp_t * const p_timestamp)
+{
+    rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_ctrl;
+
+    /* Check argument */
+#if (RMAC_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(p_instance_ctrl);
+    FSP_ERROR_RETURN(NULL != p_timestamp, FSP_ERR_INVALID_POINTER);
+    FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+#endif
+
+    p_instance_ctrl->p_rx_timestamp = p_timestamp;
+
+    return FSP_SUCCESS;
+}
+
+/*******************************************************************************************************************//**
+ * @} (end addtogroup RMAC)
+ **********************************************************************************************************************/
+
+#if (RMAC_CFG_PARAM_CHECKING_ENABLE)
+
+/*******************************************************************************************************************//**
+ * @brief Parameter error check function for open.
+ *
+ * @param[in] p_instance_ctrl         Pointer to the control block for the channel
+ * @param[in] p_cfg                   Pointer to the configuration structure
+ *
+ * @retval  FSP_SUCCESS               No parameter error found
+ * @retval  FSP_ERR_ASSERTION         Pointer to RMAC control block is NULL
+ * @retval  FSP_ERR_INVALID_CHANNEL   Invalid channel number is given.
+ * @retval  FSP_ERR_INVALID_POINTER   Pointer to the configuration structure, MAC address or extended configuration is NULL.
+ * @retval  FSP_ERR_INVALID_ARGUMENT  pp_ether_buffers is NULL while zero copy mode is disabled.
+ * @retval  FSP_ERR_UNSUPPORTED       Padding is enabled; RMAC does not support the padding feature.
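+ *
+ * @par Example
+ * A configuration sketch that satisfies these checks; only the checked members are shown and all
+ * values and object names are illustrative:
+ * @code
+ * ether_cfg_t cfg =
+ * {
+ *     .channel          = 0,                     // must be below BSP_FEATURE_ETHER_NUM_CHANNELS
+ *     .zerocopy         = ETHER_ZEROCOPY_DISABLE,
+ *     .padding          = ETHER_PADDING_DISABLE, // RMAC does not support the padding feature
+ *     .p_mac_address    = g_mac_address,         // must not be NULL
+ *     .pp_ether_buffers = g_rx_buffers,          // required when zero copy mode is disabled
+ *     .p_extend         = &g_rmac0_extend_cfg,   // rmac_extended_cfg_t, must not be NULL
+ * };
+ * @endcode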
+ **********************************************************************************************************************/ +static fsp_err_t rmac_open_param_check (rmac_instance_ctrl_t const * const p_instance_ctrl, + ether_cfg_t const * const p_cfg) +{ + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(NULL != p_cfg, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(NULL != p_cfg->p_mac_address, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(NULL != p_cfg->p_extend, FSP_ERR_INVALID_POINTER); + FSP_ERROR_RETURN(BSP_FEATURE_ETHER_NUM_CHANNELS > p_cfg->channel, FSP_ERR_INVALID_CHANNEL); + + if (p_cfg->zerocopy == ETHER_ZEROCOPY_DISABLE) + { + FSP_ERROR_RETURN((p_cfg->pp_ether_buffers != NULL), FSP_ERR_INVALID_ARGUMENT); + } + + /* RMAC does not support padding feature. */ + FSP_ERROR_RETURN(p_cfg->padding == ETHER_PADDING_DISABLE, FSP_ERR_UNSUPPORTED); + + return FSP_SUCCESS; +} + +#endif + +/*********************************************************************************************************************** + * Function Name: rmac_configure_reception_filter + * Description : Configure the ETHA and RMAC feature. + * Arguments : channel - + * RMAC channel number + * Return Value : none + ***********************************************************************************************************************/ +void rmac_configure_reception_filter (rmac_instance_ctrl_t const * const p_instance_ctrl) +{ + uint32_t mrafc = p_instance_ctrl->p_reg_rmac->MRAFC; + + if (ETHER_PROMISCUOUS_ENABLE == p_instance_ctrl->p_cfg->promiscuous) + { + /* Enable promiscuous reception. */ + mrafc = RMAC_REG_MRAFC_PROMISCUOUS_VALUE; + } + else + { + /* Configure multicast reception features. */ + if (ETHER_MULTICAST_ENABLE == p_instance_ctrl->p_cfg->multicast) + { + /* Enable multicast reception. */ + mrafc |= R_RMAC0_MRAFC_MCENE_Msk | R_RMAC0_MRAFC_MCENP_Msk; + } + else + { + mrafc &= ~(R_RMAC0_MRAFC_MCENE_Msk | R_RMAC0_MRAFC_MCENP_Msk); + } + + /* Enable broadcast reception. */ + mrafc |= R_RMAC0_MRAFC_BCENE_Msk | R_RMAC0_MRAFC_BCENP_Msk; + } + + /* Set the broadcast storm filter regardless of the promiscuous mode configuration. */ + if (0 < p_instance_ctrl->p_cfg->broadcast_filter) + { + /* Enable the broadcast storm filter */ + mrafc |= R_RMAC0_MRAFC_BCACE_Msk | R_RMAC0_MRAFC_BSTENE_Msk | + R_RMAC0_MRAFC_BSTENP_Msk | R_RMAC0_MRAFC_BCACP_Msk; + + /* Configure how many broadcast frames can be received consecutively. */ + p_instance_ctrl->p_reg_rmac->MRSCE_b.CBFE = (R_RMAC0_MRSCE_CBFE_Msk >> R_RMAC0_MRSCE_CBFE_Pos) & + (p_instance_ctrl->p_cfg->broadcast_filter - 1); + } + else + { + mrafc &= ~(R_RMAC0_MRAFC_BSTENE_Msk | R_RMAC0_MRAFC_BSTENP_Msk); + } + + p_instance_ctrl->p_reg_rmac->MRAFC = mrafc; +} /* End of function rmac_configure_reception_filter() */ + +/********************************************************************************************************************//** + * @brief Determines the partner PHY capability through auto-negotiation process. The link abilities + * are handled to determine duplex, speed and flow control (PAUSE frames). + * + * @param[in] p_instance_ctrl Pointer to the control block for the channel + * @param[in] mode The operational mode is specified. + * NO_USE_MAGIC_PACKET_DETECT (0) - Communicate mode usually + * USE_MAGIC_PACKET_DETECT (1) - Magic packet detection mode + * @retval FSP_SUCCESS Processing completed successfully. + * @retval FSP_ERR_ASSERTION Pointer to ETHER control block or configuration structure is NULL. 
+ * @retval FSP_ERR_NOT_OPEN The control block has not been opened. + * @retval FSP_ERR_ETHER_ERROR_LINK Auto-negotiation of PHY-LSI is not completed + * or result of Auto-negotiation is abnormal. + * + ***********************************************************************************************************************/ +fsp_err_t rmac_do_link (rmac_instance_ctrl_t * const p_instance_ctrl, + const layer3_switch_magic_packet_detection_t mode) +{ + fsp_err_t err = FSP_SUCCESS; + + rmac_extended_cfg_t * p_rmac_extended_cfg; + layer3_switch_extended_cfg_t * p_switch_extended_cfg; + const ether_phy_instance_t * p_phy_instance; + + layer3_switch_port_cfg_t port_cfg = {0}; + uint32_t link_speed_duplex = 0; + uint32_t local_pause_bits = 0; + uint32_t partner_pause_bits = 0; + fsp_err_t link_result; + +#if (RMAC_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(p_instance_ctrl); + FSP_ERROR_RETURN(RMAC_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + p_rmac_extended_cfg = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + p_switch_extended_cfg = (layer3_switch_extended_cfg_t *) p_rmac_extended_cfg->p_ether_switch->p_cfg->p_extend; +#ifndef RMAC_CFG_SKIP_PHY_LINK_ABILITY_CHECK + p_phy_instance = + (ether_phy_instance_t *) p_switch_extended_cfg->p_ether_phy_instances[p_instance_ctrl->p_cfg->channel]; + + /* Set the link status */ + R_RMAC_PHY_ChipSelect(p_phy_instance->p_ctrl, p_instance_ctrl->p_cfg->channel); + link_result = p_phy_instance->p_api->linkPartnerAbilityGet(p_phy_instance->p_ctrl, + &link_speed_duplex, + &local_pause_bits, + &partner_pause_bits); +#else + FSP_PARAMETER_NOT_USED(p_phy_instance); + FSP_PARAMETER_NOT_USED(link_speed_duplex); + FSP_PARAMETER_NOT_USED(local_pause_bits); + FSP_PARAMETER_NOT_USED(partner_pause_bits); + + link_result = FSP_SUCCESS; +#endif + + if (FSP_SUCCESS == link_result) + { + /* Set MAC address. */ + port_cfg.p_mac_address = p_instance_ctrl->p_cfg->p_mac_address; + + if (LAYER3_SWITCH_MAGIC_PACKET_DETECTION_DISABLE == mode) + { + /* + * To prevent reception from starting at an invalid descriptor, the queue must be reloaded first. + * Until then, reception is kept disabled. + */ + port_cfg.forwarding_to_cpu_enable = false; + R_LAYER3_SWITCH_ConfigurePort(p_rmac_extended_cfg->p_ether_switch->p_ctrl, + p_instance_ctrl->p_cfg->channel, + &port_cfg); + + /* Reload RX queue. */ + if (NULL != p_instance_ctrl->p_cfg->pp_ether_buffers) + { + p_instance_ctrl->rx_running_queue_index = 0; + err = R_LAYER3_SWITCH_StartDescriptorQueue(p_rmac_extended_cfg->p_ether_switch->p_ctrl, + p_rmac_extended_cfg->p_rx_queue_list[0].index); + } + + /* Set callback for switch data interrupt. */ + port_cfg.p_callback = r_rmac_switch_interrupt_callback; + port_cfg.p_context = p_instance_ctrl; + port_cfg.p_callback_memory = NULL; + + /* Enable reception. */ + port_cfg.forwarding_to_cpu_enable = true; + R_LAYER3_SWITCH_ConfigurePort(p_rmac_extended_cfg->p_ether_switch->p_ctrl, + p_instance_ctrl->p_cfg->channel, + &port_cfg); + } + else + { + /* Disable reception. */ + r_rmac_disable_reception(p_instance_ctrl); + + /* Enable a magic packet interrupt. 
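+             *
+             * The RMPI interrupt configured below is armed via MMIE2.MPDIE; when the hardware detects a magic
+             * packet it sets MMIS2.MPDIS, and rmac_rmpi_isr() later in this file reports ETHER_EVENT_WAKEON_LAN
+             * through the user callback.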
*/ + R_BSP_IrqCfgEnable(p_rmac_extended_cfg->rmpi_irq, p_rmac_extended_cfg->rmpi_ipl, p_instance_ctrl); + p_instance_ctrl->p_reg_rmac->MMIE2_b.MPDIE = 1; + } + } + else + { + err = FSP_ERR_ETHER_ERROR_LINK; + } + + return err; +} /* End of function rmac_do_link() */ + +/*******************************************************************************************************************//** + * @brief Verifies the Ethernet link is up or not. + * + * @param[in] p_instance_ctrl Pointer to the control block for the channel + * + * @retval FSP_SUCCESS: Link is up + * @retval FSP_ERR_ETHER_ERROR_LINK: Link is down + * @retval FSP_ERR_ETHER_PHY_ERROR_LINK Initialization of PHY-LSI failed. + **********************************************************************************************************************/ +static fsp_err_t rmac_link_status_check (rmac_instance_ctrl_t const * const p_instance_ctrl) +{ + fsp_err_t err = FSP_SUCCESS; + fsp_err_t link_status; + + rmac_extended_cfg_t * p_rmac_extended_cfg; + layer3_switch_extended_cfg_t * p_switch_extended_cfg; + const ether_phy_instance_t * p_phy_instance; + uint32_t line_speed_duplex; + uint32_t local_pause; + uint32_t partner_pause; + + p_rmac_extended_cfg = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + p_switch_extended_cfg = (layer3_switch_extended_cfg_t *) p_rmac_extended_cfg->p_ether_switch->p_cfg->p_extend; + p_phy_instance = + (ether_phy_instance_t *) p_switch_extended_cfg->p_ether_phy_instances[p_instance_ctrl->p_cfg->channel]; + + /* Update PHY LSI information */ + R_RMAC_PHY_ChipSelect(p_phy_instance->p_ctrl, p_instance_ctrl->p_cfg->channel); + + /* Get link status */ + link_status = p_phy_instance->p_api->linkStatusGet(p_phy_instance->p_ctrl); + + if (FSP_ERR_ETHER_PHY_ERROR_LINK == link_status) + { + /* Link is down */ + err = FSP_ERR_ETHER_ERROR_LINK; + } + else + { + /* Link is up */ + err = FSP_SUCCESS; + + /* Call LinkPartnerAbilityGet to configure link speed. */ + p_phy_instance->p_api->linkPartnerAbilityGet(p_phy_instance->p_ctrl, + &line_speed_duplex, + &local_pause, + &partner_pause); + } + + return err; +} /* End of function rmac_link_status_check() */ + +/*******************************************************************************************************************//** + * Calls user callback. + * + * @param[in] p_instance_ctrl Pointer to ether instance control block + * @param[in] p_callback_args Pointer to callback args + **********************************************************************************************************************/ +static void rmac_call_callback (rmac_instance_ctrl_t * p_instance_ctrl, ether_callback_args_t * p_callback_args) +{ + ether_callback_args_t args; + + /* Store callback arguments in memory provided by user if available. This allows callback arguments to be + * stored in non-secure memory so they can be accessed by a non-secure callback function. */ + ether_callback_args_t * p_args = p_instance_ctrl->p_callback_memory; + if (NULL == p_args) + { + /* Store on stack */ + p_args = &args; + } + else + { + /* Save current arguments on the stack in case this is a nested interrupt. */ + args = *p_args; + } + + p_args->event = p_callback_args->event; + p_args->channel = p_instance_ctrl->p_cfg->channel; + p_args->p_context = p_instance_ctrl->p_context; + +#if BSP_TZ_SECURE_BUILD && BSP_FEATURE_ETHER_SUPPORTS_TZ_SECURE + + /* p_callback can point to a secure function or a non-secure function. 
*/ + if (!cmse_is_nsfptr(p_instance_ctrl->p_callback)) + { + /* If p_callback is secure, then the project does not need to change security state. */ + p_instance_ctrl->p_callback(p_args); + } + else + { + /* If p_callback is Non-secure, then the project must change to Non-secure state in order to call the callback. */ + ether_prv_ns_callback p_callback = (ether_prv_ns_callback) (p_instance_ctrl->p_callback); + p_callback(p_args); + } + +#else + + /* If the project is not Trustzone Secure, then it will never need to change security state in order to call the callback. */ + p_instance_ctrl->p_callback(p_args); +#endif + + if (NULL != p_instance_ctrl->p_callback_memory) + { + /* Restore callback memory in case this is a nested interrupt. */ + *p_instance_ctrl->p_callback_memory = args; + } +} + +/*********************************************************************************************************************** + * Function Name: rmac_init_descriptors + * Description : Initialize descriptors and set buffers to these. + * Arguments : channel - + * RMAC channel number + * Return Value : none + ***********************************************************************************************************************/ +void rmac_init_descriptors (rmac_instance_ctrl_t * const p_instance_ctrl) +{ + layer3_switch_descriptor_t descriptor = {0}; + + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + uint32_t buffers_index = 0; + uint32_t nodes_index = 0; + rmac_buffer_node_t * p_buffer_node; + + /* Initialize common buffer pool. */ + p_instance_ctrl->buffer_node_pool.p_head = NULL; + p_instance_ctrl->buffer_node_pool.p_tail = NULL; + p_instance_ctrl->tx_empty_buffer_queue.p_head = NULL; + p_instance_ctrl->tx_empty_buffer_queue.p_tail = NULL; + p_instance_ctrl->rx_empty_buffer_queue.p_head = NULL; + p_instance_ctrl->rx_empty_buffer_queue.p_tail = NULL; + + /* Enable Data interrupt for RX descriptor. */ + descriptor.basic.die = 1; + + /* Initialize variables that used for RX. */ + p_instance_ctrl->read_queue_index = 0; + p_instance_ctrl->rx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + p_instance_ctrl->rx_completed_buffer_queue.p_head = NULL; + p_instance_ctrl->rx_completed_buffer_queue.p_tail = NULL; + p_instance_ctrl->rx_unreleased_buffer_queue.p_head = NULL; + p_instance_ctrl->rx_unreleased_buffer_queue.p_tail = NULL; + + if (NULL != p_instance_ctrl->p_cfg->pp_ether_buffers) + { + p_instance_ctrl->rx_initialized_buffer_num = p_instance_ctrl->p_cfg->num_rx_descriptors; + + /* Settings for RX descriptors */ + descriptor.basic.ds_h = (RMAC_DESCRIPTOR_FIELD_DS_UPPER_MASK & RMAC_MAXIMUM_FRAME_SIZE) >> + RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION; + descriptor.basic.ds_l = RMAC_DESCRIPTOR_FIELD_DS_LOWER_MASK & RMAC_MAXIMUM_FRAME_SIZE; + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY; + } + else + { + p_instance_ctrl->rx_initialized_buffer_num = 0; + } + + /* Initialize descriptors of each RX queue if buffers are allocated by configuration. */ + for (uint32_t i = 0; i < p_extend->rx_queue_num; i++) + { + /* For each RX descriptor exclude the last. */ + for (uint32_t j = 0; j < p_extend->p_rx_queue_list[0].queue_cfg.array_length - 1; j++) + { + /* Get a node without buffer. 
*/ + p_buffer_node = &p_extend->p_buffer_node_list[nodes_index]; + p_buffer_node->p_buffer = NULL; + nodes_index++; + + if (NULL != p_instance_ctrl->p_cfg->pp_ether_buffers) + { + descriptor.basic.ptr_h = + (RMAC_DESCRIPTOR_FIELD_PTR_UPPER_MASK & + (uint64_t) (uintptr_t) p_instance_ctrl->p_cfg->pp_ether_buffers[buffers_index]) >> + RMAC_DESCRIPTOR_FIELD_PTR_UPPER_POSITION; + descriptor.basic.ptr_l = RMAC_DESCRIPTOR_FIELD_PTR_LOWER_MASK & + (uintptr_t) p_instance_ctrl->p_cfg->pp_ether_buffers[buffers_index]; + + /* Set data to descriptor. */ + R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[i].index, + &descriptor); + + /* Add to RX buffer pool without a buffer. */ + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_buffer_node); + buffers_index++; + } + else + { + /* Add to unreleased buffer queue without a buffer. */ + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_unreleased_buffer_queue, p_buffer_node); + } + } + } + + /* Save the remaining reception buffers as reserves. */ + if (NULL != p_instance_ctrl->p_cfg->pp_ether_buffers) + { + while (nodes_index < p_instance_ctrl->p_cfg->num_rx_descriptors) + { + p_buffer_node = &p_extend->p_buffer_node_list[nodes_index]; + p_buffer_node->p_buffer = p_instance_ctrl->p_cfg->pp_ether_buffers[buffers_index]; + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_empty_buffer_queue, p_buffer_node); + nodes_index++; + buffers_index++; + } + } + + /* Settings for TX descriptors.*/ + /* Initialize variables that used for TX. */ + p_instance_ctrl->write_queue_index = 0; + p_instance_ctrl->tx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + + p_instance_ctrl->tx_pending_buffer_queue.p_head = NULL; + p_instance_ctrl->tx_pending_buffer_queue.p_tail = NULL; + + /* Set descriptor type as terminate descriptor. TX descriptors will be set actual buffer in Write() API. */ + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY; + descriptor.basic.ptr_h = 0; + descriptor.basic.ptr_l = (uintptr_t) NULL; + descriptor.basic.ds_h = 0; + descriptor.basic.ds_l = 0; + descriptor.basic.die = 0; + descriptor.basic.ds_l = 0; + + /* Initialize descriptors of each TX queue */ + for (uint32_t i = 0; i < p_extend->tx_queue_num; i++) + { + /* For each TX descriptor exclude the last. */ + for (uint32_t j = 0; j < p_extend->p_tx_queue_list[0].queue_cfg.array_length - 1; j++) + { + /* Set data to descriptor. */ + R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_extend->p_tx_queue_list[i].index, + &descriptor); + } + + /* Performing a null transmission to initialize the TX queue. */ + R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, p_extend->p_tx_queue_list[i].index); + } + + /* Save the remaining transmit buffers as reserves. 
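+     *
+     * When zero copy is disabled, node indices below num_tx_descriptors + num_rx_descriptors still map to entries
+     * of pp_ether_buffers and are queued as ready-to-use TX buffers; any nodes beyond that are pooled without a
+     * buffer attached.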
*/ + while (nodes_index < p_extend->buffer_node_num) + { + p_buffer_node = &p_extend->p_buffer_node_list[nodes_index]; + nodes_index++; + if ((ETHER_ZEROCOPY_DISABLE == p_instance_ctrl->p_cfg->zerocopy) && + (buffers_index < (p_instance_ctrl->p_cfg->num_tx_descriptors + p_instance_ctrl->p_cfg->num_rx_descriptors))) + { + p_buffer_node->p_buffer = p_instance_ctrl->p_cfg->pp_ether_buffers[buffers_index]; + r_rmac_buffer_enqueue(&p_instance_ctrl->tx_empty_buffer_queue, p_buffer_node); + buffers_index++; + } + else + { + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_buffer_node); + } + } +} /* End of function ether_init_descriptors() */ + +/*********************************************************************************************************************** + * Function Name: rmac_init_buffers + * Description : The driver buffers are initialized. + * Arguments : p_instance_ctrl - + * RMAC control block. + * Return Value : none + ***********************************************************************************************************************/ +void rmac_init_buffers (rmac_instance_ctrl_t * const p_instance_ctrl) +{ + uint32_t i; + uint32_t buffer_num; + + if (NULL != p_instance_ctrl->p_cfg->pp_ether_buffers) + { + if (ETHER_ZEROCOPY_DISABLE == p_instance_ctrl->p_cfg->zerocopy) + { + buffer_num = + (uint32_t) (p_instance_ctrl->p_cfg->num_tx_descriptors + + p_instance_ctrl->p_cfg->num_rx_descriptors); + } + else + { + buffer_num = (uint32_t) p_instance_ctrl->p_cfg->num_rx_descriptors; + } + + for (i = 0; i < buffer_num; i++) + { + memset(p_instance_ctrl->p_cfg->pp_ether_buffers[i], 0x00, p_instance_ctrl->p_cfg->ether_buffer_size); + } + } +} /* End of function rmac_init_buffers() */ + +static fsp_err_t rmac_init_descriptor_queues (rmac_instance_ctrl_t * const p_instance_ctrl) +{ + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + fsp_err_t err = FSP_SUCCESS; + + /* Initialize TX queues. */ + for (uint32_t i = 0; i < p_extend->tx_queue_num; i++) + { + err = R_LAYER3_SWITCH_CreateDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + &p_extend->p_tx_queue_list[i].index, + &p_extend->p_tx_queue_list[i].queue_cfg); + if (FSP_SUCCESS != err) + { + break; + } + } + +#if LAYER3_SWITCH_CFG_GPTP_ENABLE + if (FSP_SUCCESS == err) + { + /* Initialize TS queues. */ + err = R_LAYER3_SWITCH_CreateDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + &p_extend->p_ts_queue->index, + &p_extend->p_ts_queue->queue_cfg); + } +#endif + + if (FSP_SUCCESS == err) + { + /* Initialize RX queues. */ + for (uint32_t i = 0; i < p_extend->rx_queue_num; i++) + { + err = R_LAYER3_SWITCH_CreateDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + &p_extend->p_rx_queue_list[i].index, + &p_extend->p_rx_queue_list[i].queue_cfg); + if (FSP_SUCCESS != err) + { + break; + } + } + } + + return err; +} + +/*********************************************************************************************************************** + * Reset RX descriptor. If a buffer is passed, set it to descriptor. + * When all descriptors in the queue are reset, increment index of the queue and restart next queue. 
+ ***********************************************************************************************************************/ +static fsp_err_t r_rmac_set_rx_buffer (rmac_instance_ctrl_t * p_instance_ctrl, rmac_buffer_node_t * p_buffer_node) +{ + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_descriptor_t descriptor = {0}; + fsp_err_t err; + + FSP_ERROR_RETURN(NULL != p_buffer_node, FSP_ERR_BUFFER_EMPTY); + + /* Reset descriptor as waiting for reception. */ + descriptor.basic.err = 0; + descriptor.basic.ds_h = (RMAC_DESCRIPTOR_FIELD_DS_UPPER_MASK & RMAC_MAXIMUM_FRAME_SIZE) >> + RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION; + descriptor.basic.ds_l = RMAC_DESCRIPTOR_FIELD_DS_LOWER_MASK & RMAC_MAXIMUM_FRAME_SIZE; + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY; + descriptor.basic.die = 1; + + /* Update the buffer when a new buffer is passed. */ + descriptor.basic.ptr_h = + (RMAC_DESCRIPTOR_FIELD_PTR_UPPER_MASK & (uint64_t) (uintptr_t) p_buffer_node->p_buffer) >> + RMAC_DESCRIPTOR_FIELD_PTR_UPPER_POSITION; + descriptor.basic.ptr_l = RMAC_DESCRIPTOR_FIELD_PTR_LOWER_MASK & (uintptr_t) p_buffer_node->p_buffer; + + /* Set new buffer to the descriptor. */ + if (RMAC_INVALID_QUEUE_INDEX != p_instance_ctrl->rx_running_queue_index) + { + p_instance_ctrl->read_queue_index = p_instance_ctrl->rx_running_queue_index; + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(p_instance_ctrl->read_queue_index, p_extend->rx_queue_num); + } + + err = + R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[p_instance_ctrl->read_queue_index].index, &descriptor); + + if (FSP_SUCCESS == err) + { + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_buffer_node); + } + else + { + /* Failed to set to the descriptor, store the buffer. */ + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_empty_buffer_queue, p_buffer_node); + if (FSP_ERR_OVERFLOW == err) + { + /* If any RX queue is not running, restart this queue. */ + if ((RMAC_INVALID_QUEUE_INDEX == p_instance_ctrl->rx_running_queue_index) && + (p_instance_ctrl->rx_initialized_buffer_num == p_instance_ctrl->p_cfg->num_rx_descriptors)) + { + fsp_err_t serr = R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[p_instance_ctrl-> + read_queue_index].index); + if (FSP_SUCCESS == serr) + { + p_instance_ctrl->rx_running_queue_index = p_instance_ctrl->read_queue_index; + } + else + { + p_instance_ctrl->rx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + } + } + + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(p_instance_ctrl->read_queue_index, p_extend->rx_queue_num); + } + } + + return err; +} + +/*********************************************************************************************************************** + * Set a buffer to the TX descriptor queue. + ***********************************************************************************************************************/ +static fsp_err_t r_rmac_set_tx_buffer (rmac_instance_ctrl_t * p_instance_ctrl, + void * p_write_buffer, + uint32_t frame_length, + uint32_t queue_index) +{ + layer3_switch_descriptor_t descriptor = {0}; + fsp_err_t err; + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_extended_cfg_t * p_layer3_switch_extend = + (layer3_switch_extended_cfg_t *) p_extend->p_ether_switch->p_cfg->p_extend; + + /* Set the buffer address to the descriptor. 
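+     *
+     * The buffer address (which may be wider than 32 bits) is split across the descriptor: ptr_l carries the bits
+     * selected by RMAC_DESCRIPTOR_FIELD_PTR_LOWER_MASK and ptr_h the remaining upper bits shifted down by
+     * RMAC_DESCRIPTOR_FIELD_PTR_UPPER_POSITION. For example, assuming a 32-bit lower field, a buffer at
+     * 0x1_2000_0000 would be stored as ptr_l = 0x2000_0000 and ptr_h = 0x1.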
*/ + descriptor.basic.ptr_h = (RMAC_DESCRIPTOR_FIELD_PTR_UPPER_MASK & (uint64_t) (uintptr_t) p_write_buffer) >> + RMAC_DESCRIPTOR_FIELD_PTR_UPPER_POSITION; + descriptor.basic.ptr_l = RMAC_DESCRIPTOR_FIELD_PTR_LOWER_MASK & (uintptr_t) p_write_buffer; + + /* Configure transmission descriptor. */ + descriptor.basic.ds_h = (RMAC_DESCRIPTOR_FIELD_DS_UPPER_MASK & frame_length) >> + RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION; + descriptor.basic.ds_l = RMAC_DESCRIPTOR_FIELD_DS_LOWER_MASK & frame_length; + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_FSINGLE; + descriptor.basic.die = 1; + descriptor.info1_tx.dv = RMAC_DESCRIPTOR_FIELD_DV_MASK & (1 << p_instance_ctrl->p_cfg->channel); + descriptor.info1_tx.fmt = 1; + + if (RMAC_WRITE_CFG_TX_TIMESTAMP_ENABLE == p_instance_ctrl->write_cfg.tx_timestamp_enable) + { + descriptor.info1_tx.txc = 1; + descriptor.info1_tx.tn = + (uint8_t) (p_layer3_switch_extend->gptp_timer_numbers[p_instance_ctrl->p_cfg->channel] & 0x1); + descriptor.info1_tx.tsun = + (p_instance_ctrl->tx_timestamp_seq_num & RMAC_TS_SEQUENCE_NUMBER_MASK); + p_instance_ctrl->tx_timestamp_seq_num = (p_instance_ctrl->tx_timestamp_seq_num + 1) & + RMAC_TS_SEQUENCE_NUMBER_MASK; + } + else + { + descriptor.info1_tx.txc = 0; + descriptor.info1_tx.tn = 0; + descriptor.info1_tx.tsun = 0; + } + + err = + R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, queue_index, &descriptor); + + return err; +} + +/*********************************************************************************************************************** + * Start a TX descriptor queue. + ***********************************************************************************************************************/ +static fsp_err_t r_rmac_start_tx_queue (rmac_instance_ctrl_t * p_instance_ctrl, uint32_t queue_index) +{ + layer3_switch_descriptor_t descriptor = {0}; + fsp_err_t err = FSP_SUCCESS; + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + + /* Try to set a terminate descriptor to imply the end of the queue. */ + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY; + + while (FSP_SUCCESS == err) + { + err = R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, queue_index, &descriptor); + } + + /* Start transmission. */ + err = R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, queue_index); + + if (RMAC_WRITE_CFG_TX_TIMESTAMP_ENABLE == p_instance_ctrl->write_cfg.tx_timestamp_enable) + { + for (uint32_t i = 0; i < RMAC_GET_TX_TIMESTAMP_WAIT_TIME; i++) + { + /* If found tx timestamp, break from wait time. */ + if (FSP_SUCCESS == r_rmac_get_tx_timestamp(p_instance_ctrl)) + { + break; + } + } + + /* Clear write configuration for next write */ + p_instance_ctrl->write_cfg.tx_timestamp_enable = 0; + } + + return err; +} + +/******************************************************************************************************************* + * Disable reception on this port. + **********************************************************************************************************************/ +void r_rmac_disable_reception (rmac_instance_ctrl_t * p_instance_ctrl) +{ + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_port_cfg_t port_cfg = {0}; + layer3_switch_descriptor_t descriptor = {0}; + + /* When magic packet detection is enabled, disable data interrupt. */ + port_cfg.p_callback = NULL; + + /* Disable CPU reception from this port. 
*/ + port_cfg.forwarding_to_cpu_enable = false; + R_LAYER3_SWITCH_ConfigurePort(p_extend->p_ether_switch->p_ctrl, p_instance_ctrl->p_cfg->channel, &port_cfg); + + FSP_CRITICAL_SECTION_DEFINE; + FSP_CRITICAL_SECTION_ENTER; + + /* Set the all RX descriptors to stop state. */ + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY; + for (uint32_t j = 0; j < p_extend->rx_queue_num; ++j) + { + /* Perform starting reception to initialize queue status. */ + R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, p_extend->p_rx_queue_list[j].index); + + for (uint32_t i = 0; i < p_extend->p_rx_queue_list[j].queue_cfg.array_length; i++) + { + R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[j].index, + &descriptor); + } + + /* Perform starting reception to initialize queue status. */ + R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, p_extend->p_rx_queue_list[j].index); + } + + p_instance_ctrl->rx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + + FSP_CRITICAL_SECTION_EXIT; +} + +/** Get the node from head of the queue and remove it. */ +static rmac_buffer_node_t * r_rmac_buffer_dequeue (rmac_buffer_queue_t * p_queue) +{ + /* Use critical section to prevent concurrent access to the queue. */ + FSP_CRITICAL_SECTION_DEFINE; + FSP_CRITICAL_SECTION_ENTER; + + rmac_buffer_node_t * p_node = p_queue->p_head; + if (NULL != p_node) + { + p_queue->p_head = p_node->p_next; + if (NULL == p_queue->p_head) + { + /* If the queue become empty, set the tail to the empty. */ + p_queue->p_tail = NULL; + } + + p_node->p_next = NULL; + } + + FSP_CRITICAL_SECTION_EXIT; + + return p_node; +} + +/** Add the node to tail of the queue. */ +static void r_rmac_buffer_enqueue (rmac_buffer_queue_t * p_queue, rmac_buffer_node_t * p_node) +{ + /* Use critical section to prevent concurrent access to the queue. */ + FSP_CRITICAL_SECTION_DEFINE; + FSP_CRITICAL_SECTION_ENTER; + + /* To add as a terminal node, set the next node to NULL. */ + if (NULL != p_node) + { + p_node->p_next = NULL; + + if (NULL != p_queue->p_tail) + { + p_queue->p_tail->p_next = p_node; + } + else + { + /* If the queue is empty, set the node also to the head. */ + p_queue->p_head = p_node; + } + + p_queue->p_tail = p_node; + } + + FSP_CRITICAL_SECTION_EXIT; +} + +static fsp_err_t r_rmac_get_rx_queue (rmac_instance_ctrl_t * p_instance_ctrl, uint32_t queue_index) +{ + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_descriptor_t descriptor; + rmac_buffer_node_t * p_buffer_node = NULL; + fsp_err_t get_err; + + /* Read all descriptors in the queue. */ + get_err = R_LAYER3_SWITCH_GetDescriptor(p_extend->p_ether_switch->p_ctrl, queue_index, &descriptor); + while (FSP_SUCCESS == get_err) + { + /* Store the read buffer as the RX completed buffer. */ + p_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->buffer_node_pool); + if (NULL != p_buffer_node) + { + if ((RMAC_INVALID_QUEUE_INDEX != p_instance_ctrl->rx_running_queue_index) && + (queue_index != p_extend->p_rx_queue_list[p_instance_ctrl->rx_running_queue_index].index)) + { + /* When the queue raising this interrupt is not the same as the currently running queue, discard data. */ + p_buffer_node->p_buffer = (void *) (uintptr_t) descriptor.basic.ptr_l; + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_empty_buffer_queue, p_buffer_node); + } + else + { + /* Store the received buffer. 
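+                     *
+                     * The received length is rebuilt from the two size fields as
+                     * (ds_h << RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION) + ds_l, the inverse of the split applied
+                     * when the empty descriptor was prepared.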
*/ + p_buffer_node->p_buffer = (void *) (uintptr_t) descriptor.basic.ptr_l; + p_buffer_node->size = + (uint32_t) ((descriptor.basic.ds_h << RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION) + + descriptor.basic.ds_l); + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_completed_buffer_queue, p_buffer_node); + +#if LAYER3_SWITCH_CFG_GPTP_ENABLE + + /* Get timestamp. */ + if (1 == descriptor.reception_ethernet_descriptor.tsv) + { + p_buffer_node->timestamp.ns = descriptor.reception_ethernet_descriptor.tsns; + p_buffer_node->timestamp.sec_lower = descriptor.reception_ethernet_descriptor.tss; + } + else + { + p_buffer_node->timestamp.ns = 0; + p_buffer_node->timestamp.sec_lower = 0; + } +#endif + } + } + + get_err = R_LAYER3_SWITCH_GetDescriptor(p_extend->p_ether_switch->p_ctrl, queue_index, &descriptor); + } + + return get_err; +} + +static fsp_err_t r_rmac_set_rx_queue (rmac_instance_ctrl_t * p_instance_ctrl, uint32_t queue_index) +{ + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_descriptor_t descriptor; + rmac_buffer_node_t * p_new_buffer_node = NULL; + fsp_err_t set_err = FSP_SUCCESS; + + /* When get the all descriptors in the queue, set new descriptors. */ + while (FSP_SUCCESS == set_err) + { + p_new_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->rx_empty_buffer_queue); + if (NULL != p_new_buffer_node) + { + descriptor.basic.err = 0; + descriptor.basic.ds_h = (RMAC_DESCRIPTOR_FIELD_DS_UPPER_MASK & RMAC_MAXIMUM_FRAME_SIZE) >> + RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION; + descriptor.basic.ds_l = RMAC_DESCRIPTOR_FIELD_DS_LOWER_MASK & RMAC_MAXIMUM_FRAME_SIZE; + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY; + descriptor.basic.die = 1; + descriptor.basic.ptr_h = + (RMAC_DESCRIPTOR_FIELD_PTR_UPPER_MASK & (uint64_t) (uintptr_t) p_new_buffer_node->p_buffer) >> + RMAC_DESCRIPTOR_FIELD_PTR_UPPER_POSITION; + descriptor.basic.ptr_l = RMAC_DESCRIPTOR_FIELD_PTR_LOWER_MASK & + (uintptr_t) p_new_buffer_node->p_buffer; + } + else + { + /* If no buffer is available, set LEMPTY. */ + descriptor.basic.dt = LAYER3_SWITCH_DESCRIPTOR_TYPE_LEMPTY; + descriptor.basic.ptr_l = 0; + } + + set_err = R_LAYER3_SWITCH_SetDescriptor(p_extend->p_ether_switch->p_ctrl, queue_index, &descriptor); + if (NULL != p_new_buffer_node) + { + if (FSP_SUCCESS == set_err) + { + /* Store the buffer node. */ + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_new_buffer_node); + } + else + { + /* Back the buffer node to the pool. */ + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_empty_buffer_queue, p_new_buffer_node); + } + } + } + + return set_err; +} + +static fsp_err_t r_rmac_get_tx_timestamp (rmac_instance_ctrl_t * p_instance_ctrl) +{ + fsp_err_t err = FSP_ERR_NOT_FOUND; + rmac_extended_cfg_t * p_rmac_extended_cfg = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + layer3_switch_descriptor_queue_cfg_t queue_cfg = p_rmac_extended_cfg->p_ts_queue->queue_cfg; + + /* Search tx timestamp from ts reception descriptor that has same timestamp id and not empty. 
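+     *
+     * The match key is the sequence number of the most recent timestamped Write(): tx_timestamp_seq_num is
+     * post-incremented when the TX descriptor is built, so the entry searched for here carries
+     * (tx_timestamp_seq_num - 1) & RMAC_TS_SEQUENCE_NUMBER_MASK. For example, assuming the counter started at
+     * zero, after three timestamped frames it reads 3 and the TS queue is scanned for tsun == 2.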
*/ + for (uint8_t i = 0; i < (queue_cfg.array_length - 1); i++) + { + if ((LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_ND != + queue_cfg.p_ts_descriptor_array[i].ts_reception_descriptor_result.dt) && + (((p_instance_ctrl->tx_timestamp_seq_num - 1) & RMAC_TS_SEQUENCE_NUMBER_MASK) == + queue_cfg.p_ts_descriptor_array[i].ts_reception_descriptor_result.tsun)) + { + p_instance_ctrl->tx_timestamp.sec_lower = + queue_cfg.p_ts_descriptor_array[i].ts_reception_descriptor_result.tss; + p_instance_ctrl->tx_timestamp.ns = + queue_cfg.p_ts_descriptor_array[i].ts_reception_descriptor_result.tsns; + + queue_cfg.p_ts_descriptor_array[i].ts_reception_descriptor_result.dt = + LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY_ND; + + err = FSP_SUCCESS; + break; + } + } + + return err; +} + +/*********************************************************************************************************************** + * Function Name: r_rmac_switch_interrupt_callback + * Description : Callback for RX/TX data interrupts. This function set to switch module callback. + ***********************************************************************************************************************/ +static void r_rmac_switch_interrupt_callback (ether_switch_callback_args_t * p_args) +{ + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) p_args->p_context; + rmac_extended_cfg_t * p_extend = (rmac_extended_cfg_t *) p_instance_ctrl->p_cfg->p_extend; + ether_callback_args_t callback_args = {0}; + uint32_t next_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + layer3_switch_descriptor_t descriptor; + rmac_buffer_node_t * p_buffer_node = NULL; + fsp_err_t get_err = FSP_SUCCESS; + fsp_err_t err = FSP_SUCCESS; + + switch (p_args->event) + { + case ETHER_SWITCH_EVENT_TX_COMPLETE: + { + callback_args.event = ETHER_EVENT_TX_COMPLETE; + get_err = R_LAYER3_SWITCH_GetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_args->queue_index, + &descriptor); + while (FSP_SUCCESS == get_err) + { + /* If zerocopy is enabled, save the last sent buffer. */ + if (ETHER_ZEROCOPY_ENABLE == p_instance_ctrl->p_cfg->zerocopy) + { + if (LAYER3_SWITCH_DESCRIPTOR_TYPE_FEMPTY == descriptor.basic.dt) + { + p_instance_ctrl->p_last_sent_buffer = (void *) (uintptr_t) descriptor.basic.ptr_l; + } + } + else + { + /* Store the buffer to the buffer pool. */ + p_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->buffer_node_pool); + if (NULL != p_buffer_node) + { + p_buffer_node->p_buffer = (void *) (uintptr_t) descriptor.basic.ptr_l; + r_rmac_buffer_enqueue(&p_instance_ctrl->tx_empty_buffer_queue, p_buffer_node); + } + } + + get_err = R_LAYER3_SWITCH_GetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_args->queue_index, + &descriptor); + } + + /* When finish transmission on the queue. */ + if (get_err == FSP_ERR_NOT_INITIALIZED) + { + p_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->tx_pending_buffer_queue); + while (NULL != p_buffer_node) + { + err = r_rmac_set_tx_buffer(p_instance_ctrl, + p_buffer_node->p_buffer, + p_buffer_node->size, + p_args->queue_index); + + if (FSP_SUCCESS == err) + { + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_buffer_node); + + /* Dequeue the next pending buffer. 
*/ + p_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->tx_pending_buffer_queue); + } + else + { + if (ETHER_ZEROCOPY_DISABLE == p_instance_ctrl->p_cfg->zerocopy) + { + r_rmac_buffer_enqueue(&p_instance_ctrl->tx_empty_buffer_queue, p_buffer_node); + } + else + { + r_rmac_buffer_enqueue(&p_instance_ctrl->buffer_node_pool, p_buffer_node); + } + + p_buffer_node = NULL; + } + } + + /* When pending TX data exits, start the next queue. */ + next_running_queue_index = p_instance_ctrl->tx_running_queue_index; + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(next_running_queue_index, p_extend->tx_queue_num); + err = + r_rmac_start_tx_queue(p_instance_ctrl, p_extend->p_tx_queue_list[next_running_queue_index].index); + + if (err == FSP_SUCCESS) + { + p_instance_ctrl->tx_running_queue_index = next_running_queue_index; + } + else + { + p_instance_ctrl->tx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + } + } + + break; + } + + case ETHER_SWITCH_EVENT_RX_COMPLETE: + { + p_instance_ctrl->is_lost_rx_packet = false; + + /* When the link is down, ignore this event. */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, ); + + /* Read all descriptors in the queue. */ + err = r_rmac_get_rx_queue(p_instance_ctrl, p_args->queue_index); + if (FSP_ERR_NOT_INITIALIZED == err) + { + err = r_rmac_set_rx_queue(p_instance_ctrl, p_args->queue_index); + } + + if (FSP_ERR_OVERFLOW == err) + { + /* Start reception on next queue. */ + next_running_queue_index = p_instance_ctrl->rx_running_queue_index; + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(next_running_queue_index, p_extend->rx_queue_num); + err = + R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[next_running_queue_index].index); + + if (FSP_SUCCESS == err) + { + p_instance_ctrl->rx_running_queue_index = next_running_queue_index; + } + else + { + p_instance_ctrl->rx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + } + } + + callback_args.event = ETHER_EVENT_RX_COMPLETE; + break; + } + + case ETHER_SWITCH_EVENT_RX_QUEUE_FULL: + { + p_instance_ctrl->is_lost_rx_packet = false; + + /* When the link is down, ignore this event. */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, ); + + if ((RMAC_INVALID_QUEUE_INDEX != p_instance_ctrl->rx_running_queue_index) && + (p_args->queue_index == p_extend->p_rx_queue_list[p_instance_ctrl->rx_running_queue_index].index)) + { + get_err = R_LAYER3_SWITCH_GetDescriptor(p_extend->p_ether_switch->p_ctrl, + p_args->queue_index, + &descriptor); + if (FSP_SUCCESS == get_err) + { + /* Start reception on next queue. */ + next_running_queue_index = p_instance_ctrl->rx_running_queue_index; + RMAC_INCREMENT_DESCRIPTOR_QUEUE_INDEX(next_running_queue_index, p_extend->rx_queue_num); + err = + R_LAYER3_SWITCH_StartDescriptorQueue(p_extend->p_ether_switch->p_ctrl, + p_extend->p_rx_queue_list[next_running_queue_index].index); + + /* Store the read buffer as the RX completed buffer. */ + p_buffer_node = r_rmac_buffer_dequeue(&p_instance_ctrl->buffer_node_pool); + if (NULL != p_buffer_node) + { + /* Store the received buffer. */ + p_buffer_node->p_buffer = (void *) (uintptr_t) descriptor.basic.ptr_l; + p_buffer_node->size = + (uint32_t) ((descriptor.basic.ds_h << RMAC_DESCRIPTOR_FIELD_DS_UPPER_POSITION) + + descriptor.basic.ds_l); + r_rmac_buffer_enqueue(&p_instance_ctrl->rx_completed_buffer_queue, p_buffer_node); + +#if LAYER3_SWITCH_CFG_GPTP_ENABLE + + /* Get timestamp. 
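+                     *
+                     * tsv flags a valid hardware timestamp; tss / tsns hold the lower seconds and nanoseconds that
+                     * are kept with the buffer node and (presumably, the read path is not shown in this hunk)
+                     * handed back through the pointer registered earlier with R_RMAC_GetRxTimestamp(), e.g.:
+                     *
+                     *     rmac_timestamp_t rx_ts;
+                     *     R_RMAC_GetRxTimestamp(p_api_ctrl, &rx_ts);  // register destination before read()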
*/ + if (1 == descriptor.reception_ethernet_descriptor.tsv) + { + p_buffer_node->timestamp.ns = descriptor.reception_ethernet_descriptor.tsns; + p_buffer_node->timestamp.sec_lower = descriptor.reception_ethernet_descriptor.tss; + } + else + { + p_buffer_node->timestamp.ns = 0; + p_buffer_node->timestamp.sec_lower = 0; + } +#endif + } + + if (FSP_SUCCESS == err) + { + p_instance_ctrl->rx_running_queue_index = next_running_queue_index; + } + else + { + p_instance_ctrl->rx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + } + + r_rmac_get_rx_queue(p_instance_ctrl, p_args->queue_index); + r_rmac_set_rx_queue(p_instance_ctrl, p_args->queue_index); + } + } + + callback_args.event = ETHER_EVENT_RX_COMPLETE; + break; + } + + case ETHER_SWITCH_EVENT_RX_MESSAGE_LOST: + { + /* When the link is down, ignore this event. */ + FSP_ERROR_RETURN(ETHER_LINK_ESTABLISH_STATUS_UP == p_instance_ctrl->link_establish_status, ); + + callback_args.event = ETHER_EVENT_RX_MESSAGE_LOST; + + /* When the lost error occurs repeatedly, treat the queue as stopped. */ + if (p_instance_ctrl->is_lost_rx_packet) + { + r_rmac_get_rx_queue(p_instance_ctrl, p_args->queue_index); + r_rmac_set_rx_queue(p_instance_ctrl, p_args->queue_index); + p_instance_ctrl->rx_running_queue_index = RMAC_INVALID_QUEUE_INDEX; + p_instance_ctrl->is_lost_rx_packet = false; + } + + if ((RMAC_INVALID_QUEUE_INDEX != p_instance_ctrl->rx_running_queue_index) && + (p_args->queue_index == p_extend->p_rx_queue_list[p_instance_ctrl->rx_running_queue_index].index)) + { + p_instance_ctrl->is_lost_rx_packet = true; + } + + break; + } + + default: + { + break; + } + } + + if (NULL != p_instance_ctrl->p_callback) + { + rmac_call_callback(p_instance_ctrl, &callback_args); + } +} + +/*********************************************************************************************************************** + * Function Name: rmac_rmpi_isr + * Description : Interrupt handler for RMPI interrupts. + * Arguments : none + * Return Value : none + ***********************************************************************************************************************/ +void rmac_rmpi_isr (void) +{ + /* Save context if RTOS is used */ + FSP_CONTEXT_SAVE + + ether_callback_args_t callback_arg; + uint32_t status_rmpi; + + IRQn_Type irq = R_FSP_CurrentIrqGet(); + rmac_instance_ctrl_t * p_instance_ctrl = (rmac_instance_ctrl_t *) R_FSP_IsrContextGet(irq); + + status_rmpi = p_instance_ctrl->p_reg_rmac->MMIS2; + + /* When the Magic Packet detection interrupt is generated */ + if (status_rmpi & R_RMAC0_MMIS2_MPDIS_Msk) + { + /* Disable the interrupt of Magic packet detection. */ + p_instance_ctrl->p_reg_rmac->MMID2_b.MPDID = 1; + + /* If a callback is provided, then call it with callback argument. */ + if (NULL != p_instance_ctrl->p_callback) + { + callback_arg.channel = p_instance_ctrl->p_cfg->channel; + callback_arg.event = ETHER_EVENT_WAKEON_LAN; + callback_arg.p_context = p_instance_ctrl->p_cfg->p_context; + rmac_call_callback(p_instance_ctrl, &callback_arg); + } + + if (ETHER_WAKE_ON_LAN_ENABLE == p_instance_ctrl->wake_on_lan) + { + /* Initialize the link status. */ + p_instance_ctrl->link_establish_status = ETHER_LINK_ESTABLISH_STATUS_DOWN; + p_instance_ctrl->link_change = ETHER_LINK_CHANGE_NO_CHANGE; + p_instance_ctrl->previous_link_status = ETHER_PREVIOUS_LINK_STATUS_DOWN; + + /* + * Call LinkProcess to initialize descriptors and resume reception. + * If the link is down, it need to call LinkProcess again after this interrput. 
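+     *
+     * In other words, a magic-packet wake-up re-runs the normal link bring-up path; if the PHY link is still down
+     * at this point, the application is expected to keep calling R_RMAC_LinkProcess() periodically (for example
+     * from its existing link-monitor loop, an assumption about the surrounding application) until it succeeds.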
+ */ + R_RMAC_LinkProcess(p_instance_ctrl); + } + } + + /* Clear RMPI status bit. */ + p_instance_ctrl->p_reg_rmac->MMIS2 = status_rmpi; /* Clear all ETHERC status BFR, PSRTO, LCHNG, MPD, ICD */ + + /* Clear pending interrupt flag to make sure it doesn't fire again + * after exiting. */ + R_BSP_IrqStatusClear(R_FSP_CurrentIrqGet()); + + /* Restore context if RTOS is used */ + FSP_CONTEXT_RESTORE +} /* End of function rmac_rmpi_isr() */ diff --git a/drivers/ra/fsp/src/r_rmac_phy/r_rmac_phy.c b/drivers/ra/fsp/src/r_rmac_phy/r_rmac_phy.c new file mode 100644 index 00000000..1054b857 --- /dev/null +++ b/drivers/ra/fsp/src/r_rmac_phy/r_rmac_phy.c @@ -0,0 +1,1076 @@ +/* +* Copyright (c) 2020 - 2025 Renesas Electronics Corporation and/or its affiliates +* +* SPDX-License-Identifier: BSD-3-Clause +*/ + +/*********************************************************************************************************************** + * Includes , "Project Includes" + ***********************************************************************************************************************/ + +/* Access to peripherals and board defines. */ +#include "bsp_api.h" +#include "r_rmac_phy.h" + +/*********************************************************************************************************************** + * Macro definitions + ***********************************************************************************************************************/ +#ifndef RMAC_PHY_ERROR_RETURN + #define RMAC_PHY_ERROR_RETURN(a, err) FSP_ERROR_RETURN((a), (err)) +#endif + +#define RMAC_REG_SIZE (R_RMAC1_BASE - R_RMAC0_BASE) +#define ETHA_REG_SIZE (R_ETHA1_BASE - R_ETHA0_BASE) + +/** "RPHY" in ASCII. Used to determine if the control block is open. */ +#define RMAC_PHY_OPEN (('R' << 24U) | ('P' << 16U) | ('H' << 8U) | ('Y' << 0U)) + +/* Standard PHY Registers */ +#define RMAC_PHY_REG_CONTROL (0) +#define RMAC_PHY_REG_STATUS (1) +#define RMAC_PHY_REG_IDENTIFIER1 (2) +#define RMAC_PHY_REG_IDENTIFIER2 (3) +#define RMAC_PHY_REG_AN_ADVERTISEMENT (4) +#define RMAC_PHY_REG_AN_LINK_PARTNER (5) +#define RMAC_PHY_REG_AN_EXPANSION (6) +#define RMAC_PHY_REG_GIGABIT_CONTROL (9) +#define RMAC_PHY_REG_GIGABIT_STATUS (10) + +/* Basic Mode Control Register Bit Definitions */ +#define RMAC_PHY_CONTROL_RESET (1 << 15) +#define RMAC_PHY_CONTROL_LOOPBACK (1 << 14) +#define RMAC_PHY_CONTROL_100_MBPS (1 << 13) +#define RMAC_PHY_CONTROL_AN_ENABLE (1 << 12) +#define RMAC_PHY_CONTROL_POWER_DOWN (1 << 11) +#define RMAC_PHY_CONTROL_ISOLATE (1 << 10) +#define RMAC_PHY_CONTROL_AN_RESTART (1 << 9) +#define RMAC_PHY_CONTROL_FULL_DUPLEX (1 << 8) +#define RMAC_PHY_CONTROL_COLLISION (1 << 7) + +/* Basic Mode Status Register Bit Definitions */ +#define RMAC_PHY_STATUS_100_T4 (1 << 15) +#define RMAC_PHY_STATUS_100F (1 << 14) +#define RMAC_PHY_STATUS_100H (1 << 13) +#define RMAC_PHY_STATUS_10F (1 << 12) +#define RMAC_PHY_STATUS_10H (1 << 11) +#define RMAC_PHY_STATUS_AN_COMPLETE (1 << 5) +#define RMAC_PHY_STATUS_RM_FAULT (1 << 4) +#define RMAC_PHY_STATUS_AN_ABILITY (1 << 3) +#define RMAC_PHY_STATUS_LINK_UP (1 << 2) +#define RMAC_PHY_STATUS_JABBER (1 << 1) +#define RMAC_PHY_STATUS_EX_CAPABILITY (1 << 0) + +/* Auto Negotiation Advertisement Bit Definitions */ +#define RMAC_PHY_AN_ADVERTISEMENT_NEXT_PAGE (1 << 15) +#define RMAC_PHY_AN_ADVERTISEMENT_RM_FAULT (1 << 13) +#define RMAC_PHY_AN_ADVERTISEMENT_ASM_DIR (1 << 11) +#define RMAC_PHY_AN_ADVERTISEMENT_PAUSE (1 << 10) +#define RMAC_PHY_AN_ADVERTISEMENT_100_T4 (1 << 9) +#define RMAC_PHY_AN_ADVERTISEMENT_100F (1 << 8) 
+#define RMAC_PHY_AN_ADVERTISEMENT_100H (1 << 7) +#define RMAC_PHY_AN_ADVERTISEMENT_10F (1 << 6) +#define RMAC_PHY_AN_ADVERTISEMENT_10H (1 << 5) +#define RMAC_PHY_AN_ADVERTISEMENT_SELECTOR (1 << 0) + +/* Auto Negotiate Link Partner Ability Bit Definitions */ +#define RMAC_PHY_AN_LINK_PARTNER_NEXT_PAGE (1 << 15) +#define RMAC_PHY_AN_LINK_PARTNER_ACK (1 << 14) +#define RMAC_PHY_AN_LINK_PARTNER_RM_FAULT (1 << 13) +#define RMAC_PHY_AN_LINK_PARTNER_ASM_DIR (1 << 11) +#define RMAC_PHY_AN_LINK_PARTNER_PAUSE (1 << 10) +#define RMAC_PHY_AN_LINK_PARTNER_100_T4 (1 << 9) +#define RMAC_PHY_AN_LINK_PARTNER_100F (1 << 8) +#define RMAC_PHY_AN_LINK_PARTNER_100H (1 << 7) +#define RMAC_PHY_AN_LINK_PARTNER_10F (1 << 6) +#define RMAC_PHY_AN_LINK_PARTNER_10H (1 << 5) +#define RMAC_PHY_AN_LINK_PARTNER_SELECTOR (1 << 0) + +/* Gigabit Control Bit Definitions */ +#define RMAC_PHY_GIGABIT_CONTROL_1000F (1 << 9) +#define RMAC_PHY_GIGABIT_CONTROL_1000H (1 << 8) + +/* Gigabit Status Bit Definitions. LP stands for Link Partner. */ +#define RMAC_PHY_GIGABIT_STATUS_LP_1000H (1 << 10) +#define RMAC_PHY_GIGABIT_STATUS_LP_1000F (1 << 11) + +#define RMAC_PHY_ADDRESS_SIZE (0x1fU) +#define RMAC_PHY_REGISTER_DATA_SIZE (0xffffU) + +/* MDIO frame OP code */ +#define RMAC_PHY_MDIO_OPCODE_WRITE (0x1) +#define RMAC_PHY_MDIO_OPCODE_READ (0x2) + +/* eMDIO frame OP code */ +#define RMAC_PHY_EMDIO_OPCODE_ADDRESS (0x0) +#define RMAC_PHY_EMDIO_OPCODE_WRITE (0x1) +#define RMAC_PHY_EMDIO_OPCODE_READ (0x3) +#define RMAC_PHY_EMDIO_OPCODE_POST_READ (0x2) + +/* Operation mode of ETHA*/ +#define RMAC_PHY_ETHA_CONFIG_MODE (0b10) +#define RMAC_PHY_ETHA_OPERATION_MODE (0b11) +#define RMAC_PHY_ETHA_DISABLE_MODE (0b01) + +/*********************************************************************************************************************** + * Typedef definitions + ***********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Exported global variables (to be accessed by other files) + ***********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Exported global function + ***********************************************************************************************************************/ +#if (ETHER_PHY_CFG_TARGET_KSZ8091RNB_ENABLE) +extern void rmac_phy_target_ksz8091rnb_initialize(rmac_phy_instance_ctrl_t * p_instance_ctrl); +extern bool rmac_phy_target_ksz8091rnb_is_support_link_partner_ability(rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex); + +#endif +#if (ETHER_PHY_CFG_TARGET_KSZ8041_ENABLE) +extern void rmac_phy_target_ksz8041_initialize(rmac_phy_instance_ctrl_t * p_instance_ctrl); +extern bool rmac_phy_target_ksz8041_is_support_link_partner_ability(rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex); + +#endif +#if (ETHER_PHY_CFG_TARGET_DP83620_ENABLE) +extern void rmac_phy_target_dp83620_initialize(rmac_phy_instance_ctrl_t * p_instance_ctrl); +extern bool rmac_phy_target_dp83620_is_support_link_partner_ability(rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex); + +#endif +#if (ETHER_PHY_CFG_TARGET_ICS1894_ENABLE) +extern void rmac_phy_target_ics1894_initialize(rmac_phy_instance_ctrl_t * 
p_instance_ctrl); +extern bool rmac_phy_target_ics1894_is_support_link_partner_ability(rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex); + +#endif + +#if (ETHER_PHY_CFG_TARGET_GPY111_ENABLE) +extern void rmac_phy_target_gpy111_initialize(rmac_phy_instance_ctrl_t * p_instance_ctrl); +extern bool rmac_phy_target_gpy111_is_support_link_partner_ability(rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex); + +#endif + +/*********************************************************************************************************************** + * Private global variables and functions + ***********************************************************************************************************************/ +static void rmac_phy_targets_initialize(rmac_phy_instance_ctrl_t * p_instance_ctrl); +static bool rmac_phy_targets_is_support_link_partner_ability(rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex); +static uint32_t r_rmac_phy_calculate_mpic(rmac_phy_instance_ctrl_t * p_instance_ctrl, uint32_t line_speed_duplex); +uint8_t r_rmac_phy_get_operation_mode(rmac_phy_instance_ctrl_t * p_instance_ctrl); +void r_rmac_phy_set_operation_mode(uint8_t channel, uint8_t mode); +void r_rmac_phy_set_mii_type_configuration(rmac_phy_instance_ctrl_t * p_instance_ctrl, uint8_t port); + +/** RMAC_PHY HAL API mapping for Ethernet PHY Controller interface */ +/*LDRA_INSPECTED 27 D This structure must be accessible in user code. It cannot be static. */ +const ether_phy_api_t g_ether_phy_on_rmac_phy = +{ + .open = R_RMAC_PHY_Open, + .close = R_RMAC_PHY_Close, + .startAutoNegotiate = R_RMAC_PHY_StartAutoNegotiate, + .linkPartnerAbilityGet = R_RMAC_PHY_LinkPartnerAbilityGet, + .linkStatusGet = R_RMAC_PHY_LinkStatusGet, + .chipInit = R_RMAC_PHY_ChipInit, + .read = R_RMAC_PHY_Read, + .write = R_RMAC_PHY_Write +}; + +/*******************************************************************************************************************//** + * @addtogroup RMAC_PHY + * @{ + **********************************************************************************************************************/ + +/*********************************************************************************************************************** + * Functions + **********************************************************************************************************************/ + +/********************************************************************************************************************//** + * @brief Resets Ethernet PHY device. Implements @ref ether_phy_api_t::open. + * + * @retval FSP_SUCCESS Channel opened successfully. + * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL. + * @retval FSP_ERR_ALREADY_OPEN Control block has already been opened or channel is being used by another + * instance. Call close() then open() to reconfigure. + * @retval FSP_ERR_INVALID_CHANNEL Invalid channel number is given. + * @retval FSP_ERR_INVALID_POINTER Pointer to p_cfg is NULL. + * @retval FSP_ERR_INVALID_MODE Function is called when not in CONFIG mode. 
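+ *
+ * @note  A minimal usage sketch (illustrative; the control block and configuration names are assumptions,
+ *        typically generated by the RA configurator):
+ *
+ *            rmac_phy_instance_ctrl_t g_rmac_phy0_ctrl;
+ *            fsp_err_t err = R_RMAC_PHY_Open(&g_rmac_phy0_ctrl, &g_rmac_phy0_cfg);
+ *
+ *        Without RMAC_PHY_CFG_CUSTOM_PHY_INIT the corresponding ETHA channel must already be in CONFIG mode;
+ *        when it is defined, open() itself cycles every configured channel through DISABLE, CONFIG and OPERATION
+ *        mode while programming MPIC and the MII type.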
+ ***********************************************************************************************************************/ +fsp_err_t R_RMAC_PHY_Open (ether_phy_ctrl_t * const p_ctrl, ether_phy_cfg_t const * const p_cfg) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl; + rmac_phy_extended_cfg_t * p_extend; + uint32_t link_speed = ETHER_PHY_LINK_SPEED_10F; + R_RMAC0_Type * p_reg_rmac; + +#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(NULL != p_instance_ctrl); + RMAC_PHY_ERROR_RETURN(NULL != p_cfg, FSP_ERR_INVALID_POINTER); + RMAC_PHY_ERROR_RETURN((RMAC_PHY_OPEN != p_instance_ctrl->open), FSP_ERR_ALREADY_OPEN); + RMAC_PHY_ERROR_RETURN((BSP_FEATURE_ETHER_NUM_CHANNELS > p_cfg->channel), FSP_ERR_INVALID_CHANNEL); + p_extend = (rmac_phy_extended_cfg_t *) p_cfg->p_extend; + RMAC_PHY_ERROR_RETURN(NULL != p_extend, FSP_ERR_INVALID_POINTER); + RMAC_PHY_ERROR_RETURN(NULL != p_extend->p_phy_lsi_cfg_list[p_extend->default_phy_lsi_cfg_index], + FSP_ERR_INVALID_ARGUMENT); +#else + p_extend = (rmac_phy_extended_cfg_t *) p_cfg->p_extend; +#endif + + /* Initialize configuration of ethernet phy module. */ + p_instance_ctrl->p_ether_phy_cfg = p_cfg; + +#ifndef RMAC_PHY_CFG_CUSTOM_PHY_INIT + /* ETHA IP should be CONFIG mode */ + RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_CONFIG_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl), + FSP_ERR_INVALID_MODE); +#endif + + /* Set the RMAC register address of this channel. */ + p_instance_ctrl->p_reg_rmac = (R_RMAC0_Type *) (R_RMAC0_BASE + (RMAC_REG_SIZE * p_cfg->channel)); + p_instance_ctrl->local_advertise = 0; + + /* Copy default PHY LSI settings. */ + p_instance_ctrl->phy_lsi_cfg_index = p_extend->default_phy_lsi_cfg_index; + + /* Configure maximum link speed for each interface. */ + if ((ETHER_PHY_MII_TYPE_MII == p_instance_ctrl->p_ether_phy_cfg->mii_type) || + (ETHER_PHY_MII_TYPE_RMII == p_instance_ctrl->p_ether_phy_cfg->mii_type)) + { + link_speed = ETHER_PHY_LINK_SPEED_100F; + } + else if ((ETHER_PHY_MII_TYPE_GMII == p_instance_ctrl->p_ether_phy_cfg->mii_type) || + (ETHER_PHY_MII_TYPE_RGMII == p_instance_ctrl->p_ether_phy_cfg->mii_type)) + { + link_speed = ETHER_PHY_LINK_SPEED_1000F; + } + else + { + ; + } + + /* Configure PHY interface for each available channels. */ + for (uint32_t i = 0; i < BSP_FEATURE_ETHER_NUM_CHANNELS; i++) + { +#ifndef RMAC_PHY_CFG_CUSTOM_PHY_INIT + if (NULL != p_extend->p_phy_lsi_cfg_list[i]) + { + p_reg_rmac = (R_RMAC0_Type *) (R_RMAC0_BASE + (RMAC_REG_SIZE * i)); + p_reg_rmac->MPIC = r_rmac_phy_calculate_mpic(p_instance_ctrl, link_speed); + } +#else + r_rmac_phy_set_operation_mode(i, RMAC_PHY_ETHA_DISABLE_MODE); + r_rmac_phy_set_operation_mode(i, RMAC_PHY_ETHA_CONFIG_MODE); + p_reg_rmac = (R_RMAC0_Type *) (R_RMAC0_BASE + (RMAC_REG_SIZE * i)); + p_reg_rmac->MPIC = r_rmac_phy_calculate_mpic(p_instance_ctrl, link_speed); + r_rmac_phy_set_mii_type_configuration(p_instance_ctrl, i); + r_rmac_phy_set_operation_mode(i, RMAC_PHY_ETHA_DISABLE_MODE); + r_rmac_phy_set_operation_mode(i, RMAC_PHY_ETHA_OPERATION_MODE); +#endif + } + + p_instance_ctrl->open = RMAC_PHY_OPEN; + + return err; +} /* End of function R_RMAC_PHY_Open() */ + +/********************************************************************************************************************//** + * @brief Close Ethernet PHY device. Implements @ref ether_phy_api_t::close. + * + * @retval FSP_SUCCESS Channel successfully closed. + * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL. 
+ * @retval FSP_ERR_NOT_OPEN The control block has not been opened + * @retval FSP_ERR_INVALID_MODE Function is called when not in DISABLE mode. + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_PHY_Close (ether_phy_ctrl_t * const p_ctrl) +{ + fsp_err_t err = FSP_SUCCESS; + rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl; + +#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(NULL != p_instance_ctrl); + RMAC_PHY_ERROR_RETURN(RMAC_PHY_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); +#endif + + /* ETHA IP should be DISABLE mode */ + RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_DISABLE_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl), + FSP_ERR_INVALID_MODE); + + /** Clear configure block parameters. */ + p_instance_ctrl->p_ether_phy_cfg = NULL; + p_instance_ctrl->local_advertise = 0; + p_instance_ctrl->p_reg_rmac = NULL; + + p_instance_ctrl->interface_status = RMAC_PHY_INTERFACE_STATUS_UNINITIALIZED; + p_instance_ctrl->open = 0; + + return err; +} /* End of function R_RMAC_PHY_Close() */ + +/********************************************************************************************************************//** + * @brief Starts auto-negotiate. Implements @ref ether_phy_api_t::startAutoNegotiate. + * + * @retval FSP_SUCCESS RMAC_PHY successfully starts auto-negotiate. + * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL. + * @retval FSP_ERR_NOT_OPEN The control block has not been opened + * @retval FSP_ERR_NOT_INITIALIZED The control block has not been initialized + * @retval FSP_ERR_INVALID_MODE Function is called when not in OPERATION mode. + ***********************************************************************************************************************/ +fsp_err_t R_RMAC_PHY_StartAutoNegotiate (ether_phy_ctrl_t * const p_ctrl) +{ + rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl; + uint32_t reg = 0; + +#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE) + FSP_ASSERT(NULL != p_instance_ctrl); + RMAC_PHY_ERROR_RETURN(RMAC_PHY_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN); + RMAC_PHY_ERROR_RETURN(RMAC_PHY_INTERFACE_STATUS_INITIALIZED == p_instance_ctrl->interface_status, + FSP_ERR_NOT_INITIALIZED); +#endif + + /* ETHA IP should be OPERATION mode */ + RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_OPERATION_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl), + FSP_ERR_INVALID_MODE); + + /* Set local ability */ + /* When pause frame is not used */ + if (ETHER_PHY_FLOW_CONTROL_DISABLE == p_instance_ctrl->p_ether_phy_cfg->flow_control) + { + p_instance_ctrl->local_advertise = ((((RMAC_PHY_AN_ADVERTISEMENT_100F | + RMAC_PHY_AN_ADVERTISEMENT_100H) | + RMAC_PHY_AN_ADVERTISEMENT_10F) | + RMAC_PHY_AN_ADVERTISEMENT_10H) | + RMAC_PHY_AN_ADVERTISEMENT_SELECTOR); + } + /* When pause frame is used */ + else + { + p_instance_ctrl->local_advertise = ((((((RMAC_PHY_AN_ADVERTISEMENT_ASM_DIR | + RMAC_PHY_AN_ADVERTISEMENT_PAUSE) | + RMAC_PHY_AN_ADVERTISEMENT_100F) | + RMAC_PHY_AN_ADVERTISEMENT_100H) | + RMAC_PHY_AN_ADVERTISEMENT_10F) | + RMAC_PHY_AN_ADVERTISEMENT_10H) | + RMAC_PHY_AN_ADVERTISEMENT_SELECTOR); + } + + /* Configure what the PHY and the Ethernet controller on this board supports */ + R_RMAC_PHY_Write(p_instance_ctrl, RMAC_PHY_REG_AN_ADVERTISEMENT, p_instance_ctrl->local_advertise); + + /* Advertise Gigabit Ethernet capacity when using RGMII or GMII. 
 */
+    if ((ETHER_PHY_MII_TYPE_RGMII == p_instance_ctrl->p_ether_phy_cfg->mii_type) ||
+        (ETHER_PHY_MII_TYPE_GMII == p_instance_ctrl->p_ether_phy_cfg->mii_type))
+    {
+        R_RMAC_PHY_Write(p_instance_ctrl, RMAC_PHY_REG_GIGABIT_CONTROL,
+                         (RMAC_PHY_GIGABIT_CONTROL_1000F | RMAC_PHY_GIGABIT_CONTROL_1000H));
+    }
+
+    /* Start auto-negotiation. */
+    R_RMAC_PHY_Write(p_instance_ctrl,
+                     RMAC_PHY_REG_CONTROL,
+                     (RMAC_PHY_CONTROL_AN_ENABLE |
+                      RMAC_PHY_CONTROL_AN_RESTART));
+
+    R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_AN_ADVERTISEMENT, &reg);
+
+    return FSP_SUCCESS;
+}                                      /* End of function R_RMAC_PHY_StartAutoNegotiate() */
+
+/********************************************************************************************************************//**
+ * @brief Reports the other side's physical capability. Implements @ref ether_phy_api_t::linkPartnerAbilityGet.
+ *
+ * @retval  FSP_SUCCESS                     RMAC_PHY successfully gets the link partner ability.
+ * @retval  FSP_ERR_ASSERTION               Pointer to RMAC_PHY control block is NULL.
+ * @retval  FSP_ERR_INVALID_POINTER         A pointer argument is NULL.
+ * @retval  FSP_ERR_NOT_OPEN                The control block has not been opened
+ * @retval  FSP_ERR_ETHER_PHY_ERROR_LINK    PHY-LSI link is not up.
+ * @retval  FSP_ERR_ETHER_PHY_NOT_READY     The auto-negotiation isn't completed.
+ * @retval  FSP_ERR_NOT_INITIALIZED         The control block has not been initialized
+ * @retval  FSP_ERR_INVALID_MODE            Function is called when not in OPERATION mode.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_PHY_LinkPartnerAbilityGet (ether_phy_ctrl_t * const p_ctrl,
+                                            uint32_t * const p_line_speed_duplex,
+                                            uint32_t * const p_local_pause,
+                                            uint32_t * const p_partner_pause)
+{
+    fsp_err_t                  err               = FSP_SUCCESS;
+    rmac_phy_instance_ctrl_t * p_instance_ctrl   = (rmac_phy_instance_ctrl_t *) p_ctrl;
+    uint32_t                   reg               = 0;
+    uint32_t                   line_speed_duplex = ETHER_PHY_LINK_SPEED_NO_LINK;
+    uint32_t                   mpic;
+    R_RMAC0_Type             * p_reg_rmac;
+
+#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(NULL != p_instance_ctrl);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    RMAC_PHY_ERROR_RETURN(NULL != p_line_speed_duplex, FSP_ERR_INVALID_POINTER);
+    RMAC_PHY_ERROR_RETURN(NULL != p_local_pause, FSP_ERR_INVALID_POINTER);
+    RMAC_PHY_ERROR_RETURN(NULL != p_partner_pause, FSP_ERR_INVALID_POINTER);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_INTERFACE_STATUS_INITIALIZED == p_instance_ctrl->interface_status,
+                          FSP_ERR_NOT_INITIALIZED);
+#endif
+
+    /* ETHA IP should be OPERATION mode */
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_OPERATION_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl),
+                          FSP_ERR_INVALID_MODE);
+
+    /* Because reading the first time shows the previous state, the Link status bit is read twice. */
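+
+    /* (The bit is latched low per IEEE 802.3 Clause 22: once the link has dropped it reads 0 until sampled, so
+     * only the second read reflects the current link state.) */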
+    R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_STATUS, &reg);
+    R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_STATUS, &reg);
+
+    /* When the link isn't up, return error */
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_STATUS_LINK_UP == (reg & RMAC_PHY_STATUS_LINK_UP), FSP_ERR_ETHER_PHY_ERROR_LINK);
+
+    /* Establish local pause capability */
+    if (RMAC_PHY_AN_ADVERTISEMENT_PAUSE == (p_instance_ctrl->local_advertise & RMAC_PHY_AN_ADVERTISEMENT_PAUSE))
+    {
+        (*p_local_pause) |= (1 << 1);
+    }
+
+    if (RMAC_PHY_AN_ADVERTISEMENT_ASM_DIR == (p_instance_ctrl->local_advertise & RMAC_PHY_AN_ADVERTISEMENT_ASM_DIR))
+    {
+        (*p_local_pause) |= 1;
+    }
+
+    /* When the auto-negotiation isn't completed, return error */
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_STATUS_AN_COMPLETE == (reg & RMAC_PHY_STATUS_AN_COMPLETE),
+                          FSP_ERR_ETHER_PHY_NOT_READY);
+
+    /* Get the link partner response */
+    R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_AN_LINK_PARTNER, &reg);
+
+    /* Establish partner pause capability */
+    if (RMAC_PHY_AN_LINK_PARTNER_PAUSE == (reg & RMAC_PHY_AN_LINK_PARTNER_PAUSE))
+    {
+        (*p_partner_pause) = (1 << 1);
+    }
+
+    if (RMAC_PHY_AN_LINK_PARTNER_ASM_DIR == (reg & RMAC_PHY_AN_LINK_PARTNER_ASM_DIR))
+    {
+        (*p_partner_pause) |= 1;
+    }
+
+    /* Establish the line speed and the duplex */
+    if ((RMAC_PHY_AN_LINK_PARTNER_10H == (reg & RMAC_PHY_AN_LINK_PARTNER_10H)) &&
+        rmac_phy_targets_is_support_link_partner_ability(p_instance_ctrl, ETHER_PHY_LINK_SPEED_10H))
+    {
+        line_speed_duplex = ETHER_PHY_LINK_SPEED_10H;
+    }
+
+    if ((RMAC_PHY_AN_LINK_PARTNER_10F == (reg & RMAC_PHY_AN_LINK_PARTNER_10F)) &&
+        rmac_phy_targets_is_support_link_partner_ability(p_instance_ctrl, ETHER_PHY_LINK_SPEED_10F))
+    {
+        line_speed_duplex = ETHER_PHY_LINK_SPEED_10F;
+    }
+
+    if ((RMAC_PHY_AN_LINK_PARTNER_100H == (reg & RMAC_PHY_AN_LINK_PARTNER_100H)) &&
+        rmac_phy_targets_is_support_link_partner_ability(p_instance_ctrl, ETHER_PHY_LINK_SPEED_100H))
+    {
+        line_speed_duplex = ETHER_PHY_LINK_SPEED_100H;
+    }
+
+    if ((RMAC_PHY_AN_LINK_PARTNER_100F == (reg & RMAC_PHY_AN_LINK_PARTNER_100F)) &&
+        rmac_phy_targets_is_support_link_partner_ability(p_instance_ctrl, ETHER_PHY_LINK_SPEED_100F))
+    {
+        line_speed_duplex = ETHER_PHY_LINK_SPEED_100F;
+    }
+
+    /* When MII type is RGMII or GMII, also check Gigabit Ethernet ability. */
+    if ((ETHER_PHY_MII_TYPE_RGMII == p_instance_ctrl->p_ether_phy_cfg->mii_type) ||
+        (ETHER_PHY_MII_TYPE_GMII == p_instance_ctrl->p_ether_phy_cfg->mii_type))
+    {
+        R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_GIGABIT_STATUS, &reg);
+
+        if ((RMAC_PHY_GIGABIT_STATUS_LP_1000H == (reg & RMAC_PHY_GIGABIT_STATUS_LP_1000H)) &&
+            rmac_phy_targets_is_support_link_partner_ability(p_instance_ctrl, ETHER_PHY_LINK_SPEED_1000H))
+        {
+            line_speed_duplex = ETHER_PHY_LINK_SPEED_1000H;
+        }
+
+        if ((RMAC_PHY_GIGABIT_STATUS_LP_1000F == (reg & RMAC_PHY_GIGABIT_STATUS_LP_1000F)) &&
+            rmac_phy_targets_is_support_link_partner_ability(p_instance_ctrl, ETHER_PHY_LINK_SPEED_1000F))
+        {
+            line_speed_duplex = ETHER_PHY_LINK_SPEED_1000F;
+        }
+    }
+
+    if (ETHER_PHY_LINK_SPEED_NO_LINK == line_speed_duplex)
+    {
+        err = FSP_ERR_ETHER_PHY_ERROR_LINK;
+    }
+    else
+    {
+        (*p_line_speed_duplex) = line_speed_duplex;
+    }
+
+    mpic = r_rmac_phy_calculate_mpic(p_instance_ctrl, line_speed_duplex);
+    p_reg_rmac = (R_RMAC0_Type *) (R_RMAC0_BASE + (RMAC_REG_SIZE * p_instance_ctrl->phy_lsi_cfg_index));
+    if (mpic != p_reg_rmac->MPIC)
+    {
+        /* Set ETHA to CONFIG mode */
+        r_rmac_phy_set_operation_mode(p_instance_ctrl->phy_lsi_cfg_index, RMAC_PHY_ETHA_DISABLE_MODE);
+        r_rmac_phy_set_operation_mode(p_instance_ctrl->phy_lsi_cfg_index, RMAC_PHY_ETHA_CONFIG_MODE);
+
+        p_reg_rmac->MPIC = mpic;
+
+        /* Set ETHA to OPERATION mode */
+        r_rmac_phy_set_operation_mode(p_instance_ctrl->phy_lsi_cfg_index, RMAC_PHY_ETHA_DISABLE_MODE);
+        r_rmac_phy_set_operation_mode(p_instance_ctrl->phy_lsi_cfg_index, RMAC_PHY_ETHA_OPERATION_MODE);
+    }
+
+    return err;
+} /* End of function R_RMAC_PHY_LinkPartnerAbilityGet() */
+
+/********************************************************************************************************************//**
+ * @brief Returns the status of the physical link. Implements @ref ether_phy_api_t::linkStatusGet.
+ *
+ * @retval FSP_SUCCESS RMAC_PHY successfully gets the link status.
+ * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL.
+ * @retval FSP_ERR_NOT_OPEN The control block has not been opened
+ * @retval FSP_ERR_ETHER_PHY_ERROR_LINK The PHY-LSI link is not up.
+ * @retval FSP_ERR_NOT_INITIALIZED The control block has not been initialized
+ * @retval FSP_ERR_INVALID_MODE Function is called when not in OPERATION mode.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_PHY_LinkStatusGet (ether_phy_ctrl_t * const p_ctrl)
+{
+    rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl;
+    uint32_t reg = 0;
+    fsp_err_t err = FSP_SUCCESS;
+
+#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(NULL != p_instance_ctrl);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_INTERFACE_STATUS_INITIALIZED == p_instance_ctrl->interface_status,
+                          FSP_ERR_NOT_INITIALIZED);
+#endif
+
+    /* ETHA IP should be OPERATION mode */
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_OPERATION_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl),
+                          FSP_ERR_INVALID_MODE);
+
+    /* Because reading the first time shows the previous state, the Link status bit is read twice. */
+    R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_STATUS, &reg);
+    R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_STATUS, &reg);
+
+    /* When the link isn't up, return error */
+    if (RMAC_PHY_STATUS_LINK_UP != (reg & RMAC_PHY_STATUS_LINK_UP))
+    {
+        /* Link is down */
+        err = FSP_ERR_ETHER_PHY_ERROR_LINK;
+    }
+    else
+    {
+        /* Link is up */
+        err = FSP_SUCCESS;
+    }
+
+    return err;
+} /* End of function R_RMAC_PHY_LinkStatusGet() */
+
+/********************************************************************************************************************//**
+ * @brief Initialize Ethernet PHY device. Implements @ref ether_phy_api_t::chipInit.
+ *
+ * @retval FSP_SUCCESS PHY device initialized successfully.
+ * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL.
+ * @retval FSP_ERR_INVALID_POINTER Pointer to configuration structure is NULL.
+ * @retval FSP_ERR_NOT_OPEN The control block has not been opened.
+ * @retval FSP_ERR_TIMEOUT PHY-LSI Reset wait timeout.
+ * @retval FSP_ERR_INVALID_MODE Function is called when not in OPERATION mode.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_PHY_ChipInit (ether_phy_ctrl_t * const p_ctrl, ether_phy_cfg_t const * const p_cfg)
+{
+    fsp_err_t err = FSP_SUCCESS;
+    rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl;
+    uint32_t reg = 0;
+    uint32_t count = 0;
+
+#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(NULL != p_instance_ctrl);
+    RMAC_PHY_ERROR_RETURN(NULL != p_cfg, FSP_ERR_INVALID_POINTER);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+#endif
+
+    /* ETHA IP should be OPERATION mode */
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_OPERATION_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl),
+                          FSP_ERR_INVALID_MODE);
+
+    /* Set MII type configuration for this PHY LSI port. */
+    r_rmac_phy_set_mii_type_configuration(p_instance_ctrl, p_instance_ctrl->phy_lsi_cfg_index);
+
+    p_instance_ctrl->interface_status = RMAC_PHY_INTERFACE_STATUS_INITIALIZED;
+
+    /* Reset PHY */
+    R_RMAC_PHY_Write(p_instance_ctrl, RMAC_PHY_REG_CONTROL, RMAC_PHY_CONTROL_RESET);
+
+    /* Reset completion waiting */
+    do
+    {
+        R_RMAC_PHY_Read(p_instance_ctrl, RMAC_PHY_REG_CONTROL, &reg);
+        count++;
+    } while ((reg & RMAC_PHY_CONTROL_RESET) && (count < p_cfg->phy_reset_wait_time));
+
+    if (count < p_cfg->phy_reset_wait_time)
+    {
+        rmac_phy_targets_initialize(p_instance_ctrl);
+    }
+    else
+    {
+        err = FSP_ERR_TIMEOUT;
+    }
+
+    return err;
+} /* End of function R_RMAC_PHY_ChipInit() */
+
+/********************************************************************************************************************//**
+ * @brief Read data from a PHY-LSI register. Implements @ref ether_phy_api_t::read.
+ *
+ * @retval FSP_SUCCESS RMAC_PHY successfully reads data.
+ * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL.
+ * @retval FSP_ERR_INVALID_POINTER Pointer to read buffer is NULL.
+ * @retval FSP_ERR_INVALID_ARGUMENT Address is not a valid size
+ * @retval FSP_ERR_NOT_INITIALIZED The control block has not been initialized
+ * @retval FSP_ERR_INVALID_MODE Function is called when not in OPERATION mode.
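+ *
+ * Example: a minimal usage sketch (not part of the FSP sources; g_rmac_phy0_ctrl is a placeholder name for
+ * an opened and initialized RMAC_PHY control block):
+ * @code
+ * uint32_t status = 0;
+ *
+ * // Read the status register of the currently selected PHY LSI over MDIO.
+ * fsp_err_t err = R_RMAC_PHY_Read(&g_rmac_phy0_ctrl, RMAC_PHY_REG_STATUS, &status);
+ * if (FSP_SUCCESS == err)
+ * {
+ *     // status now holds the value latched from the MDIO transfer.
+ * }
+ * @endcode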
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_PHY_Read (ether_phy_ctrl_t * const p_ctrl, uint32_t reg_addr, uint32_t * const p_data)
+{
+    rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl;
+    uint32_t mpsm = 0;
+    rmac_phy_extended_cfg_t * p_extend;
+
+#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(NULL != p_instance_ctrl);
+    RMAC_PHY_ERROR_RETURN(NULL != p_data, FSP_ERR_INVALID_POINTER);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_ADDRESS_SIZE >= reg_addr, FSP_ERR_INVALID_ARGUMENT);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_INTERFACE_STATUS_INITIALIZED == p_instance_ctrl->interface_status,
+                          FSP_ERR_NOT_INITIALIZED);
+#endif
+    p_extend = (rmac_phy_extended_cfg_t *) p_instance_ctrl->p_ether_phy_cfg->p_extend;
+
+    /* Create configuration value to read phy register. */
+    mpsm |= R_RMAC0_MPSM_POP_Msk & (RMAC_PHY_MDIO_OPCODE_READ << R_RMAC0_MPSM_POP_Pos);
+    mpsm |= R_RMAC0_MPSM_PDA_Msk &
+            ((uint32_t) p_extend->p_phy_lsi_cfg_list[p_instance_ctrl->phy_lsi_cfg_index]->address <<
+             R_RMAC0_MPSM_PDA_Pos);
+    mpsm |= R_RMAC0_MPSM_PRA_Msk & (reg_addr << R_RMAC0_MPSM_PRA_Pos);
+
+    /* Set configuration value. */
+    p_instance_ctrl->p_reg_rmac->MPSM = mpsm;
+
+    /* Start read access to the phy register and wait for completion. */
+    p_instance_ctrl->p_reg_rmac->MPSM_b.PSME = 1;
+    FSP_HARDWARE_REGISTER_WAIT(p_instance_ctrl->p_reg_rmac->MPSM_b.PSME, 0);
+
+    /* Copy register value. */
+    *p_data = p_instance_ctrl->p_reg_rmac->MPSM_b.PRD;
+
+    return FSP_SUCCESS;
+} /* End of function R_RMAC_PHY_Read() */
+
+/********************************************************************************************************************//**
+ * @brief Write data to a PHY-LSI register. Implements @ref ether_phy_api_t::write.
+ *
+ * @retval FSP_SUCCESS RMAC_PHY successfully writes data.
+ * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL.
+ * @retval FSP_ERR_INVALID_ARGUMENT Address or data is not a valid size
+ * @retval FSP_ERR_NOT_INITIALIZED The control block has not been initialized
+ * @retval FSP_ERR_INVALID_MODE Function is called when not in OPERATION mode.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_PHY_Write (ether_phy_ctrl_t * const p_ctrl, uint32_t reg_addr, uint32_t data)
+{
+    rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl;
+    uint32_t mpsm = 0;
+    rmac_phy_extended_cfg_t * p_extend;
+
+#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(NULL != p_instance_ctrl);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_ADDRESS_SIZE >= reg_addr, FSP_ERR_INVALID_ARGUMENT);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_REGISTER_DATA_SIZE >= data, FSP_ERR_INVALID_ARGUMENT);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_INTERFACE_STATUS_INITIALIZED == p_instance_ctrl->interface_status,
+                          FSP_ERR_NOT_INITIALIZED);
+#endif
+    p_extend = (rmac_phy_extended_cfg_t *) p_instance_ctrl->p_ether_phy_cfg->p_extend;
+
+    /* ETHA IP should be OPERATION mode */
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_ETHA_OPERATION_MODE == r_rmac_phy_get_operation_mode(p_instance_ctrl),
+                          FSP_ERR_INVALID_MODE);
+
+    /* Create configuration value to write phy register. */
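+    /* (MPSM fields as used below: POP = MDIO opcode, PDA = PHY address taken from the selected
+     * p_phy_lsi_cfg_list entry, PRA = PHY register address, PRD = data to write.) */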
+    mpsm |= R_RMAC0_MPSM_POP_Msk & (RMAC_PHY_MDIO_OPCODE_WRITE << R_RMAC0_MPSM_POP_Pos);
+    mpsm |= R_RMAC0_MPSM_PDA_Msk &
+            ((uint32_t) p_extend->p_phy_lsi_cfg_list[p_instance_ctrl->phy_lsi_cfg_index]->address <<
+             R_RMAC0_MPSM_PDA_Pos);
+    mpsm |= R_RMAC0_MPSM_PRA_Msk & (reg_addr << R_RMAC0_MPSM_PRA_Pos);
+    mpsm |= R_RMAC0_MPSM_PRD_Msk & (data << R_RMAC0_MPSM_PRD_Pos);
+
+    /* Set configuration value. */
+    p_instance_ctrl->p_reg_rmac->MPSM = mpsm;
+
+    /* Start write access to the phy register and wait for completion. */
+    p_instance_ctrl->p_reg_rmac->MPSM_b.PSME = 1;
+    FSP_HARDWARE_REGISTER_WAIT(p_instance_ctrl->p_reg_rmac->MPSM_b.PSME, 0);
+
+    return FSP_SUCCESS;
+} /* End of function R_RMAC_PHY_Write() */
+
+/********************************************************************************************************************//**
+ * @brief Update the target PHY LSI of this driver.
+ *
+ * @retval FSP_SUCCESS Target PHY LSI updated successfully.
+ * @retval FSP_ERR_ASSERTION Pointer to RMAC_PHY control block is NULL.
+ * @retval FSP_ERR_NOT_OPEN The control block has not been opened.
+ * @retval FSP_ERR_INVALID_ARGUMENT Invalid PHY LSI is selected.
+ ***********************************************************************************************************************/
+fsp_err_t R_RMAC_PHY_ChipSelect (ether_phy_ctrl_t * const p_ctrl, uint8_t port)
+{
+    rmac_phy_instance_ctrl_t * p_instance_ctrl = (rmac_phy_instance_ctrl_t *) p_ctrl;
+    rmac_phy_extended_cfg_t * p_extend;
+#if (RMAC_PHY_CFG_PARAM_CHECKING_ENABLE)
+    FSP_ASSERT(NULL != p_instance_ctrl);
+    RMAC_PHY_ERROR_RETURN(RMAC_PHY_OPEN == p_instance_ctrl->open, FSP_ERR_NOT_OPEN);
+    RMAC_PHY_ERROR_RETURN(BSP_FEATURE_ETHER_NUM_CHANNELS > port, FSP_ERR_INVALID_ARGUMENT);
+#endif
+    p_extend = (rmac_phy_extended_cfg_t *) p_instance_ctrl->p_ether_phy_cfg->p_extend;
+    FSP_ERROR_RETURN(NULL != p_extend->p_phy_lsi_cfg_list[port], FSP_ERR_INVALID_ARGUMENT);
+
+    /* Update the target PHY LSI.
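+     * Subsequent R_RMAC_PHY_Read()/R_RMAC_PHY_Write() calls and PHY-specific setup use the
+     * p_phy_lsi_cfg_list[port] entry selected here.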
*/ + p_instance_ctrl->phy_lsi_cfg_index = port; + + return FSP_SUCCESS; +} + +/*******************************************************************************************************************//** + * @} (end addtogroup RMAC_PHY) + **********************************************************************************************************************/ + +/** + * Private functions + */ + +/*********************************************************************************************************************** + * Function Name: rmac_phy_targets_initialize + * Description : PHY-LSI specific initialization processing + * Arguments : p_instance_ctrl - + * Ethernet control block + * Return Value : none + ***********************************************************************************************************************/ +static void rmac_phy_targets_initialize (rmac_phy_instance_ctrl_t * p_instance_ctrl) +{ + rmac_phy_extended_cfg_t * p_extend = (rmac_phy_extended_cfg_t *) p_instance_ctrl->p_ether_phy_cfg->p_extend; + + switch (p_extend->p_phy_lsi_cfg_list[p_instance_ctrl->phy_lsi_cfg_index]->type) + { + /* Use KSZ8091RNB */ +#if (ETHER_PHY_CFG_TARGET_KSZ8091RNB_ENABLE) + case ETHER_PHY_LSI_TYPE_KSZ8091RNB: + { + rmac_phy_target_ksz8091rnb_initialize(p_instance_ctrl); + break; + } +#endif + + /* Use KSZ8041 */ +#if (ETHER_PHY_CFG_TARGET_KSZ8041_ENABLE) + case ETHER_PHY_LSI_TYPE_KSZ8041: + { + rmac_phy_target_ksz8041_initialize(p_instance_ctrl); + break; + } +#endif + + /* Use DP83620 */ +#if (ETHER_PHY_CFG_TARGET_DP83620_ENABLE) + case ETHER_PHY_LSI_TYPE_DP83620: + { + rmac_phy_target_dp83620_initialize(p_instance_ctrl); + break; + } +#endif + + /* Use ICS1894 */ +#if (ETHER_PHY_CFG_TARGET_ICS1894_ENABLE) + case ETHER_PHY_LSI_TYPE_ICS1894: + { + rmac_phy_target_ics1894_initialize(p_instance_ctrl); + break; + } +#endif + + /* Use GPY111 */ +#if (ETHER_PHY_CFG_TARGET_GPY111_ENABLE) + case ETHER_PHY_LSI_TYPE_GPY111: + { + rmac_phy_target_gpy111_initialize(p_instance_ctrl); + break; + } +#endif + + /* User custom */ +#if (ETHER_PHY_CFG_USE_CUSTOM_PHY_LSI_ENABLE) + case ETHER_PHY_LSI_TYPE_CUSTOM: + { + if (NULL != p_instance_ctrl->p_ether_phy_cfg->p_extend) + { + rmac_phy_extended_cfg_t const * p_callback = p_instance_ctrl->p_ether_phy_cfg->p_extend; + if (NULL != p_callback->p_port_custom_init) + { + p_callback->p_port_custom_init(p_instance_ctrl); + } + } + + break; + } +#endif + + /* If module is configured for default LSI */ + default: + { + break; + } + } +} /* End of function rmac_phy_targets_initialize() */ + +/*********************************************************************************************************************** + * Function Name: rmac_phy_targets_is_support_link_partner_ability + * Description : Check if the PHY-LSI connected Ethernet controller supports link ability + * Arguments : p_instance_ctrl - + * Ethernet control block + * line_speed_duplex - + * Line speed duplex of link partner PHY-LSI + * Return Value : bool + ***********************************************************************************************************************/ +static bool rmac_phy_targets_is_support_link_partner_ability (rmac_phy_instance_ctrl_t * p_instance_ctrl, + uint32_t line_speed_duplex) +{ + rmac_phy_extended_cfg_t * p_extend = (rmac_phy_extended_cfg_t *) p_instance_ctrl->p_ether_phy_cfg->p_extend; + bool result = false; + switch (p_extend->p_phy_lsi_cfg_list[p_instance_ctrl->phy_lsi_cfg_index]->type) + { + /* Use KSZ8091RNB */ +#if (ETHER_PHY_CFG_TARGET_KSZ8091RNB_ENABLE) + case 
ETHER_PHY_LSI_TYPE_KSZ8091RNB:
+        {
+            result = rmac_phy_target_ksz8091rnb_is_support_link_partner_ability(p_instance_ctrl, line_speed_duplex);
+            break;
+        }
+#endif
+
+        /* Use KSZ8041 */
+#if (ETHER_PHY_CFG_TARGET_KSZ8041_ENABLE)
+        case ETHER_PHY_LSI_TYPE_KSZ8041:
+        {
+            result = rmac_phy_target_ksz8041_is_support_link_partner_ability(p_instance_ctrl, line_speed_duplex);
+            break;
+        }
+#endif
+
+        /* Use DP83620 */
+#if (ETHER_PHY_CFG_TARGET_DP83620_ENABLE)
+        case ETHER_PHY_LSI_TYPE_DP83620:
+        {
+            result = rmac_phy_target_dp83620_is_support_link_partner_ability(p_instance_ctrl, line_speed_duplex);
+            break;
+        }
+#endif
+
+        /* Use ICS1894 */
+#if (ETHER_PHY_CFG_TARGET_ICS1894_ENABLE)
+        case ETHER_PHY_LSI_TYPE_ICS1894:
+        {
+            result = rmac_phy_target_ics1894_is_support_link_partner_ability(p_instance_ctrl, line_speed_duplex);
+            break;
+        }
+#endif
+
+        /* Use GPY111 */
+#if (ETHER_PHY_CFG_TARGET_GPY111_ENABLE)
+        case ETHER_PHY_LSI_TYPE_GPY111:
+        {
+            result = rmac_phy_target_gpy111_is_support_link_partner_ability(p_instance_ctrl, line_speed_duplex);
+            break;
+        }
+#endif
+
+        /* User custom */
+#if (ETHER_PHY_CFG_USE_CUSTOM_PHY_LSI_ENABLE)
+        case ETHER_PHY_LSI_TYPE_CUSTOM:
+        {
+            if (NULL != p_instance_ctrl->p_ether_phy_cfg->p_extend)
+            {
+                rmac_phy_extended_cfg_t const * p_callback = p_instance_ctrl->p_ether_phy_cfg->p_extend;
+                if (NULL != p_callback->p_port_custom_link_partner_ability_get)
+                {
+                    result = p_callback->p_port_custom_link_partner_ability_get(p_instance_ctrl, line_speed_duplex);
+                }
+            }
+
+            break;
+        }
+#endif
+
+        /* If module is configured for default LSI, always return true */
+        default:
+        {
+            result = true;
+            break;
+        }
+    }
+
+    return result;
+} /* End of function rmac_phy_targets_is_support_link_partner_ability() */
+
+/***********************************************************************************************************************
+ * Function Name: r_rmac_phy_calculate_mpic
+ * Description : Calculate MPIC register value.
+ * Arguments : p_instance_ctrl -
+ *                 Ethernet control block
+ *             line_speed_duplex -
+ *                 Line speed duplex of link partner PHY-LSI
+ * Return Value : uint32_t
+ ***********************************************************************************************************************/
+static uint32_t r_rmac_phy_calculate_mpic (rmac_phy_instance_ctrl_t * p_instance_ctrl, uint32_t line_speed_duplex)
+{
+    rmac_phy_extended_cfg_t * p_extend = (rmac_phy_extended_cfg_t *) p_instance_ctrl->p_ether_phy_cfg->p_extend;
+    uint32_t mpic;
+    uint32_t mpic_psmcs;
+    uint32_t mpic_pis;
+    uint32_t mpic_lsc;
+    uint32_t eswclk_frequency;
+
+    /* Get frequency of ESWCLK. */
+    eswclk_frequency = R_BSP_SourceClockHzGet((fsp_priv_source_clock_t) (R_SYSTEM->ESWCKCR_b.CKSEL)) /
+                       R_FSP_ClockDividerGet(R_SYSTEM->ESWCKDIVCR_b.CKDIV);
+
+    /* Calculate cycle of MDC clock. */
+    mpic_psmcs = ((eswclk_frequency / p_extend->mdc_clock_rate) / 2) - 1;
+
+    /* Configure interface. */
+    if ((ETHER_PHY_MII_TYPE_MII == p_instance_ctrl->p_ether_phy_cfg->mii_type) ||
+        (ETHER_PHY_MII_TYPE_RMII == p_instance_ctrl->p_ether_phy_cfg->mii_type))
+    {
+        /* MII, 100Mbps */
+        mpic_pis = 0;
+    }
+    else
+    {
+        /* GMII, 1000Mbps */
+        mpic_pis = 2;
+    }
+
+    /* Configure link speed. */
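+    /* (The value placed in MPIC.LSC encodes the negotiated link speed: 0 = 10 Mbps, 1 = 100 Mbps,
+     * 2 = 1000 Mbps, matching the cases below.) */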
+    switch (line_speed_duplex)
+    {
+        case ETHER_PHY_LINK_SPEED_10H:
+        case ETHER_PHY_LINK_SPEED_10F:
+        {
+            mpic_lsc = 0;
+            break;
+        }
+
+        case ETHER_PHY_LINK_SPEED_100H:
+        case ETHER_PHY_LINK_SPEED_100F:
+        {
+            mpic_lsc = 1;
+            break;
+        }
+
+        case ETHER_PHY_LINK_SPEED_1000H:
+        case ETHER_PHY_LINK_SPEED_1000F:
+        {
+            mpic_lsc = 2;
+            break;
+        }
+
+        default:
+        {
+            mpic_lsc = 0;
+            break;
+        }
+    }
+
+    mpic = (R_RMAC0_MPIC_PSMCS_Msk & (mpic_psmcs << R_RMAC0_MPIC_PSMCS_Pos)) |
+           (R_RMAC0_MPIC_PIS_Msk & (mpic_pis << R_RMAC0_MPIC_PIS_Pos)) |
+           (R_RMAC0_MPIC_LSC_Msk & (mpic_lsc << R_RMAC0_MPIC_LSC_Pos)) |
+           (R_RMAC0_MPIC_PSMCT_Msk & ((uint32_t) p_extend->mdio_capture_time << R_RMAC0_MPIC_PSMCT_Pos)) |
+           (R_RMAC0_MPIC_PSMHT_Msk & ((uint32_t) p_extend->mdio_hold_time << R_RMAC0_MPIC_PSMHT_Pos));
+
+    return mpic;
+}
+
+/***********************************************************************************************************************
+ * Function Name: r_rmac_phy_get_operation_mode
+ * Description : Get operation mode of ETHA.
+ * Arguments : p_instance_ctrl -
+ *                 Ethernet control block
+ * Return Value : uint8_t
+ ***********************************************************************************************************************/
+uint8_t r_rmac_phy_get_operation_mode (rmac_phy_instance_ctrl_t * p_instance_ctrl)
+{
+    R_ETHA0_Type * p_etha_reg =
+        (R_ETHA0_Type *) (R_ETHA0_BASE + (ETHA_REG_SIZE * p_instance_ctrl->p_ether_phy_cfg->channel));
+
+    /* Return operation mode of ETHA IP. */
+    return p_etha_reg->EAMS_b.OPS;
+}
+
+/***********************************************************************************************************************
+ * Function Name: r_rmac_phy_set_operation_mode
+ * Description : Change operation mode of ETHA.
+ * Arguments : channel -
+ *                 ETHA channel number
+ *             mode -
+ *                 New operation mode
+ * Return Value : none
+ ***********************************************************************************************************************/
+void r_rmac_phy_set_operation_mode (uint8_t channel, uint8_t mode)
+{
+    R_ETHA0_Type * p_etha_reg =
+        (R_ETHA0_Type *) (R_ETHA0_BASE + (ETHA_REG_SIZE * channel));
+
+    /* Mode transition */
+    p_etha_reg->EAMC_b.OPC = R_ETHA0_EAMC_OPC_Msk & mode;
+    FSP_HARDWARE_REGISTER_WAIT(p_etha_reg->EAMS_b.OPS, mode);
+}
+
+/***********************************************************************************************************************
+ * Function Name: r_rmac_phy_set_mii_type_configuration
+ * Description : Set MII type configuration for the port.
+ * Arguments : p_instance_ctrl -
+ *                 Ethernet control block
+ *             port -
+ *                 Port (PHY LSI configuration index) to configure
+ * Return Value : none
+ ***********************************************************************************************************************/
+void r_rmac_phy_set_mii_type_configuration (rmac_phy_instance_ctrl_t * p_instance_ctrl, uint8_t port)
+{
+    volatile uint32_t * p_miicr_register;
+
+    /* Configure pins for MII or RMII. Set PHYMODE0 if MII is selected. */
+    R_PMISC->PFENET =
+        (uint8_t) ((ETHER_PHY_MII_TYPE_MII ==
+                    p_instance_ctrl->p_ether_phy_cfg->mii_type) << (R_PMISC_PFENET_PHYMODE0_Pos + port));
+
+    /* Get pointer to a MIICRn register. */
+    p_miicr_register = &(R_ESWM->MIICR0) + port;
+
+    /* Configure ESWM as MII, RMII, or RGMII. */
+    switch (p_instance_ctrl->p_ether_phy_cfg->mii_type)
+    {
+        case ETHER_PHY_MII_TYPE_RMII:
+        {
+            *p_miicr_register = 2;
+            break;
+        }
+
+        case ETHER_PHY_MII_TYPE_RGMII:
+        {
+            *p_miicr_register = (R_ESWM_MIICR0_TXCIDE_Msk | 1);
+
+            /* Enable TXC generation. */
+            R_ESWM->MIIRR = R_ESWM->MIIRR | (1 << (R_ESWM_MIIRR_RGRST_Pos + port));
+            break;
+        }
+
+        default:
+        {
+            /* MII or GMII.
*/ + *p_miicr_register = 0; + break; + } + } +} diff --git a/zephyr/ra/ra_cfg/fsp_cfg/r_gptp_cfg.h b/zephyr/ra/ra_cfg/fsp_cfg/r_gptp_cfg.h new file mode 100644 index 00000000..84cc68c9 --- /dev/null +++ b/zephyr/ra/ra_cfg/fsp_cfg/r_gptp_cfg.h @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2025 Renesas Electronics Corporation and/or its affiliates + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef R_GPTP_CFG_H_ +#define R_GPTP_CFG_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#define GPTP_CFG_PARAM_CHECKING_ENABLE (BSP_CFG_PARAM_CHECKING_ENABLE) + +#ifdef __cplusplus +} +#endif +#endif /* R_GPTP_CFG_H_ */ diff --git a/zephyr/ra/ra_cfg/fsp_cfg/r_layer3_switch_cfg.h b/zephyr/ra/ra_cfg/fsp_cfg/r_layer3_switch_cfg.h new file mode 100644 index 00000000..b3d2ce3e --- /dev/null +++ b/zephyr/ra/ra_cfg/fsp_cfg/r_layer3_switch_cfg.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2025 Renesas Electronics Corporation and/or its affiliates + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef R_LAYER3_SWITCH_CFG_H_ +#define R_LAYER3_SWITCH_CFG_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#define LAYER3_SWITCH_CFG_PARAM_CHECKING_ENABLE (BSP_CFG_PARAM_CHECKING_ENABLE) + +#define LAYER3_SWITCH_GET_QUEUE_NUM(id, compat, prop) \ + COND_CODE_1(DT_NODE_HAS_COMPAT(id,compat),(DT_PROP(id,prop)),(0)) + +#define LAYER3_SWITCH_CFG_AVAILABLE_QUEUE_NUM \ + (DT_FOREACH_CHILD_SEP_VARGS(DT_NODELABEL(eswm), LAYER3_SWITCH_GET_QUEUE_NUM, \ + (+), renesas_ra_ethernet_rmac, txq_num) + \ + DT_FOREACH_CHILD_SEP_VARGS(DT_NODELABEL(eswm), LAYER3_SWITCH_GET_QUEUE_NUM, \ + (+), renesas_ra_ethernet_rmac, rxq_num)) + +#ifdef __cplusplus +} +#endif +#endif /* R_LAYER3_SWITCH_CFG_H_ */ diff --git a/zephyr/ra/ra_cfg/fsp_cfg/r_rmac_cfg.h b/zephyr/ra/ra_cfg/fsp_cfg/r_rmac_cfg.h new file mode 100644 index 00000000..6a48b71b --- /dev/null +++ b/zephyr/ra/ra_cfg/fsp_cfg/r_rmac_cfg.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2025 Renesas Electronics Corporation and/or its affiliates + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef R_RMAC_CFG_H_ +#define R_RMAC_CFG_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#define RMAC_CFG_PARAM_CHECKING_ENABLE (BSP_CFG_PARAM_CHECKING_ENABLE) +#define RMAC_CFG_SKIP_PHY_LINK_ABILITY_CHECK + +#ifdef __cplusplus +} +#endif +#endif /* R_RMAC_CFG_H_ */ diff --git a/zephyr/ra/ra_cfg/fsp_cfg/r_rmac_phy_cfg.h b/zephyr/ra/ra_cfg/fsp_cfg/r_rmac_phy_cfg.h new file mode 100644 index 00000000..bae04558 --- /dev/null +++ b/zephyr/ra/ra_cfg/fsp_cfg/r_rmac_phy_cfg.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2025 Renesas Electronics Corporation and/or its affiliates + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef R_RMAC_PHY_CFG_H_ +#define R_RMAC_PHY_CFG_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#define RMAC_PHY_CFG_PARAM_CHECKING_ENABLE (BSP_CFG_PARAM_CHECKING_ENABLE) +#ifndef ETHER_PHY_CFG_TARGET_KSZ8091RNB_ENABLE +#define ETHER_PHY_CFG_TARGET_KSZ8091RNB_ENABLE (0) +#endif +#ifndef ETHER_PHY_CFG_TARGET_KSZ8041_ENABLE +#define ETHER_PHY_CFG_TARGET_KSZ8041_ENABLE (0) +#endif +#ifndef ETHER_PHY_CFG_TARGET_DP83620_ENABLE +#define ETHER_PHY_CFG_TARGET_DP83620_ENABLE (0) +#endif +#ifndef ETHER_PHY_CFG_TARGET_ICS1894_ENABLE +#define ETHER_PHY_CFG_TARGET_ICS1894_ENABLE (0) +#endif +#ifndef ETHER_PHY_CFG_TARGET_GPY111_ENABLE +#define ETHER_PHY_CFG_TARGET_GPY111_ENABLE (0) +#endif +#ifndef ETHER_PHY_CFG_USE_CUSTOM_PHY_LSI_ENABLE +#define ETHER_PHY_CFG_USE_CUSTOM_PHY_LSI_ENABLE (0) +#endif +#define ETHER_PHY_CFG_USE_REF_CLK (BOARD_PHY_REF_CLK) + +#define RMAC_PHY_CFG_CUSTOM_PHY_INIT + +#ifdef 
__cplusplus +} +#endif +#endif /* R_RMAC_PHY_CFG_H_ */