 #include "esp_cpu.h"
 #include "soc/soc_caps_full.h"
 #include "soc/io_mux_reg.h"
+#include "hal/dedic_gpio_caps.h"
 #include "hal/dedic_gpio_cpu_ll.h"
 #include "esp_private/gpio.h"
 #include "esp_private/periph_ctrl.h"
 #include "driver/dedic_gpio.h"
 #include "hal/dedic_gpio_periph.h"
 
-#if DEDIC_GPIO_LL_ALLOW_REG_ACCESS
+#if DEDIC_GPIO_CAPS_GET(ALLOW_REG_ACCESS)
 #include "soc/dedic_gpio_struct.h"
 #endif
-#if !DEDIC_GPIO_CPU_LL_PERIPH_ALWAYS_ENABLE
+#if !DEDIC_GPIO_CAPS_GET(CPU_PERIPH_ALWAYS_ENABLE)
 #include "hal/dedic_gpio_ll.h"
 #endif
 
@@ -50,11 +51,11 @@ struct dedic_gpio_platform_t {
     uint32_t in_occupied_mask;  // mask of input channels that already occupied
 #if SOC_DEDIC_GPIO_HAS_INTERRUPT
     intr_handle_t intr_hdl;     // interrupt handle
-    dedic_gpio_isr_callback_t cbs[SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU)];    // array of callback function for input channel
-    void *cb_args[SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU)];                    // array of callback arguments for input channel
-    dedic_gpio_bundle_t *in_bundles[SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU)];  // which bundle belongs to for input channel
+    dedic_gpio_isr_callback_t cbs[DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU)];    // array of callback function for input channel
+    void *cb_args[DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU)];                    // array of callback arguments for input channel
+    dedic_gpio_bundle_t *in_bundles[DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU)];  // which bundle belongs to for input channel
 #endif
-#if DEDIC_GPIO_LL_ALLOW_REG_ACCESS
+#if DEDIC_GPIO_CAPS_GET(ALLOW_REG_ACCESS)
     dedic_dev_t *dev;
 #endif
 };
@@ -81,18 +82,18 @@ static esp_err_t dedic_gpio_build_platform(int core_id)
                 // initialize platform members
                 s_platform[core_id]->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
                 // initial occupy_mask: 1111...100...0
-                s_platform[core_id]->out_occupied_mask = UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_ATTR(OUT_CHANS_PER_CPU)) - 1);
-                s_platform[core_id]->in_occupied_mask = UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU)) - 1);
-#if DEDIC_GPIO_LL_ALLOW_REG_ACCESS
+                s_platform[core_id]->out_occupied_mask = UINT32_MAX & ~((1 << DEDIC_GPIO_CAPS_GET(OUT_CHANS_PER_CPU)) - 1);
+                s_platform[core_id]->in_occupied_mask = UINT32_MAX & ~((1 << DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU)) - 1);
+#if DEDIC_GPIO_CAPS_GET(ALLOW_REG_ACCESS)
                 s_platform[core_id]->dev = &DEDIC_GPIO;
-#endif // DEDIC_GPIO_LL_ALLOW_REG_ACCESS
-#if !DEDIC_GPIO_CPU_LL_PERIPH_ALWAYS_ENABLE
+#endif // DEDIC_GPIO_CAPS_GET(ALLOW_REG_ACCESS)
+#if !DEDIC_GPIO_CAPS_GET(CPU_PERIPH_ALWAYS_ENABLE)
                 // enable dedicated GPIO register clock
                 PERIPH_RCC_ATOMIC() {
                     dedic_gpio_ll_enable_bus_clock(true);
                     dedic_gpio_ll_reset_register();
                 }
-#endif // !DEDIC_GPIO_CPU_LL_PERIPH_ALWAYS_ENABLE
+#endif // !DEDIC_GPIO_CAPS_GET(CPU_PERIPH_ALWAYS_ENABLE)
             }
         }
         _lock_release(&s_platform_mutexlock[core_id]);
@@ -113,12 +114,12 @@ static void dedic_gpio_break_platform(int core_id)
     if (s_platform[core_id]) {
         free(s_platform[core_id]);
         s_platform[core_id] = NULL;
-#if !DEDIC_GPIO_CPU_LL_PERIPH_ALWAYS_ENABLE
+#if !DEDIC_GPIO_CAPS_GET(CPU_PERIPH_ALWAYS_ENABLE)
         // disable the register clock if no GPIO channel is in use
         PERIPH_RCC_ATOMIC() {
             dedic_gpio_ll_enable_bus_clock(false);
         }
-#endif // !DEDIC_GPIO_CPU_LL_PERIPH_ALWAYS_ENABLE
+#endif // !DEDIC_GPIO_CAPS_GET(CPU_PERIPH_ALWAYS_ENABLE)
     }
     _lock_release(&s_platform_mutexlock[core_id]);
 }
@@ -222,11 +223,11 @@ esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_
     // configure outwards channels
     uint32_t out_offset = 0;
     if (config->flags.out_en) {
-        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_ATTR(OUT_CHANS_PER_CPU), ESP_ERR_INVALID_ARG, err, TAG,
-                          "array size(%d) exceeds maximum supported out channels(%d)", config->array_size, SOC_DEDIC_GPIO_ATTR(OUT_CHANS_PER_CPU));
+        ESP_GOTO_ON_FALSE(config->array_size <= DEDIC_GPIO_CAPS_GET(OUT_CHANS_PER_CPU), ESP_ERR_INVALID_ARG, err, TAG,
+                          "array size(%d) exceeds maximum supported out channels(%d)", config->array_size, DEDIC_GPIO_CAPS_GET(OUT_CHANS_PER_CPU));
         // prevent install bundle concurrently
         portENTER_CRITICAL(&s_platform[core_id]->spinlock);
-        for (size_t i = 0; i <= SOC_DEDIC_GPIO_ATTR(OUT_CHANS_PER_CPU) - config->array_size; i++) {
+        for (size_t i = 0; i <= DEDIC_GPIO_CAPS_GET(OUT_CHANS_PER_CPU) - config->array_size; i++) {
             if ((s_platform[core_id]->out_occupied_mask & (pattern << i)) == 0) {
                 out_mask = pattern << i;
                 out_offset = i;
@@ -235,7 +236,7 @@ esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_
         }
         if (out_mask) {
             s_platform[core_id]->out_occupied_mask |= out_mask;
-#if DEDIC_GPIO_LL_ALLOW_REG_ACCESS
+#if DEDIC_GPIO_CAPS_GET(ALLOW_REG_ACCESS)
             // always enable instruction to access output GPIO, which has better performance than register access
             dedic_gpio_ll_enable_instruction_access_out(s_platform[core_id]->dev, out_mask, true);
 #endif
@@ -248,11 +249,11 @@ esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_
     // configure inwards channels
     uint32_t in_offset = 0;
     if (config->flags.in_en) {
-        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU), ESP_ERR_INVALID_ARG, err, TAG,
-                          "array size(%d) exceeds maximum supported in channels(%d)", config->array_size, SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU));
+        ESP_GOTO_ON_FALSE(config->array_size <= DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU), ESP_ERR_INVALID_ARG, err, TAG,
+                          "array size(%d) exceeds maximum supported in channels(%d)", config->array_size, DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU));
         // prevent install bundle concurrently
         portENTER_CRITICAL(&s_platform[core_id]->spinlock);
-        for (size_t i = 0; i <= SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU) - config->array_size; i++) {
+        for (size_t i = 0; i <= DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU) - config->array_size; i++) {
             if ((s_platform[core_id]->in_occupied_mask & (pattern << i)) == 0) {
                 in_mask = pattern << i;
                 in_offset = i;
@@ -320,8 +321,8 @@ esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
     portENTER_CRITICAL(&s_platform[core_id]->spinlock);
     s_platform[core_id]->out_occupied_mask &= ~(bundle->out_mask);
     s_platform[core_id]->in_occupied_mask &= ~(bundle->in_mask);
-    if (s_platform[core_id]->in_occupied_mask == (UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_ATTR(IN_CHANS_PER_CPU)) - 1)) &&
-            s_platform[core_id]->out_occupied_mask == (UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_ATTR(OUT_CHANS_PER_CPU)) - 1))) {
+    if (s_platform[core_id]->in_occupied_mask == (UINT32_MAX & ~((1 << DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU)) - 1)) &&
+            s_platform[core_id]->out_occupied_mask == (UINT32_MAX & ~((1 << DEDIC_GPIO_CAPS_GET(OUT_CHANS_PER_CPU)) - 1))) {
         recycle_all = true;
     }
     portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
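
For reference, here is a minimal standalone sketch of the occupied-mask bookkeeping that the hunks above touch. EXAMPLE_CHANS_PER_CPU is a hypothetical stand-in for the per-CPU channel count that the driver now reads via DEDIC_GPIO_CAPS_GET(OUT_CHANS_PER_CPU) / DEDIC_GPIO_CAPS_GET(IN_CHANS_PER_CPU); the mask math mirrors the driver code but is illustration only, not the driver itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

// Hypothetical channel count used only for this sketch.
#define EXAMPLE_CHANS_PER_CPU 8

int main(void)
{
    // Initial occupy mask "1111...100...0": the upper (32 - N) bits are permanently
    // set so that only the N real channels can ever be handed out.
    uint32_t occupied_mask = UINT32_MAX & ~((1u << EXAMPLE_CHANS_PER_CPU) - 1);
    printf("initial mask: 0x%08" PRIX32 "\n", occupied_mask); // 0xFFFFFF00 for N = 8

    // Allocate a bundle of 3 consecutive channels, mirroring the search loop above:
    // slide a 3-bit pattern until it lands on a fully free (zero) region of the mask.
    size_t array_size = 3;
    uint32_t pattern = (1u << array_size) - 1; // 0b111
    uint32_t bundle_mask = 0;
    for (size_t i = 0; i <= EXAMPLE_CHANS_PER_CPU - array_size; i++) {
        if ((occupied_mask & (pattern << i)) == 0) {
            bundle_mask = pattern << i;
            break;
        }
    }
    occupied_mask |= bundle_mask; // mark the allocated channels as taken
    printf("bundle mask:  0x%08" PRIX32 "\n", bundle_mask);   // 0x00000007
    printf("new mask:     0x%08" PRIX32 "\n", occupied_mask); // 0xFFFFFF07
    return 0;
}

Deleting a bundle clears its bits again (occupied_mask &= ~bundle_mask), and the platform is recycled once the mask returns to its initial "all real channels free" value, which is exactly the comparison performed in dedic_gpio_del_bundle above.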