@@ -19,6 +19,7 @@
 #include <string>
 #include <unordered_map>
 #include <vector>
+#include <atomic>

 #include "ggml-alloc.h"
 #include "ggml-backend.h"
@@ -60,18 +61,26 @@
 #define SD_UNUSED(x) (void)(x)
 #endif

-inline bool& sd_global_circular_padding_enabled() {
-    static bool enabled = false;
+inline std::atomic<bool>& sd_circular_padding_flag() {
+    static std::atomic<bool> enabled{false};
     return enabled;
 }

+inline void sd_set_circular_padding_enabled(bool enabled) {
+    sd_circular_padding_flag().store(enabled, std::memory_order_relaxed);
+}
+
+inline bool sd_is_circular_padding_enabled() {
+    return sd_circular_padding_flag().load(std::memory_order_relaxed);
+}
+
 __STATIC_INLINE__ struct ggml_tensor* sd_pad(struct ggml_context* ctx,
                                              struct ggml_tensor* a,
                                              int p0,
                                              int p1,
                                              int p2,
                                              int p3) {
-    if (sd_global_circular_padding_enabled()) {
+    if (sd_is_circular_padding_enabled()) {
         return ggml_pad_circular(ctx, a, 0, p0, 0, p1, 0, p2, 0, p3);
     }
     return ggml_pad(ctx, a, p0, p1, p2, p3);
@@ -87,7 +96,7 @@ __STATIC_INLINE__ struct ggml_tensor* sd_pad_ext(struct ggml_context* ctx,
                                                  int rp2,
                                                  int lp3,
                                                  int rp3) {
-    if (sd_global_circular_padding_enabled()) {
+    if (sd_is_circular_padding_enabled()) {
         return ggml_pad_circular(ctx, a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3);
     }
     return ggml_pad_ext(ctx, a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3);
@@ -1019,7 +1028,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_conv_2d(struct ggml_context* ctx,
     if (scale != 1.f) {
         x = ggml_scale(ctx, x, scale);
     }
-    const bool use_circular = sd_global_circular_padding_enabled() && (p0 != 0 || p1 != 0);
+    const bool use_circular = sd_is_circular_padding_enabled() && (p0 != 0 || p1 != 0);
     const bool is_depthwise = (w->ne[2] == 1 && x->ne[2] == w->ne[3]);
     if (direct) {
         if (use_circular) {
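
A minimal usage sketch of the new toggle, assuming a caller flips the flag once before graph construction; build_graph below is a hypothetical stand-in for the actual graph-building code, not a function from this patch:

    // Hypothetical caller: enable circular ("tileable") padding so that
    // sd_pad()/sd_pad_ext() route to ggml_pad_circular() while the graph
    // is being built, then restore the default zero padding.
    sd_set_circular_padding_enabled(true);
    struct ggml_tensor* out = build_graph(ctx);  // build_graph is hypothetical
    sd_set_circular_padding_enabled(false);

The flag lives in a function-local static std::atomic<bool> (a Meyers singleton), so its initialization is thread-safe, and the relaxed loads and stores make concurrent toggling free of data races. Relaxed ordering does not synchronize other memory operations, though, so the flag should be set before any graph-building work is launched.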