@@ -863,20 +863,22 @@ test_spinlock(void)
 	 */
 #ifndef HAVE_SPINLOCKS
 	{
+		uint32		i;
+
 		/*
 		 * Initialize enough spinlocks to advance counter close to
 		 * wraparound. It's too expensive to perform acquire/release for each,
 		 * as those may be syscalls when the spinlock emulation is used (and
 		 * even just atomic TAS would be expensive).
 		 */
-		for (uint32 i = 0; i < INT32_MAX - 100000; i++)
+		for (i = 0; i < INT32_MAX - 100000; i++)
 		{
 			slock_t		lock;
 
 			SpinLockInit(&lock);
 		}
 
-		for (uint32 i = 0; i < 200000; i++)
+		for (i = 0; i < 200000; i++)
 		{
 			slock_t		lock;
 
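Both hunks make the same mechanical change: the loop counter moves out of the for (...) header into a declaration at the start of the enclosing block. C99 permits declaring the counter inside the for statement, but C89/C90 (which older PostgreSQL branches are built against) requires all declarations before the first statement of a block, which is presumably what this backpatch accommodates. A minimal standalone sketch of the target pattern (hypothetical example, not part of this commit; compiles with cc -std=c89 -pedantic):

#include <stdio.h>

int
main(void)
{
	unsigned int i;		/* declared at block start, as C89 requires */

	for (i = 0; i < 3; i++)
		printf("iteration %u\n", i);

	return 0;
}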
@@ -912,25 +914,26 @@ test_atomic_spin_nest(void)
 #define NUM_TEST_ATOMICS	(NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
 	pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
 	pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];
+	int			i;
 
 	SpinLockInit(&lock);
 
-	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	for (i = 0; i < NUM_TEST_ATOMICS; i++)
 	{
 		pg_atomic_init_u32(&atomics32[i], 0);
 		pg_atomic_init_u64(&atomics64[i], 0);
 	}
 
 	/* just so it's not all zeroes */
-	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	for (i = 0; i < NUM_TEST_ATOMICS; i++)
 	{
 		EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0);
 		EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0);
 	}
 
 	/* test whether we can do atomic op with lock held */
 	SpinLockAcquire(&lock);
-	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	for (i = 0; i < NUM_TEST_ATOMICS; i++)
 	{
 		EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i);
 		EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0);
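The second hunk's test performs atomic operations while a spinlock is held, which matters when atomics fall back to the semaphore-based spinlock emulation. A rough sketch of that nesting (assumes PostgreSQL backend headers; mutex, counter, and bump_under_lock are hypothetical names, not from this commit):

#include "postgres.h"

#include "port/atomics.h"
#include "storage/spin.h"

static slock_t mutex;
static pg_atomic_uint32 counter;

static void
bump_under_lock(void)
{
	SpinLockInit(&mutex);
	pg_atomic_init_u32(&counter, 0);

	/*
	 * If an emulated atomic op took the same underlying semaphore as the
	 * held spinlock, this nesting could self-deadlock; the test above
	 * loops over NUM_TEST_ATOMICS variables, sized in its #define to
	 * exceed the available spinlock and atomics semaphores.
	 */
	SpinLockAcquire(&mutex);
	pg_atomic_fetch_add_u32(&counter, 1);
	SpinLockRelease(&mutex);
}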