diff --git a/arch/lkl/include/uapi/asm/host_ops.h b/arch/lkl/include/uapi/asm/host_ops.h
index d5de01bc7120dd..88ef9c9288f6eb 100644
--- a/arch/lkl/include/uapi/asm/host_ops.h
+++ b/arch/lkl/include/uapi/asm/host_ops.h
@@ -151,4 +151,13 @@ int lkl_is_running(void);
 int lkl_printf(const char *, ...);
 void lkl_bug(const char *, ...);

+/* atomic ops */
+int lkl__sync_fetch_and_sub(int *ptr, int value);
+int lkl__sync_fetch_and_add(int *ptr, int value);
+long lkl__sync_fetch_and_or(long *ptr, long value);
+long lkl__sync_fetch_and_and(long *ptr, long value);
+void lkl__sync_synchronize(void);
+void atomic_ops_init(void);
+void atomic_ops_cleanup(void);
+
 #endif
diff --git a/arch/lkl/kernel/Makefile b/arch/lkl/kernel/Makefile
index ef489f2f717618..0021141dba280c 100644
--- a/arch/lkl/kernel/Makefile
+++ b/arch/lkl/kernel/Makefile
@@ -1,4 +1,4 @@
 extra-y := vmlinux.lds

 obj-y = setup.o threads.o irq.o time.o syscalls.o misc.o console.o \
-        syscalls_32.o cpu.o
+        syscalls_32.o cpu.o atomic.o
diff --git a/arch/lkl/kernel/atomic.c b/arch/lkl/kernel/atomic.c
new file mode 100644
index 00000000000000..a1129e40ee3cdb
--- /dev/null
+++ b/arch/lkl/kernel/atomic.c
@@ -0,0 +1,100 @@
+#include
+#include
+#include
+
+#if defined(__ARMEL__)
+static void *atomic_lock;
+
+long lkl__sync_fetch_and_or(long *ptr, long value)
+{
+        long tmp;
+
+        lkl_ops->sem_down(atomic_lock);
+        tmp = *ptr;
+        *ptr |= value;
+        lkl_ops->sem_up(atomic_lock);
+        return tmp;
+}
+
+long lkl__sync_fetch_and_and(long *ptr, long value)
+{
+        long tmp;
+
+        lkl_ops->sem_down(atomic_lock);
+        tmp = *ptr;
+        *ptr &= value;
+        lkl_ops->sem_up(atomic_lock);
+        return tmp;
+}
+
+int lkl__sync_fetch_and_add(int *ptr, int value)
+{
+        int tmp;
+
+        lkl_ops->sem_down(atomic_lock);
+        tmp = *ptr;
+        *ptr += value;
+        lkl_ops->sem_up(atomic_lock);
+        return tmp;
+}
+
+int lkl__sync_fetch_and_sub(int *ptr, int value)
+{
+        int tmp;
+
+        lkl_ops->sem_down(atomic_lock);
+        tmp = *ptr;
+        *ptr -= value;
+        lkl_ops->sem_up(atomic_lock);
+        return tmp;
+}
+
+void lkl__sync_synchronize(void)
+{
+}
+
+void atomic_ops_init(void)
+{
+        atomic_lock = lkl_ops->sem_alloc(1);
+}
+
+void atomic_ops_cleanup(void)
+{
+        lkl_ops->sem_free(atomic_lock);
+}
+
+#else
+long lkl__sync_fetch_and_or(long *ptr, long value)
+{
+        return __sync_fetch_and_or(ptr, value);
+}
+
+long lkl__sync_fetch_and_and(long *ptr, long value)
+{
+        return __sync_fetch_and_and(ptr, value);
+}
+
+int lkl__sync_fetch_and_add(int *ptr, int value)
+{
+        return __sync_fetch_and_add(ptr, value);
+}
+
+int lkl__sync_fetch_and_sub(int *ptr, int value)
+{
+        return __sync_fetch_and_sub(ptr, value);
+}
+
+void lkl__sync_synchronize(void)
+{
+        __sync_synchronize();
+}
+
+void atomic_ops_init(void)
+{
+}
+
+void atomic_ops_cleanup(void)
+{
+}
+#endif
+
diff --git a/arch/lkl/kernel/cpu.c b/arch/lkl/kernel/cpu.c
index 2c315262a935e3..dbc47f8b6f76a9 100644
--- a/arch/lkl/kernel/cpu.c
+++ b/arch/lkl/kernel/cpu.c
@@ -66,7 +66,7 @@ static int __cpu_try_get_lock(int n)
 {
         lkl_thread_t self;

-        if (__sync_fetch_and_add(&cpu.shutdown_gate, n) >= MAX_THREADS)
+        if (lkl__sync_fetch_and_add(&cpu.shutdown_gate, n) >= MAX_THREADS)
                 return -2;

         lkl_ops->mutex_lock(cpu.lock);
@@ -89,7 +89,7 @@ static void __cpu_try_get_unlock(int lock_ret, int n)
 {
         if (lock_ret >= -1)
                 lkl_ops->mutex_unlock(cpu.lock);
-        __sync_fetch_and_sub(&cpu.shutdown_gate, n);
+        lkl__sync_fetch_and_sub(&cpu.shutdown_gate, n);
 }

 void lkl_cpu_change_owner(lkl_thread_t owner)
@@ -173,7 +173,7 @@ int lkl_cpu_try_run_irq(int irq)

 void lkl_cpu_shutdown(void)
 {
-        __sync_fetch_and_add(&cpu.shutdown_gate, MAX_THREADS);
+        lkl__sync_fetch_and_add(&cpu.shutdown_gate, MAX_THREADS);
 }

 void lkl_cpu_wait_shutdown(void)
@@ -184,7 +184,7 @@ void lkl_cpu_wait_shutdown(void)

 static void lkl_cpu_cleanup(bool shutdown)
 {
-        while (__sync_fetch_and_add(&cpu.shutdown_gate, 0) > MAX_THREADS)
+        while (lkl__sync_fetch_and_add(&cpu.shutdown_gate, 0) > MAX_THREADS)
                 ;

         if (shutdown)
diff --git a/arch/lkl/kernel/irq.c b/arch/lkl/kernel/irq.c
index b5dbbaa7ebba39..0635afbaaeda70 100644
--- a/arch/lkl/kernel/irq.c
+++ b/arch/lkl/kernel/irq.c
@@ -27,14 +27,14 @@ static inline unsigned long test_and_clear_irq_index_status(void)
 {
         if (!irq_index_status)
                 return 0;
-        return __sync_fetch_and_and(&irq_index_status, 0);
+        return lkl__sync_fetch_and_and(&irq_index_status, 0);
 }

 static inline unsigned long test_and_clear_irq_status(int index)
 {
         if (!&irq_status[index])
                 return 0;
-        return __sync_fetch_and_and(&irq_status[index], 0);
+        return lkl__sync_fetch_and_and(&irq_status[index], 0);
 }

 void set_irq_pending(int irq)
@@ -42,8 +42,8 @@ void set_irq_pending(int irq)
         int index = irq / IRQ_STATUS_BITS;
         int bit = irq % IRQ_STATUS_BITS;

-        __sync_fetch_and_or(&irq_status[index], BIT(bit));
-        __sync_fetch_and_or(&irq_index_status, BIT(index));
+        lkl__sync_fetch_and_or(&irq_status[index], BIT(bit));
+        lkl__sync_fetch_and_or(&irq_index_status, BIT(index));
 }

 static struct irq_info {
diff --git a/arch/lkl/kernel/setup.c b/arch/lkl/kernel/setup.c
index 501d32175dc426..37edb46e2a75a2 100644
--- a/arch/lkl/kernel/setup.c
+++ b/arch/lkl/kernel/setup.c
@@ -35,6 +35,7 @@ void __init setup_arch(char **cl)

 static void __init lkl_run_kernel(void *arg)
 {
+        atomic_ops_init();
         threads_init();
         lkl_cpu_get();
         start_kernel();
@@ -126,6 +127,7 @@ long lkl_sys_halt(void)

         syscalls_cleanup();
         threads_cleanup();
+        atomic_ops_cleanup();
         /* Shutdown the clockevents source. */
         tick_suspend_local();
         free_mem();
diff --git a/arch/lkl/kernel/threads.c b/arch/lkl/kernel/threads.c
index db8a6b7e34f015..82e0e6825d9c2a 100644
--- a/arch/lkl/kernel/threads.c
+++ b/arch/lkl/kernel/threads.c
@@ -5,7 +5,7 @@
 #include
 #include

-static volatile int threads_counter;
+static int threads_counter;

 static int init_ti(struct thread_info *ti)
 {
@@ -123,7 +123,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
         }

         if (_prev->dead) {
-                __sync_fetch_and_sub(&threads_counter, 1);
+                lkl__sync_fetch_and_sub(&threads_counter, 1);
                 lkl_ops->thread_exit();
         }

@@ -193,7 +193,7 @@ int copy_thread(unsigned long clone_flags, unsigned long esp,
                 return -ENOMEM;
         }

-        __sync_fetch_and_add(&threads_counter, 1);
+        lkl__sync_fetch_and_add(&threads_counter, 1);

         return 0;
 }
@@ -220,7 +220,7 @@ void threads_init(void)

 void threads_cnt_dec(void)
 {
-        __sync_fetch_and_sub(&threads_counter, 1);
+        lkl__sync_fetch_and_sub(&threads_counter, 1);
 }

 void threads_cleanup(void)
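
For reference, both branches of arch/lkl/kernel/atomic.c are meant to match the semantics of the GCC __sync_fetch_and_* builtins they wrap: the update is applied atomically and the value *before* the update is returned. Below is a minimal standalone sketch of the locked fallback pattern. It uses a POSIX semaphore purely as a stand-in for the host's sem_alloc/sem_down/sem_up operations, and the helper names and test values are illustrative only, not part of the patch (build on Linux with something like cc -pthread).

#include <assert.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t lock;      /* stand-in for the semaphore from lkl_ops->sem_alloc(1) */

/* Same shape as the locked fallback: take the semaphore, remember the old
 * value, apply the update, release the semaphore, return the old value. */
static int fetch_and_add(int *ptr, int value)
{
        int tmp;

        sem_wait(&lock);
        tmp = *ptr;
        *ptr += value;
        sem_post(&lock);
        return tmp;
}

static long fetch_and_or(long *ptr, long value)
{
        long tmp;

        sem_wait(&lock);
        tmp = *ptr;
        *ptr |= value;
        sem_post(&lock);
        return tmp;
}

int main(void)
{
        int counter = 5;
        long flags = 0x1;

        sem_init(&lock, 0, 1);

        /* Like __sync_fetch_and_add(): the return value is the pre-update value. */
        assert(fetch_and_add(&counter, 3) == 5);
        assert(counter == 8);

        /* Like __sync_fetch_and_or(): returns the old mask, ORs in the new bits. */
        assert(fetch_and_or(&flags, 0x4) == 0x1);
        assert(flags == 0x5);

        sem_destroy(&lock);
        printf("fetch-and-* semantics ok\n");
        return 0;
}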