summaryrefslogtreecommitdiff
path: root/kernel/src/generic/lock/spinlock.hpp
blob: 2c2b2a6f3ce9b4a291f037260d74eb6cf3c3814c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
#pragma once
#include <atomic>
#include <cstdint>

#include <generic/arch.hpp>

namespace locks {
    // Global kill-switch: while true, lock() calls become no-ops (used before
    // locking is meaningful, e.g. early boot). Use a bool literal, not int 0.
    // NOTE(review): not itself synchronized; presumably only toggled while
    // single-threaded -- confirm against callers.
    inline bool is_disabled = false;

    // Spinlock that additionally masks interrupts for the duration of the
    // critical section. lock() returns the pre-lock interrupt state, which
    // the caller MUST pass back to unlock() so interrupts are re-enabled
    // only if they were enabled before.
    class preempt_spinlock {
    private:
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
    public:
        // Disable interrupts, then spin until the flag is acquired.
        // Returns: interrupt state prior to locking (for unlock()).
        // When locks::is_disabled is set, returns false immediately without
        // acquiring the flag or touching the interrupt state.
        bool lock() {
            if (is_disabled)
                return false;  // fixed: was the int literal 0 for a bool

            bool state = arch::test_interrupts();

            // Disable interrupts before spinning so an ISR on this CPU
            // cannot preempt us and deadlock on this same lock.
            arch::disable_interrupts();
            while (flag.test_and_set(std::memory_order_acquire)) {
                arch::pause();
            }

            return state;
        }

        // Release the lock and restore the interrupt state that lock()
        // returned (re-enables interrupts only if they were on before).
        void unlock(bool state) {
            flag.clear(std::memory_order_release);

            if (state)
                arch::enable_interrupts();
        }

        // Observe (without modifying) whether the lock is currently held.
        bool test() {
            return flag.test();
        }

        // Single acquisition attempt; true on success, no spinning.
        // NOTE(review): unlike lock(), this does not disable interrupts and
        // ignores is_disabled -- confirm callers account for that.
        bool try_lock() {
            return !flag.test_and_set(std::memory_order_acquire);
        }
    };

    // Plain test-and-set spinlock; no interrupt handling.
    class spinlock {
    private:
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
    public:
        // Busy-wait until the flag is acquired. Becomes a no-op while
        // locks::is_disabled is set.
        void lock() {
            if (is_disabled)
                return;

            for (;;) {
                if (!flag.test_and_set(std::memory_order_acquire))
                    return;
                arch::pause();
            }
        }

        // Release the lock.
        void unlock() {
            flag.clear(std::memory_order_release);
        }

        // Observe (without modifying) whether the lock is currently held.
        bool test() {
            return flag.test();
        }

        // Single acquisition attempt; true on success, no spinning.
        bool try_lock() {
            return !flag.test_and_set(std::memory_order_acquire);
        }
    };
};