// k-vmiter.cc
#include "k-vmiter.hh"

const x86_64_pageentry_t vmiter::zero_pe = 0;

// Return the intersection (bitwise AND) of `perm()` over every mapping in
// the `sz`-byte virtual address range starting at the current address.
// Returns 0 if any page in the range is not present (PTE_P clear) or if
// the range would extend past the usable address space.
uint64_t vmiter::range_perm(size_t sz) const {
    uint64_t p = perm();
    // `rsz` is the size of the region mapped at the current paging level.
    size_t rsz = pageoffmask(level_) + 1;
    if ((p & PTE_P) != 0 && sz > rsz) {
        // Reject ranges running off the end of the address space: limit is
        // 0 for high-half (negative-as-signed) addresses, VA_LOWEND for
        // low-half addresses.
        if (sz > ((int64_t) va() < 0 ? 0 : VA_LOWEND) - va()) {
            return 0;
        }
        vmiter it(*this);
        // Account for the offset of `va()` within its first region so the
        // loop below counts whole regions.
        sz += va() & (rsz - 1);
        do {
            sz -= rsz;
            it.next_range();
            p &= it.perm();   // intersect permissions across regions
            rsz = pageoffmask(it.level_) + 1;
        } while ((p & PTE_P) != 0 && sz > rsz);
    }
    // The range is usable only if every region in it was present.
    if ((p & PTE_P) != 0) {
        return p;
    } else {
        return 0;
    }
}

void vmiter::down() {
    while (level_ > 0 && (*pep_ & (PTE_P | PTE_PS)) == PTE_P) {
Eddie Kohler's avatar
Eddie Kohler committed
30
        perm_ &= *pep_ | ~(PTE_P | PTE_W | PTE_U);
Eddie Kohler's avatar
Eddie Kohler committed
31
32
33
34
35
36
        --level_;
        uintptr_t pa = *pep_ & PTE_PAMASK;
        x86_64_pagetable* pt = pa2kptr<x86_64_pagetable*>(pa);
        pep_ = &pt->entry[pageindex(va_, level_)];
    }
    if ((*pep_ & PTE_PAMASK) >= 0x100000000UL) {
James Foster's avatar
James Foster committed
37
38
        // Note that panic() will prouce a page fault if there is no console
        // (https://github.com/CS161/chickadee/issues/14)
Eddie Kohler's avatar
Eddie Kohler committed
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
        panic("Page table %p may contain uninitialized memory!\n"
              "(Page table contents: %p)\n", pt_, *pep_);
    }
}

// Reposition the iterator at virtual address `va`, reusing the current
// page-table entry pointer when `va` lies in the same page table as the
// current position, and restarting the walk from the top level otherwise.
void vmiter::real_find(uintptr_t va) {
    if (level_ == 3 || ((va_ ^ va) & ~pageoffmask(level_ + 1)) != 0) {
        // `va` is outside the span of the page table containing `pep_`:
        // restart from the top-level page table.
        level_ = 3;
        if (va_is_canonical(va)) {
            perm_ = initial_perm;
            pep_ = &pt_->entry[pageindex(va, level_)];
        } else {
            // Non-canonical addresses are never mapped; point at a
            // permanently-zero entry so perm() etc. report nothing mapped.
            perm_ = 0;
            pep_ = const_cast<x86_64_pageentry_t*>(&zero_pe);
        }
    } else {
        // Same page table: shift `pep_` from its current index to the
        // index for `va`. Entries are 8 bytes, hence the `>> 3`.
        int curidx = (reinterpret_cast<uintptr_t>(pep_) % PAGESIZE) >> 3;
        pep_ += pageindex(va, level_) - curidx;
    }
    va_ = va;
    down();
}

void vmiter::next() {
    int level = 0;
    if (level_ > 0 && !perm()) {
        level = level_;
    }
    real_find((va_ | pageoffmask(level)) + 1);
}

// Map physical address `pa` at the current (page-aligned) virtual address
// with permissions `perm`, allocating intermediate page tables as needed.
// Returns 0 on success, -1 if a page-table allocation failed.
int vmiter::try_map(uintptr_t pa, int perm) {
    // `pa == -1` with no permissions means "map nothing"; normalize to 0.
    if (pa == (uintptr_t) -1 && perm == 0) {
        pa = 0;
    }
    // virtual address is page-aligned
    assert((va_ % PAGESIZE) == 0, "vmiter::try_map va not aligned");
    if (perm & PTE_P) {
        // if mapping present, physical address is page-aligned
        assert(pa != (uintptr_t) -1, "vmiter::try_map mapping nonexistent pa");
        assert((pa & PTE_PAMASK) == pa, "vmiter::try_map pa not aligned");
    } else {
        assert((pa & PTE_P) == 0, "vmiter::try_map invalid pa");
    }
    // new permissions (`perm`) cannot be less restrictive than permissions
    // imposed by higher-level page tables (`perm_`)
    assert(!(perm & ~perm_ & (PTE_P | PTE_W | PTE_U)));

    // Allocate and link intermediate page tables until reaching level 0.
    while (level_ > 0 && perm) {
        assert(!(*pep_ & PTE_P));
        x86_64_pagetable* pt = knew<x86_64_pagetable>();
        if (!pt) {
            return -1;
        }
        memset(pt, 0, PAGESIZE);
        // Ensure the zeroed table contents are visible before the entry
        // that publishes it.
        std::atomic_thread_fence(std::memory_order_release);
        *pep_ = ka2pa(pt) | PTE_P | PTE_W | PTE_U;
        down();
    }

    if (level_ == 0) {
        // Publish the leaf mapping after all prior writes.
        std::atomic_thread_fence(std::memory_order_release);
        *pep_ = pa | perm;
    }
    return 0;
}


// Position this iterator at the top-level entry covering `va`, then
// descend (without skipping the current entry) to the first page-table
// page at or after that address.
void ptiter::go(uintptr_t va) {
    va_ = va;
    level_ = 3;
    pep_ = &pt_->entry[pageindex(va_, level_)];
    down(false);
}

// Advance the iterator to the next page-table page (a table at level >= 1)
// at or after the current position. If `skip` is true, the current entry
// is stepped over before searching. Sets `va_` past VA_NONCANONMAX when
// no further page-table pages exist.
void ptiter::down(bool skip) {
    int stop_level = 1;
    while (true) {
        if ((*pep_ & (PTE_P | PTE_PS)) == PTE_P && !skip) {
            // Present, non-superpage entry: descend toward `stop_level`.
            if (level_ == stop_level) {
                break;
            } else {
                --level_;
                uintptr_t pa = *pep_ & PTE_PAMASK;
                x86_64_pagetable* pt = pa2kptr<x86_64_pagetable*>(pa);
                pep_ = &pt->entry[pageindex(va_, level_)];
            }
        } else {
            // Entry absent, a superpage, or explicitly skipped: move to
            // the next region at this level.
            uintptr_t va = (va_ | pageoffmask(level_)) + 1;
            if ((va ^ va_) & ~pageoffmask(level_ + 1)) {
                // Stepping would leave the enclosing page table: go up one
                // level by restarting the walk from the top with a higher
                // stop level.
                if (level_ == 3) {
                    // Past the last top-level entry: mark iteration done.
                    va_ = VA_NONCANONMAX + 1;
                    return;
                }
                stop_level = level_ + 1;
                level_ = 3;
                pep_ = &pt_->entry[pageindex(va_, level_)];
            } else {
                ++pep_;
                va_ = va;
            }
            skip = false;
        }
    }
}