aboutsummaryrefslogtreecommitdiff
path: root/kernel/inc/mm.h
blob: 5a307b26526f06f05960616824164e5962bd0d71 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
// MIT License, Copyright (c) 2021 Marvin Borner

#ifndef PAGING_H
#define PAGING_H

#include <boot.h>
#include <def.h>
#include <interrupts.h>

/**
 * A contiguous region of memory in the 32-bit address space.
 * Used for both physical and virtual ranges throughout this header.
 */
struct memory_range {
	u32 base; // Start address of the range
	u32 size; // Length of the range in bytes
};

/**
 * Lowlevel paging
 */

// Turns on paging for the CPU (implemented in mm.c — presumably sets the
// paging bit in cr0; confirm against the implementation)
void paging_enable(void);
// Interrupt handler invoked on a page fault; r is the register state
// captured at fault time (struct regs comes from interrupts.h)
void page_fault_handler(struct regs *r);

/**
 * Physical
 */

// Allocates `size` bytes of physical memory and returns the resulting
// range (presumably page-granular — confirm in the implementation)
struct memory_range physical_alloc(u32 size);
// Returns a range previously obtained from physical_alloc to the allocator
void physical_free(struct memory_range range);

/**
 * Virtual
 */

#define PAGE_SIZE 0x1000      // 4 KiB pages
#define PAGE_COUNT 1024	      // Entries per page table / page directory
#define PAGE_KERNEL_COUNT 256 // Page tables reserved for the kernel

// Round x up to the NEXT page boundary. Note: an already-aligned x is
// still advanced by a full page (PAGE_ALIGN(0x1000) == 0x2000); use
// PAGE_ALIGN_UP when aligned values must stay untouched.
#define PAGE_ALIGN(x) ((x) - ((x) % PAGE_SIZE) + PAGE_SIZE)
// Nonzero iff x lies exactly on a page boundary
#define PAGE_ALIGNED(x) (((x) % PAGE_SIZE) == 0)
// Round x up to a page boundary; aligned values are returned unchanged
#define PAGE_ALIGN_UP(x) (PAGE_ALIGNED(x) ? (x) : PAGE_ALIGN(x))
// Round x down to the previous page boundary
#define PAGE_ALIGN_DOWN(x) ((x) - ((x) % PAGE_SIZE))

/**
 * A single 32-bit x86 page table entry, viewable either as individual
 * flag bits or as the raw u32 value.
 */
union page_table_entry {
	struct PACKED {
		u32 present : 1; // Page is mapped in physical memory
		u32 writable : 1; // Page may be written to
		u32 user : 1; // Page is accessible from user mode (ring 3)
		u32 write_through : 1; // Write-through caching enabled
		u32 cache_disable : 1; // Caching disabled for this page
		u32 accessed : 1; // Set by the CPU when the page is read
		u32 dirty : 1; // Set by the CPU when the page is written
		u32 attribute : 1; // PAT attribute bit
		u32 global : 1; // Keep TLB entry across address-space switches
		u32 available : 3; // Free for OS use
		u32 address : 20; // Physical frame address (top 20 bits)
	} bits; // Flag-level view
	u32 uint; // Raw 32-bit view
} PACKED;

/**
 * A page table: 1024 entries, each mapping one 4 KiB page,
 * so one table covers 4 MiB of virtual address space.
 */
struct page_table {
	union page_table_entry entries[PAGE_COUNT];
} PACKED;

/**
 * A single 32-bit x86 page directory entry, viewable either as
 * individual flag bits or as the raw u32 value.
 */
union page_dir_entry {
	struct PACKED {
		u32 present : 1; // Referenced page table is present
		u32 writable : 1; // Pages under this entry may be written to
		u32 user : 1; // Pages are accessible from user mode (ring 3)
		u32 write_through : 1; // Write-through caching enabled
		u32 cache_disable : 1; // Caching disabled
		u32 accessed : 1; // Set by the CPU on access
		u32 reserved : 1; // Reserved by the architecture
		u32 page_size : 1; // Set for 4 MiB pages instead of a page table
		u32 global : 1; // Keep TLB entry across address-space switches
		u32 available : 3; // Free for OS use
		u32 address : 20; // Physical address of the page table (top 20 bits)
	} bits; // Flag-level view
	u32 uint; // Raw 32-bit view
} PACKED;

/**
 * A page directory: 1024 entries, each pointing at a page table,
 * together spanning the full 4 GiB 32-bit address space.
 */
struct page_dir {
	union page_dir_entry entries[PAGE_COUNT];
} PACKED;

// Returns nonzero if vaddr is mapped in dir
u8 virtual_present(struct page_dir *dir, u32 vaddr);
// Translates vaddr to its physical address according to dir
u32 virtual_to_physical(struct page_dir *dir, u32 vaddr);
// Maps the physical range prange at virtual address vaddr in dir;
// flags are the MEMORY_* values defined below
void virtual_map(struct page_dir *dir, struct memory_range prange, u32 vaddr, u32 flags);
// Maps physical_range at some free virtual range in dir and returns
// the chosen virtual range
struct memory_range virtual_alloc(struct page_dir *dir, struct memory_range physical_range,
				  u32 flags);
// Unmaps the virtual range vrange from dir
void virtual_free(struct page_dir *dir, struct memory_range vrange);
// Creates a fresh page directory (ownership passes to the caller;
// release with virtual_destroy_dir)
struct page_dir *virtual_create_dir(void);
// Destroys a directory created by virtual_create_dir
void virtual_destroy_dir(struct page_dir *dir);
// Returns the kernel's page directory
struct page_dir *virtual_kernel_dir(void);

/**
 * Memory wrappers
 */

#define MEMORY_NONE (0 << 0) // No special behavior
#define MEMORY_USER (1 << 0) // Accessible from user mode
#define MEMORY_CLEAR (1 << 1) // Zero the memory after allocation
// Constructs a memory_range value from a base/size pair
#define memory_range(base, size) ((struct memory_range){ (base), (size) })

// Builds a range from base/size (presumably page-aligning them —
// confirm against memory_range_around and the implementation)
struct memory_range memory_range_from(u32 base, u32 size);
// Builds a page-aligned range enclosing [base, base + size)
// (NOTE(review): inferred from the name — verify in mm.c)
struct memory_range memory_range_around(u32 base, u32 size);

// Allocates `size` bytes in dir and returns the virtual address
void *memory_alloc(struct page_dir *dir, u32 size, u32 flags);
// Allocates identity-mapped memory (virtual == physical) in dir
void *memory_alloc_identity(struct page_dir *dir, u32 flags);
// Identity-maps the physical range prange into dir
void memory_map_identity(struct page_dir *dir, struct memory_range prange, u32 flags);
// Frees a virtual range previously allocated in dir
void memory_free(struct page_dir *dir, struct memory_range vrange);
// Switches the CPU to the given page directory
void memory_switch_dir(struct page_dir *dir);
// Stores the currently active directory into *backup
void memory_backup_dir(struct page_dir **backup);

// Bypass should almost never be used
// (presumably bypasses the memory_valid checks below — confirm in mm.c)
void memory_bypass_enable(void);
void memory_bypass_disable(void);
// Returns nonzero if addr is safe to access (unless bypass is enabled)
u8 memory_valid(const void *addr);

// Initializes the memory subsystem from the boot-supplied memory map
// and video information (structs come from boot.h)
void memory_install(struct mem_info *mem_info, struct vid_info *vid_info);

#endif