aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/features/memory.c408
-rw-r--r--kernel/inc/memory.h79
-rw-r--r--kernel/link.ld4
-rw-r--r--kernel/main.c4
-rw-r--r--libc/Makefile1
-rw-r--r--libc/alloc.c355
-rw-r--r--libc/cpu.c13
-rw-r--r--libc/inc/cpu.h6
-rw-r--r--libc/inc/def.h2
-rw-r--r--libc/mem.c351
11 files changed, 868 insertions, 356 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 9cf18e5..8f090d1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,6 +10,7 @@ COBJS = main.o \
drivers/ide.o \
drivers/timer.o \
drivers/rtl8139.o \
+ features/memory.o \
features/fs.o \
features/load.o \
features/proc.o \
diff --git a/kernel/features/memory.c b/kernel/features/memory.c
new file mode 100644
index 0000000..aa861e1
--- /dev/null
+++ b/kernel/features/memory.c
@@ -0,0 +1,408 @@
+// Hugely inspired by the implementation in skiftOS: MIT License, Copyright (c) 2020 N. Van Bossuyt
+// MIT License, Copyright (c) 2021 Marvin Borner
+
+#include <assert.h>
+#include <cpu.h>
+#include <def.h>
+#include <mem.h>
+#include <memory.h>
+
+#include <print.h>
+
+/**
+ * Paging
+ */
+
+void paging_disable(void)
+{
+	cr0_set(cr0_get() & 0x7fffffff); // clear CR0.PG (bit 31); '|' here set bits 0-30 and never disabled paging
+}
+
+void paging_enable(void)
+{
+ cr0_set(cr0_get() | 0x80000000);
+}
+
+void paging_switch_dir(u32 dir)
+{
+ cr3_set(dir);
+}
+
+void paging_invalidate_tlb(void)
+{
+	__asm__ volatile("movl %%cr3, %%eax\n\tmovl %%eax, %%cr3" ::: "eax", "memory"); // reload CR3 to flush non-global TLB entries; bare "invlpg" (no operand) was invalid asm
+}
+
+/**
+ * Physical
+ */
+
+static u32 memory_used = 0;
+static u32 memory_total = 0;
+static u8 memory[PAGE_COUNT * PAGE_COUNT / 8] = { 0 };
+#define PHYSICAL_IS_USED(addr) \
+ (memory[(u32)(addr) / PAGE_SIZE / 8] & (1 << ((u32)(addr) / PAGE_SIZE % 8)))
+
+#define PHYSICAL_SET_USED(addr) \
+ (memory[(u32)(addr) / PAGE_SIZE / 8] |= (1 << ((u32)(addr) / PAGE_SIZE % 8)))
+
+#define PHYSICAL_SET_FREE(addr) \
+ (memory[(u32)(addr) / PAGE_SIZE / 8] &= ~(1 << ((u32)(addr) / PAGE_SIZE % 8)))
+
+u8 physical_is_used(u32 addr, u32 n)
+{
+ for (u32 i = 0; i < n; i++) {
+ if (PHYSICAL_IS_USED(addr + (i * PAGE_SIZE)))
+ return 1;
+ }
+ return 0;
+}
+
+void physical_set_used(u32 addr, u32 n)
+{
+ for (u32 i = 0; i < n; i++) {
+ if (!PHYSICAL_IS_USED(addr + (i * PAGE_SIZE))) {
+ memory_used += PAGE_SIZE;
+ PHYSICAL_SET_USED(addr + (i * PAGE_SIZE));
+ }
+ }
+}
+
+void physical_set_free(u32 addr, u32 n)
+{
+ for (u32 i = 0; i < n; i++) {
+ if (PHYSICAL_IS_USED(addr + (i * PAGE_SIZE))) {
+ memory_used -= PAGE_SIZE;
+ PHYSICAL_SET_FREE(addr + (i * PAGE_SIZE));
+ }
+ }
+}
+
+u32 physical_alloc(u32 n)
+{
+ for (u32 i = 0; i < (memory_total / PAGE_SIZE); i++) {
+ u32 addr = i * PAGE_SIZE;
+ if (!physical_is_used(addr, n)) {
+ physical_set_used(addr, n);
+ return addr;
+ }
+ }
+
+ panic("Out of physical memory!\n");
+ return 0;
+}
+
+void physical_free(u32 addr, u32 n)
+{
+ physical_set_free(addr, n);
+}
+
+/**
+ * Virtual
+ */
+
+#define PDI(vaddr) ((vaddr) >> 22)
+#define PTI(vaddr) (((vaddr) >> 12) & 0x03ff)
+
+struct page_dir kernel_dir ALIGNED(PAGE_SIZE) = { 0 };
+struct page_table kernel_tables[256] ALIGNED(PAGE_SIZE) = { 0 };
+
+u8 virtual_present(struct page_dir *dir, u32 vaddr)
+{
+ u32 pdi = PDI(vaddr);
+ u32 pti = PTI(vaddr);
+
+ union page_dir_entry *dir_entry = &dir->entries[pdi];
+ if (!dir_entry->bits.present)
+ return 0;
+
+ struct page_table *table = (struct page_table *)(dir_entry->bits.address * PAGE_SIZE);
+ union page_table_entry *table_entry = &table->entries[pti];
+ if (!table_entry->bits.present)
+ return 0;
+
+ return 1;
+}
+
+u32 virtual_to_physical(struct page_dir *dir, u32 vaddr)
+{
+ u32 pdi = PDI(vaddr);
+ u32 pti = PTI(vaddr);
+
+ union page_dir_entry *dir_entry = &dir->entries[pdi];
+ struct page_table *table = (struct page_table *)(dir_entry->bits.address * PAGE_SIZE);
+ union page_table_entry *table_entry = &table->entries[pti];
+
+ return (table_entry->bits.address * PAGE_SIZE) + (vaddr & (PAGE_SIZE - 1));
+}
+
+void memory_alloc_identity(struct page_dir *dir, u32 flags, u32 *out);
+void virtual_map(struct page_dir *dir, u32 vaddr, u32 paddr, u32 n, u8 user)
+{
+	for (u32 i = 0; i < n; i++) {
+		u32 offset = i * PAGE_SIZE;
+		u32 pdi = PDI(vaddr + offset);
+		u32 pti = PTI(vaddr + offset);
+
+		union page_dir_entry *dir_entry = &dir->entries[pdi];
+		if (!dir_entry->bits.present) {
+			struct page_table *new_table;
+			memory_alloc_identity(dir, MEMORY_CLEAR, (u32 *)&new_table);
+			dir_entry->bits.present = 1;
+			dir_entry->bits.writable = 1;
+			dir_entry->bits.user = user;
+			dir_entry->bits.address = (u32)new_table >> 12;
+		}
+
+		// Derive table pointers only AFTER the directory entry is
+		// valid; previously they were read before allocation, so a
+		// freshly allocated table's entry was written through a
+		// stale (garbage) address.
+		struct page_table *table =
+			(struct page_table *)(dir_entry->bits.address * PAGE_SIZE);
+		union page_table_entry *table_entry = &table->entries[pti];
+		table_entry->bits.present = 1;
+		table_entry->bits.writable = 1;
+		table_entry->bits.user = user;
+		table_entry->bits.address = (paddr + offset) >> 12;
+	}
+
+	paging_invalidate_tlb();
+}
+
+struct memory_range virtual_alloc(struct page_dir *dir, struct memory_range physical_range,
+ u32 flags)
+{
+ u8 is_user = flags & MEMORY_USER;
+ u32 vaddr = 0;
+ u32 size = 0;
+ for (u32 i = (is_user ? 256 : 1) * PAGE_COUNT;
+ i < (is_user ? PAGE_COUNT : 256) * PAGE_COUNT; i++) {
+ u32 addr = i * PAGE_SIZE;
+ if (!virtual_present(dir, addr)) {
+ if (size == 0)
+ vaddr = addr;
+ size += PAGE_SIZE;
+ if (size == physical_range.size) {
+ virtual_map(dir, vaddr, physical_range.base,
+ physical_range.size / PAGE_SIZE, is_user);
+ return memory_range(vaddr, size);
+ }
+ } else {
+ size = 0;
+ }
+ }
+
+ panic("Out of virtual memory!\n");
+ return memory_range(0, 0);
+}
+
+void virtual_free(struct page_dir *dir, struct memory_range virtual_range)
+{
+ for (u32 i = 0; i < virtual_range.size / PAGE_SIZE; i++) {
+ u32 offset = i * PAGE_SIZE;
+
+ u32 pdi = PDI(virtual_range.base + offset);
+ u32 pti = PTI(virtual_range.base + offset);
+
+ union page_dir_entry *dir_entry = &dir->entries[pdi];
+ struct page_table *table =
+ (struct page_table *)(dir_entry->bits.address * PAGE_SIZE);
+ union page_table_entry *table_entry = &table->entries[pti];
+
+ if (table_entry->bits.present)
+ table_entry->uint = 0;
+ }
+
+ paging_invalidate_tlb();
+}
+
+/**
+ * Memory wrapper
+ */
+
+/* extern u32 kernel_start; */
+/* extern u32 kernel_end; */
+// TODO!
+u32 kernel_start = 0x50000;
+u32 kernel_end = 0xa0000;
+
+struct memory_range memory_range_from_address(u32 base, u32 size)
+{
+ u32 align = PAGE_SIZE - base % PAGE_SIZE;
+
+ if (base % PAGE_SIZE == 0) {
+ align = 0;
+ }
+
+ base += align;
+ size -= align;
+
+ size -= size % PAGE_SIZE;
+
+ return memory_range(base, size);
+}
+
+struct memory_range memory_range_around_address(u32 base, u32 size)
+{
+ u32 align = base % PAGE_SIZE;
+
+ base -= align;
+ size += align;
+
+ size += PAGE_SIZE - size % PAGE_SIZE;
+
+ return memory_range(base, size);
+}
+
+static struct memory_range kernel_memory_range(void)
+{
+ return memory_range_around_address((u32)&kernel_start,
+ (u32)&kernel_end - (u32)&kernel_start);
+}
+
+void memory_map_identity(struct page_dir *dir, struct memory_range range, u32 flags)
+{
+ assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size));
+
+ u32 page_count = range.size / PAGE_SIZE;
+ physical_set_used(range.base, page_count);
+ virtual_map(dir, range.base, range.base, page_count, flags & MEMORY_USER);
+
+ if (flags & MEMORY_CLEAR)
+ memset((void *)range.base, 0, range.size);
+}
+
+void memory_map(struct page_dir *dir, struct memory_range range, u32 flags)
+{
+ assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size));
+
+ for (u32 i = 0; i < range.size / PAGE_SIZE; i++) {
+ u32 vaddr = range.base + i * PAGE_SIZE;
+ if (!virtual_present(dir, vaddr)) {
+ u32 paddr = physical_alloc(1);
+ virtual_map(dir, vaddr, paddr, 1, flags & MEMORY_USER);
+ }
+ }
+
+ if (flags & MEMORY_CLEAR)
+ memset((void *)range.base, 0, range.size);
+}
+
+void memory_alloc_identity(struct page_dir *dir, u32 flags, u32 *out)
+{
+ for (u32 i = 1; i < 256 * PAGE_COUNT; i++) {
+ u32 addr = i * PAGE_SIZE;
+ if (!virtual_present(dir, addr) && !physical_is_used(addr, 1)) {
+ physical_set_used(addr, 1);
+ virtual_map(dir, addr, addr, 1, flags & MEMORY_USER);
+
+ if (flags & MEMORY_CLEAR)
+ memset((void *)addr, 0, PAGE_SIZE);
+
+ *out = addr;
+
+ return;
+ }
+ }
+
+ *out = 0;
+ panic("Out of memory!\n");
+}
+
+void memory_alloc(struct page_dir *dir, u32 size, u32 flags, u32 *out)
+{
+ assert(size && PAGE_ALIGNED(size));
+ *out = 0;
+
+ u32 page_count = size / PAGE_SIZE;
+ u32 paddr = physical_alloc(page_count);
+ assert(paddr);
+ u32 vaddr = virtual_alloc(dir, memory_range(paddr, size), flags).base;
+ assert(vaddr);
+ if (flags & MEMORY_CLEAR)
+ memset((void *)vaddr, 0, page_count * PAGE_SIZE);
+ *out = vaddr;
+}
+
+void memory_free(struct page_dir *dir, struct memory_range range)
+{
+ assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size));
+
+ for (u32 i = 0; i < range.size / PAGE_SIZE; i++) {
+ u32 vaddr = range.base + i * PAGE_SIZE;
+ if (virtual_present(dir, vaddr)) {
+ physical_free(virtual_to_physical(dir, vaddr), 1);
+ virtual_free(dir, memory_range(vaddr, PAGE_SIZE));
+ }
+ }
+}
+
+struct page_dir *memory_dir_create(void)
+{
+ struct page_dir *dir = NULL;
+ memory_alloc(&kernel_dir, sizeof(*dir), MEMORY_CLEAR, (u32 *)&dir);
+ memset(dir, 0, sizeof(*dir));
+
+ for (u32 i = 0; i < 256; i++) {
+ union page_dir_entry *entry = &dir->entries[i];
+ entry->bits.present = 1;
+ entry->bits.writable = 1;
+ entry->bits.user = 0;
+ entry->bits.address = (u32)&kernel_tables[i] / PAGE_SIZE;
+ }
+
+ return dir;
+}
+
+void memory_dir_destroy(struct page_dir *dir)
+{
+ for (u32 i = 256; i < 1024; i++) {
+ union page_dir_entry *dir_entry = &dir->entries[i];
+ if (dir_entry->bits.present) {
+ struct page_table *table =
+ (struct page_table *)(dir_entry->bits.address * PAGE_SIZE);
+ for (u32 j = 0; j < 1024; j++) {
+ union page_table_entry *table_entry = &table->entries[j];
+ if (table_entry->bits.present)
+ physical_free(table_entry->bits.address * PAGE_SIZE, 1);
+ }
+
+ memory_free(&kernel_dir, memory_range((u32)table, sizeof(*table)));
+ }
+ }
+ memory_free(&kernel_dir, memory_range((u32)dir, sizeof(*dir)));
+}
+
+void memory_dir_switch(struct page_dir *dir)
+{
+ paging_switch_dir(virtual_to_physical(&kernel_dir, (u32)dir));
+}
+
+void memory_initialize(void)
+{
+ memset(memory, 0xff, PAGE_COUNT * PAGE_COUNT / 8);
+ for (u32 i = 0; i < 256; i++) {
+ union page_dir_entry *entry = &kernel_dir.entries[i];
+ entry->bits.present = 1;
+ entry->bits.writable = 1;
+ entry->bits.user = 0;
+ entry->bits.address = (u32)&kernel_tables[i] / PAGE_SIZE;
+ }
+
+ // TODO: Loop over mmap and set free
+
+ memory_used = 0;
+ memory_total = 100 << 20; // 100Megs?
+
+ memory_map_identity(&kernel_dir, kernel_memory_range(), MEMORY_NONE);
+ virtual_free(&kernel_dir, memory_range(0, PAGE_SIZE));
+ physical_set_used(0, 1);
+ memory_dir_switch(&kernel_dir);
+ printf("OK!\n");
+ paging_enable();
+}
+
+#define HEAP_START 0x00f00000
+void paging_install(void)
+{
+ heap_init(HEAP_START);
+ memory_initialize();
+ printf("OK!\n");
+}
diff --git a/kernel/inc/memory.h b/kernel/inc/memory.h
new file mode 100644
index 0000000..4647c3e
--- /dev/null
+++ b/kernel/inc/memory.h
@@ -0,0 +1,79 @@
+// MIT License, Copyright (c) 2021 Marvin Borner
+
+#ifndef PAGING_H
+#define PAGING_H
+
+#include <def.h>
+
+/**
+ * Physical
+ */
+
+/**
+ * Virtual
+ */
+
+#define PAGE_SIZE 0x1000
+#define PAGE_COUNT 1024
+#define PAGE_ALIGN(x) ((x) + PAGE_SIZE - ((x) % PAGE_SIZE)) /* NOTE(review): rounds an already page-aligned x up by a whole extra page -- confirm intent; usual round-up idiom is (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) */
+#define PAGE_ALIGNED(x) ((x) % PAGE_SIZE == 0)
+
+union page_table_entry {
+ struct PACKED {
+ u32 present : 1;
+ u32 writable : 1;
+ u32 user : 1;
+ u32 write_through : 1;
+ u32 cache_disable : 1;
+ u32 accessed : 1;
+ u32 dirty : 1;
+ u32 attribute : 1;
+ u32 global : 1;
+ u32 available : 3;
+ u32 address : 20;
+ } bits;
+ u32 uint;
+} PACKED;
+
+struct page_table {
+ union page_table_entry entries[PAGE_COUNT];
+} PACKED;
+
+union page_dir_entry {
+ struct PACKED {
+ u32 present : 1;
+ u32 writable : 1;
+ u32 user : 1;
+ u32 write_through : 1;
+ u32 cache_disable : 1;
+ u32 accessed : 1;
+ u32 reserved : 1;
+ u32 page_size : 1;
+ u32 global : 1;
+ u32 available : 3;
+ u32 address : 20;
+ } bits;
+ u32 uint;
+} PACKED;
+
+struct page_dir {
+ union page_dir_entry entries[PAGE_COUNT];
+} PACKED;
+
+void paging_install(void);
+
+/**
+ * Memory wrappers
+ */
+
+#define MEMORY_NONE (0 << 0)
+#define MEMORY_USER (1 << 0)
+#define MEMORY_CLEAR (1 << 1)
+#define memory_range(base, size) ((struct memory_range){ (base), (size) })
+
+struct memory_range {
+ u32 base;
+ u32 size;
+};
+
+#endif
diff --git a/kernel/link.ld b/kernel/link.ld
index 0d8d865..e63bd71 100644
--- a/kernel/link.ld
+++ b/kernel/link.ld
@@ -5,6 +5,8 @@ phys = 0x00050000;
SECTIONS
{
+ kernel_start = .;
+
.text phys : AT(phys) {
code = .;
*(.text)
@@ -26,5 +28,5 @@ SECTIONS
. = ALIGN(4096);
}
- end = .;
+ kernel_end = .;
}
diff --git a/kernel/main.c b/kernel/main.c
index 6be31bd..4d8165f 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -7,7 +7,7 @@
#include <interrupts.h>
#include <keyboard.h>
#include <load.h>
-#include <mem.h>
+#include <memory.h>
#include <mouse.h>
#include <net.h>
#include <pci.h>
@@ -26,7 +26,7 @@ void kernel_main(struct vid_info *vid_info)
serial_print("\nKernel was compiled at " __TIME__ " on " __DATE__ "\n");
serial_print("Serial connected.\n");
- heap_init(0x00f00000);
+ paging_install();
boot_passed = vid_info;
diff --git a/libc/Makefile b/libc/Makefile
index 9ac6c67..3bf4473 100644
--- a/libc/Makefile
+++ b/libc/Makefile
@@ -3,6 +3,7 @@
# TODO: Remove serial and cpu from libc?
COBJS = sanitize.o \
str.o \
+ alloc.o \
mem.o \
math.o \
conv.o \
diff --git a/libc/alloc.c b/libc/alloc.c
new file mode 100644
index 0000000..16154ef
--- /dev/null
+++ b/libc/alloc.c
@@ -0,0 +1,355 @@
+// MIT License, Copyright (c) 2021 Marvin Borner
+
+#include <assert.h>
+#include <def.h>
+#include <mem.h>
+#include <print.h>
+#include <sys.h>
+
+/**
+ * Kernel heap allocator
+ * Inspired by SHMALL (MIT License)
+ * Copyright (c) 2017 Chris Careaga
+ * Copyright (c) 2021 Marvin Borner
+ */
+
+#ifdef kernel
+
+#define HEAP_MAGIC 0x424242
+#define HEAP_INIT_SIZE 0xf000000
+#define HEAP_MIN_SIZE HEAP_INIT_SIZE
+#define MIN_ALLOC_SZ 4
+#define BIN_COUNT 9
+#define BIN_MAX_IDX (BIN_COUNT - 1)
+#define OVERHEAD (sizeof(struct h_footer) + sizeof(struct h_node))
+/* #define MIN_WILDERNESS 0x2000 */
+/* #define MAX_WILDERNESS 0x1000000 */
+
+struct h_node {
+ u32 magic;
+ u32 hole;
+ u32 size;
+ struct h_node *next;
+ struct h_node *prev;
+};
+
+struct h_footer {
+ struct h_node *header;
+};
+
+struct h_bin {
+ struct h_node *head;
+};
+
+struct heap {
+ u32 start;
+ u32 end;
+ struct h_bin bins[BIN_COUNT];
+};
+
+static void node_add(struct h_bin *bin, struct h_node *node)
+{
+ node->magic = HEAP_MAGIC;
+ node->next = NULL;
+ node->prev = NULL;
+ if (!bin->head) {
+ bin->head = node;
+ return;
+ }
+ struct h_node *curr = bin->head;
+ struct h_node *prev = NULL;
+ while (curr && curr->size <= node->size) {
+ prev = curr;
+ curr = curr->next;
+ }
+ if (!curr) {
+ prev->next = node;
+ node->prev = prev;
+ } else if (prev) {
+ node->next = curr;
+ prev->next = node;
+ node->prev = prev;
+ curr->prev = node;
+ } else {
+ node->next = bin->head;
+ bin->head->prev = node;
+ bin->head = node;
+ }
+}
+
+static void node_remove(struct h_bin *bin, struct h_node *node)
+{
+ if (!bin->head)
+ return;
+ if (bin->head == node) {
+ bin->head = bin->head->next;
+ return;
+ }
+
+ struct h_node *temp = bin->head->next;
+ while (temp) {
+ if (temp == node) {
+ if (!temp->next) {
+ temp->prev->next = NULL;
+ } else {
+ temp->prev->next = temp->next;
+ temp->next->prev = temp->prev;
+ }
+ return;
+ }
+ temp = temp->next;
+ }
+}
+
+static struct h_node *node_best_fit(struct h_bin *bin, u32 size)
+{
+ if (!bin->head)
+ return NULL;
+
+ struct h_node *temp = bin->head;
+ while (temp) {
+ if (temp->size >= size)
+ return temp;
+ temp = temp->next;
+ }
+ return NULL;
+}
+
+/* static struct h_node *node_last(struct h_bin *bin) */
+/* { */
+/* struct h_node *temp = bin->head; */
+/* while (temp->next) */
+/* temp = temp->next; */
+/* return temp; */
+/* } */
+
+static struct h_footer *node_foot(struct h_node *node)
+{
+ return (struct h_footer *)((char *)node + sizeof(*node) + node->size);
+}
+
+static void node_create_foot(struct h_node *head)
+{
+ struct h_footer *foot = node_foot(head);
+ foot->header = head;
+}
+
+static u32 bin_index(u32 sz)
+{
+ u32 index = 0;
+ sz = sz < 4 ? 4 : sz;
+ while (sz >>= 1)
+ index++;
+ index -= 2;
+ if (index > BIN_MAX_IDX)
+ index = BIN_MAX_IDX;
+ return index;
+}
+
+/* struct h_node *wilderness_get(struct heap *heap) */
+/* { */
+/* struct h_footer *wild_foot = (struct h_footer *)((char *)heap->end - sizeof(*wild_foot)); */
+/* return wild_foot->header; */
+/* } */
+
+/* static u32 expand(struct heap *heap, u32 sz) */
+/* { */
+/* (void)heap; */
+/* (void)sz; */
+/* return 0; */
+/* } */
+
+/* static u32 contract(struct heap *heap, u32 sz) */
+/* { */
+/* (void)heap; */
+/* (void)sz; */
+/* return 0; */
+/* } */
+
+static struct heap heap = { 0 };
+void heap_init(u32 start)
+{
+ struct h_node *init_region = (struct h_node *)start;
+ init_region->hole = 1;
+ init_region->size = HEAP_INIT_SIZE - OVERHEAD;
+ node_create_foot(init_region);
+ node_add(&heap.bins[bin_index(init_region->size)], init_region);
+ heap.start = (u32)start;
+ heap.end = (u32)start + HEAP_INIT_SIZE;
+}
+
+#define ALIGN sizeof(long)
+static void *_malloc(u32 size)
+{
+ size = ((size + ALIGN - 1) / ALIGN) * ALIGN; // Alignment
+ u32 index = bin_index(size);
+ struct h_bin *temp = (struct h_bin *)&heap.bins[index];
+ struct h_node *found = node_best_fit(temp, size);
+
+ while (!found) {
+ assert(index + 1 < BIN_COUNT);
+
+ temp = &heap.bins[++index];
+ found = node_best_fit(temp, size);
+ }
+
+ assert(found->magic == HEAP_MAGIC);
+
+ if ((found->size - size) > (OVERHEAD + MIN_ALLOC_SZ)) {
+ struct h_node *split = (struct h_node *)(((char *)found + OVERHEAD) + size);
+ split->magic = HEAP_MAGIC;
+ split->size = found->size - size - OVERHEAD;
+ split->hole = 1;
+
+ node_create_foot(split);
+
+ u32 new_idx = bin_index(split->size);
+
+ node_add(&heap.bins[new_idx], split);
+
+ found->size = size;
+ node_create_foot(found);
+ }
+
+ found->hole = 0;
+ node_remove(&heap.bins[index], found);
+
+ // TODO: Implement expand/contract
+ /* struct h_node *wild = wilderness_get(&heap); */
+ /* if (wild->size < MIN_WILDERNESS) { */
+ /* assert(expand(&heap, 0x1000)); */
+ /* } else if (wild->size > MAX_WILDERNESS) { */
+ /* assert(contract(&heap, 0x1000)); */
+ /* } */
+
+ found->prev = NULL;
+ found->next = NULL;
+ return &found->next;
+}
+
+static void _free(void *p)
+{
+ if (!p)
+ return;
+
+ struct h_bin *list;
+ struct h_footer *new_foot, *old_foot;
+
+ struct h_node *head = (struct h_node *)((char *)p - 12);
+ assert(head->magic == HEAP_MAGIC && head->hole == 0);
+ if (head == (struct h_node *)(u32 *)heap.start) {
+ head->hole = 1;
+ node_add(&heap.bins[bin_index(head->size)], head);
+ return;
+ }
+
+ struct h_node *next = (struct h_node *)((char *)node_foot(head) + sizeof(struct h_footer));
+ struct h_footer *f = (struct h_footer *)((char *)head - sizeof(struct h_footer));
+ struct h_node *prev = f->header;
+
+ if (prev->hole) {
+ list = &heap.bins[bin_index(prev->size)];
+ node_remove(list, prev);
+
+ prev->size += OVERHEAD + head->size;
+ new_foot = node_foot(head);
+ new_foot->header = prev;
+
+ head = prev;
+ }
+
+ if (next->hole) {
+ list = &heap.bins[bin_index(next->size)];
+ node_remove(list, next);
+
+ head->size += OVERHEAD + next->size;
+
+ old_foot = node_foot(next);
+ old_foot->header = 0;
+ next->size = 0;
+ next->hole = 0;
+
+ new_foot = node_foot(head);
+ new_foot->header = head;
+ }
+
+ head->hole = 1;
+ node_add(&heap.bins[bin_index(head->size)], head);
+}
+
+#elif defined(userspace)
+
+#define kmalloc(n) (void *)sys1(SYS_MALLOC, n)
+#define kfree(ptr) (void)(sys1(SYS_FREE, (int)ptr))
+
+static void *_malloc(u32 size)
+{
+ return kmalloc(size);
+}
+
+static void _free(void *ptr)
+{
+ kfree(ptr);
+}
+
+#endif
+
+#ifdef kernel
+#define PREFIX "K"
+#define FUNC printf
+#else
+#define PREFIX "U"
+#define FUNC log
+#endif
+
+void *zalloc(u32 size)
+{
+ void *ret = malloc(size);
+ memset(ret, 0, size);
+ return ret;
+}
+
+// Naive realloc implementation - TODO!
+void *realloc(void *ptr, u32 size)
+{
+ if (!ptr)
+ return malloc(size);
+
+ FUNC("Realloc not implemented!\n");
+ return NULL;
+ /* // This could work; untested
+ struct h_node *node = (struct h_node *)((char *)ptr - 12);
+ u32 old_size = node->size;
+
+ void *new = malloc(size);
+ memcpy(new, ptr, old_size);
+
+ free(ptr);
+ return new;
+ */
+}
+
+void *malloc_debug(u32 size, const char *file, int line, const char *func, const char *inp)
+{
+ assert(size < (100 << 20)); // Don't brag with memory pls
+ void *ret = _malloc(size);
+
+ (void)file;
+ (void)line;
+ (void)func;
+ (void)inp;
+ /* FUNC(PREFIX "MALLOC\t%s:%d: %s: 0x%x %dB (%s)\n", file, line, func, ret, size, inp); */
+ return ret;
+}
+
+void free_debug(void *ptr, const char *file, int line, const char *func, const char *inp)
+{
+ if (ptr)
+ _free(ptr);
+
+ (void)file;
+ (void)line;
+ (void)func;
+ (void)inp;
+ /* FUNC(PREFIX "FREE\t%s:%d: %s: 0x%x (%s)\n", file, line, func, ptr, inp); */
+}
diff --git a/libc/cpu.c b/libc/cpu.c
index 08e742c..b74ed57 100644
--- a/libc/cpu.c
+++ b/libc/cpu.c
@@ -70,26 +70,31 @@ void cpu_print(void)
printf("CPU vendor: %s\n", cpu_string(buf));
}
-static u32 cr0_get(void)
+u32 cr0_get(void)
{
u32 cr0;
__asm__ volatile("movl %%cr0, %%eax" : "=a"(cr0));
return cr0;
}
-static void cr0_set(u32 cr0)
+void cr0_set(u32 cr0)
{
__asm__ volatile("movl %%eax, %%cr0" ::"a"(cr0));
}
-static u32 cr4_get(void)
+void cr3_set(u32 cr3)
+{
+ __asm__ volatile("movl %%eax, %%cr3" ::"a"(cr3));
+}
+
+u32 cr4_get(void)
{
u32 cr4;
__asm__ volatile("movl %%cr4, %%eax" : "=a"(cr4));
return cr4;
}
-static void cr4_set(u32 cr4)
+void cr4_set(u32 cr4)
{
__asm__ volatile("movl %%eax, %%cr4" ::"a"(cr4));
}
diff --git a/libc/inc/cpu.h b/libc/inc/cpu.h
index fa82fbe..5c55913 100644
--- a/libc/inc/cpu.h
+++ b/libc/inc/cpu.h
@@ -27,6 +27,12 @@ void cpu_print(void);
void cpu_enable_features(void);
void fpu_restore(void);
+u32 cr0_get(void);
+void cr0_set(u32 cr0);
+void cr3_set(u32 cr3);
+u32 cr4_get(void);
+void cr4_set(u32 cr4);
+
void cli(void);
void sti(void);
void hlt(void);
diff --git a/libc/inc/def.h b/libc/inc/def.h
index c8b9dbf..945ccb0 100644
--- a/libc/inc/def.h
+++ b/libc/inc/def.h
@@ -26,6 +26,8 @@ typedef unsigned long long u64;
#define UNUSED(a) ((void)(a))
#define NO_SANITIZE __attribute__((no_sanitize("undefined")))
+#define PACKED __attribute__((packed))
+#define ALIGNED(align) __attribute__((aligned(align)))
#define EOF (-1)
#define NULL ((void *)0)
diff --git a/libc/mem.c b/libc/mem.c
index 971315a..95242e4 100644
--- a/libc/mem.c
+++ b/libc/mem.c
@@ -66,12 +66,13 @@ void *memcpy(void *dest, const void *src, u32 n)
void *memset(void *dest, int val, u32 n)
{
+ u32 uval = val;
u32 num_dwords = n / 4;
u32 num_bytes = n % 4;
u32 *dest32 = (u32 *)dest;
u8 *dest8 = ((u8 *)dest) + num_dwords * 4;
u8 val8 = (u8)val;
- u32 val32 = val | (val << 8) | (val << 16) | (val << 24);
+ u32 val32 = uval | (uval << 8) | (uval << 16) | (uval << 24);
// TODO: What's faster?
__asm__ volatile("rep stosl\n"
@@ -118,351 +119,3 @@ int mememp(const u8 *buf, u32 n)
{
return buf[0] == 0 && !memcmp(buf, buf + 1, n - 1);
}
-
-/**
- * Heap allocator
- * Inspired by SHMALL (MIT License)
- * Copyright (c) 2017 Chris Careaga
- * Copyright (c) 2021 Marvin Borner
- */
-
-#ifdef kernel
-
-#define HEAP_MAGIC 0x424242
-#define HEAP_INIT_SIZE 0xf000000
-#define HEAP_MIN_SIZE HEAP_INIT_SIZE
-#define MIN_ALLOC_SZ 4
-#define BIN_COUNT 9
-#define BIN_MAX_IDX (BIN_COUNT - 1)
-#define OVERHEAD (sizeof(struct h_footer) + sizeof(struct h_node))
-/* #define MIN_WILDERNESS 0x2000 */
-/* #define MAX_WILDERNESS 0x1000000 */
-
-struct h_node {
- u32 magic;
- u32 hole;
- u32 size;
- struct h_node *next;
- struct h_node *prev;
-};
-
-struct h_footer {
- struct h_node *header;
-};
-
-struct h_bin {
- struct h_node *head;
-};
-
-struct heap {
- u32 start;
- u32 end;
- struct h_bin bins[BIN_COUNT];
-};
-
-static void node_add(struct h_bin *bin, struct h_node *node)
-{
- node->magic = HEAP_MAGIC;
- node->next = NULL;
- node->prev = NULL;
- if (!bin->head) {
- bin->head = node;
- return;
- }
- struct h_node *curr = bin->head;
- struct h_node *prev = NULL;
- while (curr && curr->size <= node->size) {
- prev = curr;
- curr = curr->next;
- }
- if (!curr) {
- prev->next = node;
- node->prev = prev;
- } else if (prev) {
- node->next = curr;
- prev->next = node;
- node->prev = prev;
- curr->prev = node;
- } else {
- node->next = bin->head;
- bin->head->prev = node;
- bin->head = node;
- }
-}
-
-static void node_remove(struct h_bin *bin, struct h_node *node)
-{
- if (!bin->head)
- return;
- if (bin->head == node) {
- bin->head = bin->head->next;
- return;
- }
-
- struct h_node *temp = bin->head->next;
- while (temp) {
- if (temp == node) {
- if (!temp->next) {
- temp->prev->next = NULL;
- } else {
- temp->prev->next = temp->next;
- temp->next->prev = temp->prev;
- }
- return;
- }
- temp = temp->next;
- }
-}
-
-static struct h_node *node_best_fit(struct h_bin *bin, u32 size)
-{
- if (!bin->head)
- return NULL;
-
- struct h_node *temp = bin->head;
- while (temp) {
- if (temp->size >= size)
- return temp;
- temp = temp->next;
- }
- return NULL;
-}
-
-/* static struct h_node *node_last(struct h_bin *bin) */
-/* { */
-/* struct h_node *temp = bin->head; */
-/* while (temp->next) */
-/* temp = temp->next; */
-/* return temp; */
-/* } */
-
-static struct h_footer *node_foot(struct h_node *node)
-{
- return (struct h_footer *)((char *)node + sizeof(*node) + node->size);
-}
-
-static void node_create_foot(struct h_node *head)
-{
- struct h_footer *foot = node_foot(head);
- foot->header = head;
-}
-
-static u32 bin_index(u32 sz)
-{
- u32 index = 0;
- sz = sz < 4 ? 4 : sz;
- while (sz >>= 1)
- index++;
- index -= 2;
- if (index > BIN_MAX_IDX)
- index = BIN_MAX_IDX;
- return index;
-}
-
-/* struct h_node *wilderness_get(struct heap *heap) */
-/* { */
-/* struct h_footer *wild_foot = (struct h_footer *)((char *)heap->end - sizeof(*wild_foot)); */
-/* return wild_foot->header; */
-/* } */
-
-/* static u32 expand(struct heap *heap, u32 sz) */
-/* { */
-/* (void)heap; */
-/* (void)sz; */
-/* return 0; */
-/* } */
-
-/* static u32 contract(struct heap *heap, u32 sz) */
-/* { */
-/* (void)heap; */
-/* (void)sz; */
-/* return 0; */
-/* } */
-
-static struct heap heap = { 0 };
-void heap_init(u32 start)
-{
- struct h_node *init_region = (struct h_node *)start;
- init_region->hole = 1;
- init_region->size = HEAP_INIT_SIZE - OVERHEAD;
- node_create_foot(init_region);
- node_add(&heap.bins[bin_index(init_region->size)], init_region);
- heap.start = (u32)start;
- heap.end = (u32)start + HEAP_INIT_SIZE;
-}
-
-#define ALIGN sizeof(long)
-static void *_malloc(u32 size)
-{
- size = ((size + ALIGN - 1) / ALIGN) * ALIGN; // Alignment
- u32 index = bin_index(size);
- struct h_bin *temp = (struct h_bin *)&heap.bins[index];
- struct h_node *found = node_best_fit(temp, size);
-
- while (!found) {
- assert(index + 1 < BIN_COUNT);
-
- temp = &heap.bins[++index];
- found = node_best_fit(temp, size);
- }
-
- assert(found->magic == HEAP_MAGIC);
-
- if ((found->size - size) > (OVERHEAD + MIN_ALLOC_SZ)) {
- struct h_node *split = (struct h_node *)(((char *)found + OVERHEAD) + size);
- split->magic = HEAP_MAGIC;
- split->size = found->size - size - OVERHEAD;
- split->hole = 1;
-
- node_create_foot(split);
-
- u32 new_idx = bin_index(split->size);
-
- node_add(&heap.bins[new_idx], split);
-
- found->size = size;
- node_create_foot(found);
- }
-
- found->hole = 0;
- node_remove(&heap.bins[index], found);
-
- // TODO: Implement expand/contract
- /* struct h_node *wild = wilderness_get(&heap); */
- /* if (wild->size < MIN_WILDERNESS) { */
- /* assert(expand(&heap, 0x1000)); */
- /* } else if (wild->size > MAX_WILDERNESS) { */
- /* assert(contract(&heap, 0x1000)); */
- /* } */
-
- found->prev = NULL;
- found->next = NULL;
- return &found->next;
-}
-
-static void _free(void *p)
-{
- if (!p)
- return;
-
- struct h_bin *list;
- struct h_footer *new_foot, *old_foot;
-
- struct h_node *head = (struct h_node *)((char *)p - 12);
- assert(head->magic == HEAP_MAGIC && head->hole == 0);
- if (head == (struct h_node *)(u32 *)heap.start) {
- head->hole = 1;
- node_add(&heap.bins[bin_index(head->size)], head);
- return;
- }
-
- struct h_node *next = (struct h_node *)((char *)node_foot(head) + sizeof(struct h_footer));
- struct h_footer *f = (struct h_footer *)((char *)head - sizeof(struct h_footer));
- struct h_node *prev = f->header;
-
- if (prev->hole) {
- list = &heap.bins[bin_index(prev->size)];
- node_remove(list, prev);
-
- prev->size += OVERHEAD + head->size;
- new_foot = node_foot(head);
- new_foot->header = prev;
-
- head = prev;
- }
-
- if (next->hole) {
- list = &heap.bins[bin_index(next->size)];
- node_remove(list, next);
-
- head->size += OVERHEAD + next->size;
-
- old_foot = node_foot(next);
- old_foot->header = 0;
- next->size = 0;
- next->hole = 0;
-
- new_foot = node_foot(head);
- new_foot->header = head;
- }
-
- head->hole = 1;
- node_add(&heap.bins[bin_index(head->size)], head);
-}
-
-#elif defined(userspace)
-
-#define kmalloc(n) (void *)sys1(SYS_MALLOC, n)
-#define kfree(ptr) (void)(sys1(SYS_FREE, (int)ptr))
-
-static void *_malloc(u32 size)
-{
- return kmalloc(size);
-}
-
-static void _free(void *ptr)
-{
- kfree(ptr);
-}
-
-#endif
-
-#ifdef kernel
-#define PREFIX "K"
-#define FUNC printf
-#else
-#define PREFIX "U"
-#define FUNC log
-#endif
-
-void *zalloc(u32 size)
-{
- void *ret = malloc(size);
- memset(ret, 0, size);
- return ret;
-}
-
-// Naive realloc implementation - TODO!
-void *realloc(void *ptr, u32 size)
-{
- if (!ptr)
- return malloc(size);
-
- FUNC("Realloc not implemented!\n");
- return NULL;
- /* // This could work; untested
- struct h_node *node = (struct h_node *)((char *)ptr - 12);
- u32 old_size = node->size;
-
- void *new = malloc(size);
- memcpy(new, ptr, old_size);
-
- free(ptr);
- return new;
- */
-}
-
-void *malloc_debug(u32 size, const char *file, int line, const char *func, const char *inp)
-{
- assert(size < (100 << 20)); // Don't brag with memory pls
- void *ret = _malloc(size);
-
- (void)file;
- (void)line;
- (void)func;
- (void)inp;
- /* FUNC(PREFIX "MALLOC\t%s:%d: %s: 0x%x %dB (%s)\n", file, line, func, ret, size, inp); */
- return ret;
-}
-
-void free_debug(void *ptr, const char *file, int line, const char *func, const char *inp)
-{
- if (ptr)
- _free(ptr);
-
- (void)file;
- (void)line;
- (void)func;
- (void)inp;
- /* FUNC(PREFIX "FREE\t%s:%d: %s: 0x%x (%s)\n", file, line, func, ptr, inp); */
-}