author     Marvin Borner    2021-03-14 16:12:44 +0100
committer  GitHub           2021-03-14 16:12:44 +0100
commit     268f3ccdb90ab4b9bd70ca176478797aae97ca05 (patch)
tree       2dbc3e52d90dab4aae8021773f09b6b72a74b8cb /kernel/features
parent     4309322f9d2b3e31421a3cc5399ab1f4368e0652 (diff)
parent     6dec7db5158447b66f31a3f786ce2916cab83cec (diff)
Added memory management using paging
This was quite a roller-coaster and most things are slower now, but it works and is way more secure. I still need to implement things like shared memory for the WM/GUI system, but other than that everything is supported.
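For context, the new mm.c below walks the standard two-level 32-bit paging structures: the top ten bits of a virtual address select a page-directory entry, the next ten select a page-table entry, and the low twelve are the offset into the 4 KiB page. A minimal sketch of that translation, mirroring virtual_to_physical() from the diff — the bitfield layout here is an assumption inferred from how the fields are used, since the real page_dir_entry/page_table_entry definitions live in mm.h, which this commit page does not show:

```c
#include <stdint.h>

#define PAGE_SIZE 0x1000u
#define PDI(vaddr) (((vaddr) >> 22) & 0x03ff) /* page directory index: bits 22-31 */
#define PTI(vaddr) (((vaddr) >> 12) & 0x03ff) /* page table index: bits 12-21 */

/* Assumed layout; mm.h (not in this diff) holds the real entry definitions. */
union page_entry {
	struct {
		uint32_t present : 1;
		uint32_t writable : 1;
		uint32_t user : 1;
		uint32_t flags : 9;    /* accessed/dirty/cache bits, unused in this sketch */
		uint32_t address : 20; /* physical frame number (paddr >> 12) */
	} bits;
	uint32_t uint;
};

/* vaddr -> paddr lookup, as virtual_to_physical() does in mm.c */
uint32_t translate(const union page_entry *dir, uint32_t vaddr)
{
	union page_entry pde = dir[PDI(vaddr)];
	if (!pde.bits.present)
		return 0;

	/* Page tables are identity-mapped in this kernel, so the frame number doubles as a pointer */
	const union page_entry *table = (const union page_entry *)(pde.bits.address * PAGE_SIZE);
	union page_entry pte = table[PTI(vaddr)];
	if (!pte.bits.present)
		return 0;

	return (pte.bits.address * PAGE_SIZE) + (vaddr & (PAGE_SIZE - 1));
}
```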
Diffstat (limited to 'kernel/features')
-rw-r--r--  kernel/features/fs.c       56
-rw-r--r--  kernel/features/load.c     34
-rw-r--r--  kernel/features/mm.c      498
-rw-r--r--  kernel/features/proc.asm    6
-rw-r--r--  kernel/features/proc.c     84
-rw-r--r--  kernel/features/syscall.c  52
6 files changed, 663 insertions, 67 deletions
diff --git a/kernel/features/fs.c b/kernel/features/fs.c index 687d7ad..c8ad317 100644 --- a/kernel/features/fs.c +++ b/kernel/features/fs.c @@ -62,13 +62,15 @@ static struct mount_info *vfs_recursive_find(char *path) static struct mount_info *vfs_find_mount_info(const char *path) { - assert(path[0] == '/'); + if (path[0] != '/') + return NULL; return vfs_recursive_find(strdup(path)); } struct device *vfs_find_dev(const char *path) { - assert(path[0] == '/'); + if (path[0] != '/') + return NULL; struct mount_info *m = vfs_find_mount_info(path); if (m->dev->vfs->type == VFS_DEVFS) // TODO: ? return device_get_by_name(path + strlen(m->path) + 1); @@ -120,7 +122,7 @@ s32 vfs_mount(struct device *dev, const char *path) s32 vfs_read(const char *path, void *buf, u32 offset, u32 count) { - /* printf("%s READ: %s\n", proc_current()->name, path); */ + /* printf("%s READ: %s\n", proc_current() ? proc_current()->name : "Unknown", path); */ if (!count) return 0; @@ -131,7 +133,8 @@ s32 vfs_read(const char *path, void *buf, u32 offset, u32 count) path++; struct mount_info *m = vfs_find_mount_info(path); - assert(m && m->dev && m->dev->vfs && m->dev->vfs->read && m->dev->vfs->perm); + if (!(m && m->dev && m->dev->vfs && m->dev->vfs->read && m->dev->vfs->perm)) + return -1; u32 len = strlen(m->path); if (len > 1) @@ -145,7 +148,7 @@ s32 vfs_read(const char *path, void *buf, u32 offset, u32 count) s32 vfs_write(const char *path, void *buf, u32 offset, u32 count) { - /* printf("%s WRITE: %s\n", proc_current()->name, path); */ + /* printf("%s WRITE: %s\n", proc_current() ? proc_current()->name : "Unknown", path); */ if (!count) return 0; @@ -156,7 +159,8 @@ s32 vfs_write(const char *path, void *buf, u32 offset, u32 count) path++; struct mount_info *m = vfs_find_mount_info(path); - assert(m && m->dev && m->dev->vfs && m->dev->vfs->write && m->dev->vfs->perm); + if (!(m && m->dev && m->dev->vfs && m->dev->vfs->write && m->dev->vfs->perm)) + return -1; u32 len = strlen(m->path); if (len > 1) @@ -168,6 +172,25 @@ s32 vfs_write(const char *path, void *buf, u32 offset, u32 count) return m->dev->vfs->write(path, buf, offset, count, m->dev); } +s32 vfs_ioctl(const char *path, u32 request, void *arg1, void *arg2, void *arg3) +{ + while (*path == ' ') + path++; + + struct mount_info *m = vfs_find_mount_info(path); + if (!(m && m->dev && m->dev->vfs && m->dev->vfs->ioctl && m->dev->vfs->perm)) + return -1; + + u32 len = strlen(m->path); + if (len > 1) + path += len; + + if (!m->dev->vfs->perm(path, VFS_WRITE, m->dev) && !proc_super()) + return -1; + + return m->dev->vfs->ioctl(path, request, arg1, arg2, arg3, m->dev); +} + s32 vfs_stat(const char *path, struct stat *buf) { while (*path == ' ') @@ -177,12 +200,16 @@ s32 vfs_stat(const char *path, struct stat *buf) return -1; struct mount_info *m = vfs_find_mount_info(path); - assert(m && m->dev && m->dev->vfs && m->dev->vfs->stat); + if (!(m && m->dev && m->dev->vfs && m->dev->vfs->stat && m->dev->vfs->perm)) + return -1; u32 len = strlen(m->path); if (len > 1) path += len; + if (!m->dev->vfs->perm(path, VFS_WRITE, m->dev) && !proc_super()) + return -1; + return m->dev->vfs->stat(path, buf, m->dev); } @@ -192,7 +219,8 @@ s32 vfs_wait(const char *path, u32 func_ptr) path++; struct mount_info *m = vfs_find_mount_info(path); - assert(m && m->dev && m->dev->vfs); + if (!(m && m->dev && m->dev->vfs)) + return -1; // Default wait if (!m->dev->vfs->wait) { @@ -280,10 +308,19 @@ static s32 devfs_read(const char *path, void *buf, u32 offset, u32 count, struct { struct device 
*target = device_get_by_name(path + 1); if (!target || !target->read) - return 0; + return -1; return target->read(buf, offset, count, dev); } +static s32 devfs_ioctl(const char *path, u32 request, void *arg1, void *arg2, void *arg3, + struct device *dev) +{ + struct device *target = device_get_by_name(path + 1); + if (!target || !target->ioctl) + return -1; + return target->ioctl(request, arg1, arg2, arg3, dev); +} + static u8 devfs_perm(const char *path, enum vfs_perm perm, struct device *dev) { (void)path; @@ -309,6 +346,7 @@ void device_install(void) struct vfs *vfs = zalloc(sizeof(*vfs)); vfs->type = VFS_DEVFS; vfs->read = devfs_read; + vfs->ioctl = devfs_ioctl; vfs->perm = devfs_perm; vfs->ready = devfs_ready; struct device *dev = zalloc(sizeof(*dev)); diff --git a/kernel/features/load.c b/kernel/features/load.c index 8a4aae3..9e6db79 100644 --- a/kernel/features/load.c +++ b/kernel/features/load.c @@ -3,28 +3,36 @@ #include <fs.h> #include <load.h> #include <mem.h> +#include <mm.h> #include <str.h> -void proc_load(struct proc *proc, void *data) -{ - u32 stack = (u32)malloc(0x2000) + 0x1000; - - proc->regs.ebp = (u32)stack; - proc->regs.useresp = (u32)stack; - proc->regs.eip = (u32)data; - proc->entry = (u32)data; -} +#define PROC_STACK_SIZE 0x4000 int bin_load(const char *path, struct proc *proc) { struct stat s = { 0 }; vfs_stat(path, &s); - char *data = malloc(s.size); - if (!vfs_read(path, data, 0, s.size)) - return 1; strcpy(proc->name, path); - proc_load(proc, data); + struct page_dir *prev; + memory_backup_dir(&prev); + memory_switch_dir(proc->page_dir); + + u32 size = PAGE_ALIGN_UP(s.size); + u32 data = (u32)memory_alloc(proc->page_dir, size, MEMORY_USER | MEMORY_CLEAR); + + if (!vfs_read(proc->name, (void *)data, 0, s.size)) { + memory_switch_dir(prev); + return 1; + } + + u32 stack = (u32)memory_alloc(proc->page_dir, PROC_STACK_SIZE, MEMORY_USER | MEMORY_CLEAR); + proc->regs.ebp = stack; + proc->regs.useresp = stack; + proc->regs.eip = data; + proc->entry = data; + + memory_switch_dir(prev); return 0; } diff --git a/kernel/features/mm.c b/kernel/features/mm.c new file mode 100644 index 0000000..9eca438 --- /dev/null +++ b/kernel/features/mm.c @@ -0,0 +1,498 @@ +// Hugely inspired by the implementation in skiftOS: MIT License, Copyright (c) 2020 N. Van Bossuyt +// MIT License, Copyright (c) 2021 Marvin Borner + +#include <assert.h> +#include <cpu.h> +#include <def.h> +#include <mem.h> +#include <mm.h> +#include <print.h> + +static struct page_dir kernel_dir ALIGNED(PAGE_SIZE) = { 0 }; +static struct page_table kernel_tables[256] ALIGNED(PAGE_SIZE) = { 0 }; + +/** + * Lowlevel paging + */ + +/*static void paging_disable(void) +{ + cr0_set(cr0_get() | 0x7fffffff); +}*/ + +static void paging_enable(void) +{ + cr0_set(cr0_get() | 0x80000000); +} + +static void paging_switch_dir(u32 dir) +{ + cr3_set(dir); +} + +extern void paging_invalidate_tlb(void); + +void page_fault_handler(struct regs *r) +{ + // Check error code + const char *type = (r->err_code & 1) ? "present" : "non-present"; + const char *operation = (r->err_code & 2) ? "write" : "read"; + const char *super = (r->err_code & 4) ? 
"User" : "Super"; + + // Check cr2 address + u32 vaddr; + __asm__ volatile("movl %%cr2, %%eax" : "=a"(vaddr)); + struct proc *proc = proc_current(); + struct page_dir *dir = NULL; + if (proc && proc->page_dir) { + dir = proc->page_dir; + printf("Stack is at %x, entry at %x\n", virtual_to_physical(dir, proc->regs.ebp), + virtual_to_physical(dir, proc->entry)); + } else { + dir = &kernel_dir; + } + u32 paddr = virtual_to_physical(dir, vaddr); + + // Print! + printf("%s process tried to %s a %s page at [vaddr=%x; paddr=%x]\n", super, operation, type, + vaddr, paddr); + + isr_panic(r); +} + +/** + * Physical + */ + +static u32 memory_used = 0; +static u32 memory_total = 0; +static u32 best_bet = 0; +static u8 memory[PAGE_COUNT * PAGE_COUNT / 8] = { 0 }; + +static u8 physical_page_is_used(u32 addr) +{ + u32 page = addr / PAGE_SIZE; + return memory[page / 8] & (1 << (page % 8)); +} + +static void physical_page_set_used(u32 address) +{ + u32 page = address / PAGE_SIZE; + + if (page == best_bet) + best_bet++; + + memory[page / 8] |= 1 << (page % 8); +} + +static void physical_page_set_free(u32 address) +{ + u32 page = address / PAGE_SIZE; + + if (page < best_bet) + best_bet = page; + + memory[page / 8] &= ~(1 << (page % 8)); +} + +static void physical_set_used(struct memory_range range) +{ + assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size)); + + for (u32 i = 0; i < range.size / PAGE_SIZE; i++) { + u32 addr = range.base + i * PAGE_SIZE; + if (!physical_page_is_used(addr)) { + memory_used += PAGE_SIZE; + physical_page_set_used(addr); + } + } +} + +static void physical_set_free(struct memory_range range) +{ + assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size)); + + for (u32 i = 0; i < range.size / PAGE_SIZE; i++) { + u32 addr = range.base + i * PAGE_SIZE; + + if (physical_page_is_used(addr)) { + memory_used -= PAGE_SIZE; + physical_page_set_free(addr); + } + } +} + +static u8 physical_is_used(struct memory_range range) +{ + assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size)); + + for (u32 i = 0; i < range.size / PAGE_SIZE; i++) { + u32 addr = range.base + i * PAGE_SIZE; + + if (physical_page_is_used(addr)) + return 1; + } + + return 0; +} + +struct memory_range physical_alloc(u32 size) +{ + assert(PAGE_ALIGNED(size)); + + for (u32 i = best_bet; i < ((memory_total - size) / PAGE_SIZE); i++) { + struct memory_range range = memory_range(i * PAGE_SIZE, size); + + if (!physical_is_used(range)) { + physical_set_used(range); + return range; + } + } + + panic("Out of physical memory!\n"); + return memory_range(0, 0); +} + +void physical_free(struct memory_range range) +{ + assert(PAGE_ALIGNED(range.base) && PAGE_ALIGNED(range.size)); + physical_set_free(range); +} + +/** + * Virtual + */ + +#define PDI(vaddr) (((vaddr) >> 22) & 0x03ff) +#define PTI(vaddr) (((vaddr) >> 12) & 0x03ff) + +u8 virtual_present(struct page_dir *dir, u32 vaddr) +{ + u32 pdi = PDI(vaddr); + union page_dir_entry *dir_entry = &dir->entries[pdi]; + if (!dir_entry->bits.present) + return 0; + + struct page_table *table = (struct page_table *)(dir_entry->bits.address * PAGE_SIZE); + + u32 pti = PTI(vaddr); + union page_table_entry *table_entry = &table->entries[pti]; + + return table_entry->bits.present; +} + +u32 virtual_to_physical(struct page_dir *dir, u32 vaddr) +{ + u32 pdi = PDI(vaddr); + union page_dir_entry *dir_entry = &dir->entries[pdi]; + if (!dir_entry->bits.present) + return 0; + + struct page_table *table = (struct page_table *)(dir_entry->bits.address * PAGE_SIZE); + + u32 pti = PTI(vaddr); + 
union page_table_entry *table_entry = &table->entries[pti]; + if (!table_entry->bits.present) + return 0; + + return (table_entry->bits.address * PAGE_SIZE) + (vaddr & (PAGE_SIZE - 1)); +} + +void virtual_map(struct page_dir *dir, struct memory_range prange, u32 vaddr, u32 flags) +{ + for (u32 i = 0; i < prange.size / PAGE_SIZE; i++) { + u32 offset = i * PAGE_SIZE; + + u32 pdi = PDI(vaddr + offset); + union page_dir_entry *dir_entry = &dir->entries[pdi]; + struct page_table *table = + (struct page_table *)(dir_entry->bits.address * PAGE_SIZE); + + if (!dir_entry->bits.present) { + table = memory_alloc_identity(dir, MEMORY_CLEAR); + dir_entry->bits.present = 1; + dir_entry->bits.writable = 1; + dir_entry->bits.user = 1; + dir_entry->bits.address = (u32)(table) >> 12; + } + + u32 pti = PTI(vaddr + offset); + union page_table_entry *table_entry = &table->entries[pti]; + table_entry->bits.present = 1; + table_entry->bits.writable = 1; + table_entry->bits.user = flags & MEMORY_USER; + table_entry->bits.address = (prange.base + offset) >> 12; + } + + paging_invalidate_tlb(); +} + +struct memory_range virtual_alloc(struct page_dir *dir, struct memory_range prange, u32 flags) +{ + u8 user = flags & MEMORY_USER; + u32 vaddr = 0; + u32 size = 0; + + for (u32 i = (user ? 256 : 1) * PAGE_COUNT; i < (user ? PAGE_COUNT : 256) * PAGE_COUNT; + i++) { + u32 addr = i * PAGE_SIZE; + if (!virtual_present(dir, addr)) { + if (size == 0) + vaddr = addr; + + size += PAGE_SIZE; + + if (size == prange.size) { + virtual_map(dir, prange, vaddr, flags); + return memory_range(vaddr, size); + } + } else { + size = 0; + } + } + + panic("Out of virtual memory!\n"); + return memory_range(0, 0); +} + +void virtual_free(struct page_dir *dir, struct memory_range vrange) +{ + for (u32 i = 0; i < vrange.size / PAGE_SIZE; i++) { + u32 offset = i * PAGE_SIZE; + + u32 pdi = PDI(vrange.base + offset); + union page_dir_entry *dir_entry = &dir->entries[pdi]; + if (!dir_entry->bits.present) + continue; + + struct page_table *table = + (struct page_table *)(dir_entry->bits.address * PAGE_SIZE); + + u32 pti = PTI(vrange.base + offset); + union page_table_entry *table_entry = &table->entries[pti]; + + if (table_entry->bits.present) + table_entry->uint = 0; + } + + paging_invalidate_tlb(); +} + +struct page_dir *virtual_create_dir(void) +{ + struct page_dir *dir = memory_alloc(&kernel_dir, sizeof(*dir), MEMORY_CLEAR); + + memset(dir, 0, sizeof(*dir)); + + for (u32 i = 0; i < 256; i++) { + union page_dir_entry *dir_entry = &dir->entries[i]; + + dir_entry->bits.present = 1; + dir_entry->bits.writable = 1; + dir_entry->bits.user = 0; + dir_entry->bits.address = (u32)&kernel_tables[i] / PAGE_SIZE; + } + + return dir; +} + +void virtual_destroy_dir(struct page_dir *dir) +{ + assert(dir != &kernel_dir); + + for (u32 i = 256; i < PAGE_COUNT; i++) { + union page_dir_entry *dir_entry = &dir->entries[i]; + if (dir_entry->bits.present) { + struct page_table *table = + (struct page_table *)(dir_entry->bits.address * PAGE_SIZE); + for (u32 j = 0; j < PAGE_COUNT; j++) { + union page_table_entry *table_entry = &table->entries[j]; + if (table_entry->bits.present) { + u32 paddr = table_entry->bits.address * PAGE_SIZE; + physical_free(memory_range(paddr, PAGE_SIZE)); + } + } + + memory_free(&kernel_dir, memory_range((u32)table, sizeof(*table))); + } + } + + memory_free(&kernel_dir, memory_range((u32)dir, sizeof(*dir))); +} + +struct page_dir *virtual_kernel_dir(void) +{ + return &kernel_dir; +} + +/** + * Memory wrappers + */ + +void *memory_alloc(struct 
page_dir *dir, u32 size, u32 flags) +{ + assert(PAGE_ALIGNED(size)); + + if (!size) + goto err; + + struct memory_range prange = physical_alloc(size); + if (prange.size == 0) + goto err; + + u32 vaddr = virtual_alloc(dir, prange, flags).base; + if (!vaddr) { + physical_free(prange); + goto err; + } + + if (flags & MEMORY_CLEAR) + memset((void *)vaddr, 0, size); + + return (void *)vaddr; + +err: + print("Memory allocation error!\n"); + return 0; +} + +void *memory_alloc_identity(struct page_dir *dir, u32 flags) +{ + for (u32 i = 1; i < 256 * PAGE_COUNT; i++) { + struct memory_range range = memory_range(i * PAGE_SIZE, PAGE_SIZE); + + if (!virtual_present(dir, range.base) && !physical_is_used(range)) { + physical_set_used(range); + virtual_map(dir, range, range.base, flags); + if (flags & MEMORY_CLEAR) + memset((void *)range.base, 0, PAGE_SIZE); + return (void *)range.base; + } + } + + return 0; +} + +void memory_free(struct page_dir *dir, struct memory_range vrange) +{ + assert(PAGE_ALIGNED(vrange.base) && PAGE_ALIGNED(vrange.size)); + + for (u32 i = 0; i < vrange.size / PAGE_SIZE; i++) { + u32 vaddr = vrange.base + i * PAGE_SIZE; + if (virtual_present(dir, vaddr)) { + struct memory_range page_prange = + memory_range(virtual_to_physical(dir, vaddr), PAGE_SIZE); + struct memory_range page_vrange = memory_range(vaddr, PAGE_SIZE); + physical_free(page_prange); + virtual_free(dir, page_vrange); + } + } +} + +void memory_map_identity(struct page_dir *dir, struct memory_range prange, u32 flags) +{ + assert(PAGE_ALIGNED(prange.base) && PAGE_ALIGNED(prange.size)); + + physical_set_used(prange); + virtual_map(dir, prange, prange.base, flags); + if (flags & MEMORY_CLEAR) + memset((void *)prange.base, 0, prange.size); +} + +void memory_switch_dir(struct page_dir *dir) +{ + paging_switch_dir(virtual_to_physical(&kernel_dir, (u32)dir)); +} + +void memory_backup_dir(struct page_dir **backup) +{ + struct proc *proc = proc_current(); + struct page_dir *dir = proc ? 
proc->page_dir : virtual_kernel_dir(); + *backup = dir; +} + +struct memory_range memory_range_from(u32 base, u32 size) +{ + u32 align = PAGE_SIZE - base % PAGE_SIZE; + + if (base % PAGE_SIZE == 0) { + align = 0; + } + + base += align; + size -= align; + + size -= size % PAGE_SIZE; + + return memory_range(base, size); +} + +struct memory_range memory_range_around(u32 base, u32 size) +{ + u32 align = base % PAGE_SIZE; + + base -= align; + size += align; + + size += PAGE_SIZE - size % PAGE_SIZE; + + return memory_range(base, size); +} + +extern u32 kernel_start; +extern u32 kernel_end; +static struct memory_range kernel_memory_range(void) +{ + return memory_range_around((u32)&kernel_start, (u32)&kernel_end - (u32)&kernel_start); +} + +void memory_install(struct mem_info *mem_info, struct vid_info *vid_info) +{ + for (struct mmap_boot *p = mem_info->start; (u32)(p - mem_info->start) < mem_info->size; + p++) { + if (p->hbase || !p->acpi || !p->type) + continue; + + u32 size = p->lsize; + if (p->hsize) + size = U32_MAX - p->lbase; + + /* printf("Memory region: %x-%x\n", p->lbase, p->lbase + size); */ + if (p->type == MEMORY_AVAILABLE) { + physical_set_free(memory_range_around(p->lbase, size / PAGE_SIZE)); + memory_total += size; + } else if (p->type == MEMORY_DEFECT) { + printf("Defect memory at 0x%x-0x%x!\n", p->lbase, p->lbase + size); + } + } + + for (u32 i = 0; i < 256; i++) { + union page_dir_entry *dir_entry = &kernel_dir.entries[i]; + dir_entry->bits.present = 1; + dir_entry->bits.writable = 1; + dir_entry->bits.user = 0; + dir_entry->bits.address = (u32)&kernel_tables[i] / PAGE_SIZE; + } + + memory_used = 0; + printf("Detected memory: %dKiB (%dMiB)\n", memory_total >> 10, memory_total >> 20); + + // Map kernel + memory_map_identity(&kernel_dir, kernel_memory_range(), MEMORY_NONE); + + // Map kernel stack + memory_map_identity(&kernel_dir, memory_range_around(STACK_START - STACK_SIZE, STACK_SIZE), + MEMORY_NONE); + + // Map VBE data + memory_map_identity(&kernel_dir, memory_range_around((u32)vid_info->vbe, 0x1000), + MEMORY_NONE); + + // Unmap NULL byte/page + struct memory_range zero = memory_range(0, PAGE_SIZE); + virtual_free(&kernel_dir, zero); + physical_set_used(zero); + + memory_switch_dir(&kernel_dir); + paging_enable(); +} diff --git a/kernel/features/proc.asm b/kernel/features/proc.asm index 1a2ba65..dfc3448 100644 --- a/kernel/features/proc.asm +++ b/kernel/features/proc.asm @@ -28,3 +28,9 @@ proc_jump_userspace: push dword [_eip] iret + +global paging_invalidate_tlb +paging_invalidate_tlb: + mov eax, cr3 + mov cr3, eax + ret diff --git a/kernel/features/proc.c b/kernel/features/proc.c index cdbe8b1..b93f7c8 100644 --- a/kernel/features/proc.c +++ b/kernel/features/proc.c @@ -6,6 +6,7 @@ #include <fs.h> #include <load.h> #include <mem.h> +#include <mm.h> #include <print.h> #include <proc.h> #include <stack.h> @@ -19,7 +20,7 @@ struct list *proc_list = NULL; struct node *idle_proc = NULL; struct node *current = NULL; -// TODO: Use less memcpy and only copy relevant registers +// TODO: Use less memcpy and only copy relevant registers (rewrite for efficiency argh) // TODO: 20 priority queues (https://www.kernel.org/doc/html/latest/scheduler/sched-nice-design.html) void scheduler(struct regs *regs) { @@ -56,6 +57,7 @@ void scheduler(struct regs *regs) } } + memory_switch_dir(((struct proc *)current->data)->page_dir); memcpy(regs, &((struct proc *)current->data)->regs, sizeof(struct regs)); if (regs->cs != GDT_USER_CODE_OFFSET) { @@ -74,12 +76,6 @@ void scheduler(struct regs *regs) 
/* printf("{%d}", ((struct proc *)current->data)->pid); */ } -static void kernel_idle(void) -{ - while (1) - ; -} - void proc_print(void) { struct node *node = proc_list->head; @@ -103,7 +99,7 @@ u8 proc_super(void) { struct proc *proc = proc_current(); if (proc) - return proc->super; + return proc->priv == PROC_PRIV_ROOT || proc->priv == PROC_PRIV_KERNEL; else if (current_pid == 0) return 1; // Kernel has super permissions else @@ -161,6 +157,9 @@ void proc_yield(struct regs *r) void proc_enable_waiting(u32 id, enum proc_wait_type type) { + struct page_dir *dir_bak; + memory_backup_dir(&dir_bak); + struct proc *proc_bak = proc_current(); if (!proc_bak) return; @@ -183,8 +182,11 @@ void proc_enable_waiting(u32 id, enum proc_wait_type type) struct regs *r = &p->regs; u32 (*func)(u32, u32, u32, u32) = (u32(*)(u32, u32, u32, u32))w->ids[i].func_ptr; - if (w->ids[i].func_ptr) + if (w->ids[i].func_ptr) { + memory_switch_dir(p->page_dir); r->eax = func(r->ebx, r->ecx, r->edx, r->esi); + memory_switch_dir(dir_bak); + } memset(&w->ids[i], 0, sizeof(w->ids[i])); p->wait.id_cnt--; p->state = PROC_RUNNING; @@ -237,20 +239,37 @@ end: p->state = PROC_SLEEPING; } -struct proc *proc_make(void) +struct proc *proc_make(enum proc_priv priv) { struct proc *proc = zalloc(sizeof(*proc)); proc->pid = current_pid++; - proc->super = 0; + proc->priv = priv; proc->messages = stack_new(); proc->state = PROC_RUNNING; + if (priv == PROC_PRIV_KERNEL) + proc->page_dir = virtual_kernel_dir(); + else + proc->page_dir = virtual_create_dir(); + if (current) list_add(proc_list, proc); return proc; } +void proc_stack_push(struct proc *proc, u32 data) +{ + struct page_dir *prev; + memory_backup_dir(&prev); + memory_switch_dir(proc->page_dir); + + proc->regs.useresp -= sizeof(data); + *(u32 *)proc->regs.useresp = data; + + memory_switch_dir(prev); +} + // TODO: Procfs needs a simpler interface structure (memcmp and everything sucks) static const char *procfs_parse_path(const char **path, u32 *pid) @@ -286,6 +305,11 @@ static enum stream_defaults procfs_stream(const char *path) } } +struct procfs_message { + u8 *data; + u32 size; +}; + static s32 procfs_write(const char *path, void *buf, u32 offset, u32 count, struct device *dev) { u32 pid = 0; @@ -299,7 +323,10 @@ static s32 procfs_write(const char *path, void *buf, u32 offset, u32 count, stru if (!memcmp(path, "msg", 4)) { void *msg_data = malloc(count); memcpy(msg_data, buf, count); - stack_push_bot(p->messages, msg_data); // TODO: Use offset + struct procfs_message *msg = malloc(sizeof(*msg)); + msg->data = msg_data; + msg->size = count; + stack_push_bot(p->messages, msg); // TODO: Use offset proc_enable_waiting(pid, PROC_WAIT_MSG); return count; } else if (!memcmp(path, "io/", 3)) { @@ -321,7 +348,7 @@ static s32 procfs_write(const char *path, void *buf, u32 offset, u32 count, stru } } - printf("%s - off: %d, cnt: %d, buf: %x, dev %x\n", path, offset, count, buf, dev); + printf("ERR: %s - off: %d, cnt: %d, buf: %x, dev %x\n", path, offset, count, buf, dev); return -1; } @@ -352,12 +379,13 @@ static s32 procfs_read(const char *path, void *buf, u32 offset, u32 count, struc if (stack_empty(p->messages)) { return -1; // This shouldn't happen } else { - u8 *msg = stack_pop(p->messages); + struct procfs_message *msg = stack_pop(p->messages); if (!msg) return -1; - memcpy(buf, msg + offset, count); + memcpy(buf, msg->data + offset, MIN(count, msg->size)); + free(msg->data); free(msg); - return count; + return MIN(count, msg->size); } } else if (!memcmp(path, "io/", 3)) { path += 
3; @@ -465,32 +493,26 @@ void proc_init(void) vfs_mount(dev, "/proc/"); // Idle proc - struct proc *kernel_proc = proc_make(); - void (*func)(void) = kernel_idle; - proc_load(kernel_proc, *(void **)&func); - strcpy(kernel_proc->name, "idle"); + struct proc *kernel_proc = proc_make(PROC_PRIV_NONE); + bin_load("/bin/idle", kernel_proc); kernel_proc->state = PROC_SLEEPING; idle_proc = list_add(proc_list, kernel_proc); - struct node *new = list_add(proc_list, proc_make()); + // Init proc (root) + struct node *new = list_add(proc_list, proc_make(PROC_PRIV_ROOT)); bin_load("/bin/init", new->data); current = new; + proc_stack_push(new->data, 0); _eip = ((struct proc *)new->data)->regs.eip; _esp = ((struct proc *)new->data)->regs.useresp; - ((struct proc *)new->data)->super = 1; - - u32 argc = 2; - char **argv = malloc(sizeof(*argv) * (argc + 1)); - argv[0] = strdup("init"); - argv[1] = (char *)boot_passed->vbe; - argv[2] = NULL; - - ((u32 *)_esp)[0] = argc; // First argument (argc) - ((u32 *)_esp)[1] = (u32)argv; // Second argument (argv) printf("Jumping to userspace!\n"); + memory_switch_dir(((struct proc *)new->data)->page_dir); + + // You're waiting for a train. A train that will take you far away... proc_jump_userspace(); + while (1) { }; } diff --git a/kernel/features/syscall.c b/kernel/features/syscall.c index 61c7479..68bcbaa 100644 --- a/kernel/features/syscall.c +++ b/kernel/features/syscall.c @@ -5,6 +5,7 @@ #include <interrupts.h> #include <load.h> #include <mem.h> +#include <mm.h> #include <net.h> #include <print.h> #include <proc.h> @@ -25,12 +26,13 @@ static void syscall_handler(struct regs *r) loop(); break; } - case SYS_MALLOC: { - r->eax = (u32)malloc(r->ebx); + case SYS_ALLOC: { + r->eax = (u32)memory_alloc(proc_current()->page_dir, r->ebx, + MEMORY_CLEAR | MEMORY_USER); break; } case SYS_FREE: { - free((void *)r->ebx); + memory_free(proc_current()->page_dir, memory_range(r->ebx, r->ecx)); break; } case SYS_STAT: { @@ -52,6 +54,11 @@ static void syscall_handler(struct regs *r) r->eax = (u32)vfs_write((char *)r->ebx, (void *)r->ecx, r->edx, r->esi); break; } + case SYS_IOCTL: { + r->eax = (u32)vfs_ioctl((char *)r->ebx, r->ecx, (void *)r->edx, (void *)r->esi, + (void *)r->edi); + break; + } case SYS_POLL: { s32 ret = vfs_poll((const char **)r->ebx); if (ret == PROC_MAX_WAIT_IDS + 1) @@ -62,17 +69,10 @@ static void syscall_handler(struct regs *r) } case SYS_EXEC: { char *path = (char *)r->ebx; - struct proc *proc = proc_make(); + struct proc *proc = proc_make(PROC_PRIV_NONE); r->eax = (u32)bin_load(path, proc); - u32 argc = 3; // TODO: Add argc evaluator - char **argv = malloc(sizeof(*argv) * (argc + 1)); - argv[0] = (char *)r->ecx; - argv[1] = (char *)r->edx; - argv[2] = (char *)r->esi; - argv[3] = (char *)r->edi; - argv[4] = NULL; - ((u32 *)proc->regs.useresp)[0] = argc; - ((u32 *)proc->regs.useresp)[1] = (u32)argv; + // TODO: Reimplement argc,argv + proc_stack_push(proc, 0); if (r->eax) proc_exit(proc, (int)r->eax); proc_yield(r); @@ -82,6 +82,30 @@ static void syscall_handler(struct regs *r) proc_exit(proc_current(), (int)r->ebx); break; } + case SYS_BOOT: { // TODO: Move + if (r->ebx != SYS_BOOT_MAGIC || !proc_super()) { + r->eax = -1; + break; + } + switch (r->ecx) { + case SYS_BOOT_REBOOT: + print("Rebooting...\n"); + outb(0x64, 0xfe); + __asm__ volatile("ud2"); + break; + case SYS_BOOT_SHUTDOWN: + print("Shutting down...\n"); + outw(0xB004, 0x2000); + outw(0x604, 0x2000); + outw(0x4004, 0x3400); + outb(0x64, 0xfe); + __asm__ volatile("ud2"); + break; + default: + r->eax = 
-1; + } + break; + } case SYS_YIELD: { proc_yield(r); break; @@ -123,7 +147,7 @@ static void syscall_handler(struct regs *r) break; } default: { - print("Unknown syscall!\n"); + printf("Unknown syscall %d!\n", num); break; } } |
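One structural consequence of virtual_create_dir() and virtual_alloc() above is a fixed address-space split: every new page directory copies its first 256 entries from the shared kernel_tables, so that region is identical in all processes, while MEMORY_USER allocations are carved out of the rest and released per process by virtual_destroy_dir(). Assuming the usual 4 KiB pages and 1024-entry tables (PAGE_SIZE and PAGE_COUNT come from mm.h, which is not part of this diff), the boundaries work out as follows:

```c
/* Address-space layout implied by mm.c; PAGE_SIZE/PAGE_COUNT values are assumed. */
#define PAGE_SIZE  0x1000u
#define PAGE_COUNT 1024u

/* Kernel heap: virtual_alloc() without MEMORY_USER scans directory entries 1..255,
 * i.e. 4 MiB .. 1 GiB, backed by the shared kernel_tables in every process. */
#define KERNEL_ALLOC_START (1u * PAGE_COUNT * PAGE_SIZE)   /* 0x00400000 */
#define KERNEL_ALLOC_END   (256u * PAGE_COUNT * PAGE_SIZE) /* 0x40000000 */

/* User space: MEMORY_USER allocations scan entries 256..1023, i.e. 1 GiB .. 4 GiB,
 * private to each page_dir and torn down by virtual_destroy_dir(). */
#define USER_ALLOC_START (256u * PAGE_COUNT * PAGE_SIZE)   /* 0x40000000 */
```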