idk we're trying out pmm

2025-06-11 17:33:42 -04:00
parent 03aecd514f
commit f216c32f22
6 changed files with 104 additions and 86 deletions


@@ -6,50 +6,3 @@
//extern uint32_t endkernel; // found in link.ld
/**
 * The page directory and page table must be page aligned (4 KiB).
 *
 * Statically allocating them here is a temporary solution; eventually we want a
 * page frame allocator to hand out page frames properly, but this works for now.
 */
uint32_t page_directory[PAGE_DIRECTORY_ENTRIES] __attribute__((aligned(4096)));
uint32_t first_page_table[PAGE_TABLE_ENTRIES] __attribute__((aligned(4096)));
void setup_page_table(void)
{
    uint32_t i;
    // Identity map the first 4 MiB: entry i covers the physical frame at i * 0x1000.
    for (i = 0; i < PAGE_TABLE_ENTRIES; i++) {
        first_page_table[i] = (i * 0x1000) | 3; // supervisor, r/w, present
    }
    puts("test");
}
void setup_page_dir(void)
{
    setup_page_table();
    /**
     * Now that we have a page directory, we need to blank it.
     *
     * The page directory has exactly 1024 entries. We set each entry to "not
     * present", so that if the MMU looks for that page table, it sees that it
     * is not there yet.
     */
    int i;
    for (i = 0; i < PAGE_DIRECTORY_ENTRIES; i++) {
        // This sets the following flags on the entries:
        // Supervisor: only kernel-mode can access them
        // Write enabled: they can be both read from and written to
        // Not present: the page table is not present
        page_directory[i] = 0x00000002;
    }
    // Install the first page table (identity mapping the first 4 MiB) as entry 0.
    page_directory[0] = ((uint32_t) first_page_table) | 3;
}
void setup_paging(void)
{
    setup_page_dir();
    load_page_directory(page_directory);
    enable_paging();
}
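/*
 * Note (hedged sketch, not part of this commit): load_page_directory() and
 * enable_paging() are assembly helpers defined elsewhere in the tree; on i386
 * they are assumed to do roughly the following. The _sketch names are
 * illustrative only.
 */
static inline void load_page_directory_sketch(uint32_t *pd)
{
    __asm__ volatile("mov %0, %%cr3" :: "r"(pd) : "memory"); // CR3 points at the page directory
}
static inline void enable_paging_sketch(void)
{
    uint32_t cr0;
    __asm__ volatile("mov %%cr0, %0" : "=r"(cr0));
    cr0 |= 0x80000000;                                        // set CR0.PG
    __asm__ volatile("mov %0, %%cr0" :: "r"(cr0) : "memory");
}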

kernel/arch/pmm/pmm.c Normal file (66 additions)

@@ -0,0 +1,66 @@
#include <kernel/pmm.h>
/**
 * What I want to do is create a linked list of all the memory regions:
 *
 * there's one at the very start of memory,
 *
 * one at 1 MB,
 *
 * and then one provided by RAM.
 *
 * The idea is to access memory through this list: given a bit/block number, we
 * start at the first node in the list; if the block is out of that node's range,
 * we traverse to the next node and try to find it there, and we continue until
 * we either run out of memory regions or find the location.
 */
#define PMM_PAGE_SIZE 4096
struct pmm_mem_info {
    uint64_t startaddr; // start address of this memory region
    uint64_t len;       // in kb
    uint32_t* bitmap;   // one allocation bit per PMM_PAGE_SIZE block
};
#define PMM_GET_MEM_BLOCKS(x) ((x).len / PMM_PAGE_SIZE)
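/*
 * Hedged note, not in the original commit: as written, PMM_GET_MEM_BLOCKS
 * treats len as a byte count, e.g. a 16 MiB region (len = 16777216) yields
 * 16777216 / 4096 = 4096 blocks, i.e. 4096 / 32 = 128 bitmap words. If len is
 * really kept in KB, as the struct comment says, the divisor would need
 * adjusting (len * 1024 / PMM_PAGE_SIZE).
 */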
struct pmm_mem_info main_mem;
// Set the allocation bit for block `bit` in the given bitmap.
void __pmm_set(int bit, uint32_t* bitmap)
{
    bitmap[bit / 32] |= (1 << (bit % 32));
}
// Clear the allocation bit for block `bit` in the given bitmap.
void __pmm_unset(int bit, uint32_t* bitmap)
{
    bitmap[bit / 32] &= ~(1 << (bit % 32));
}
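/*
 * Hedged addition, not part of this commit: a matching query helper, since
 * anything scanning for free blocks needs to read bits as well as flip them.
 * The name __pmm_test is illustrative only.
 */
int __pmm_test(int bit, uint32_t* bitmap)
{
    return (bitmap[bit / 32] >> (bit % 32)) & 1;
}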
// Return the index of the first clear (assumed free) bit in this region's bitmap, or -1 if none.
int __pmm_first_free(struct pmm_mem_info mem_block)
{
    for (uint32_t i = 0; i < PMM_GET_MEM_BLOCKS(mem_block); i++) {
        if (!(mem_block.bitmap[i / 32] & (1u << (i % 32))))
            return (int)i;
    }
    return -1;
}
void pmm_set(int bit)
{
    /**
     * Here we want to check whether the bit lies beyond this region's length,
     * and if so subtract this region's block count from the bit so that we
     * land in the right place in the next region's bitmap.
     *
     * i.e. (length / 4096) == amount of blocks in that specific mem region;
     * if (bit > amt of blocks), go to the next node, subtract the amount of
     * blocks from bit, and pass that along (a hedged sketch of that lookup
     * follows pmm_set below).
     *
     * Below is merely a temporary solution that only touches the main region.
     */
    __pmm_set(bit, main_mem.bitmap);
}
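/*
 * Hedged sketch, not part of this commit: one way the region walk described in
 * pmm_set's comment could look, assuming the regions get chained together. The
 * node type and function name below (pmm_region_node, __pmm_region_for_bit)
 * are illustrative, not something that exists in the tree yet.
 */
struct pmm_region_node {
    struct pmm_mem_info info;
    struct pmm_region_node* next;
};

static struct pmm_region_node* __pmm_region_for_bit(struct pmm_region_node* node, int* bit)
{
    while (node != NULL) {
        uint32_t blocks = (uint32_t) PMM_GET_MEM_BLOCKS(node->info);
        if ((uint32_t) *bit < blocks)
            return node;   // this region owns the block; *bit is now region-relative
        *bit -= blocks;    // compensate for the blocks we skipped
        node = node->next; // try the next memory region
    }
    return NULL;           // ran out of memory regions
}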
void pmm_unset(int bit)
{
    // TODO: same region lookup as in pmm_set
    __pmm_unset(bit, main_mem.bitmap);
}