Diffstat (limited to 'sys/arch/amd64/cpu')
 sys/arch/amd64/cpu/cpu.c      |   6
 sys/arch/amd64/cpu/idt.S      |  13
 sys/arch/amd64/cpu/lapic.c    |  24
 sys/arch/amd64/cpu/mmu.c      | 139
 sys/arch/amd64/cpu/mp.c       |   9
 sys/arch/amd64/cpu/spinlock.c |   6
 6 files changed, 188 insertions(+), 9 deletions(-)
diff --git a/sys/arch/amd64/cpu/cpu.c b/sys/arch/amd64/cpu/cpu.c
index 9047064..be2bf7d 100644
--- a/sys/arch/amd64/cpu/cpu.c
+++ b/sys/arch/amd64/cpu/cpu.c
@@ -16,15 +16,15 @@
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPKERNE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LKERNS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * PKERNSIBILITY OF SUCH DAMAGE.
+ * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/types.h>
diff --git a/sys/arch/amd64/cpu/idt.S b/sys/arch/amd64/cpu/idt.S
index b7169d8..729d958 100644
--- a/sys/arch/amd64/cpu/idt.S
+++ b/sys/arch/amd64/cpu/idt.S
@@ -28,12 +28,10 @@
  */
 
 #include <md/kfence.h>
+#include <md/idt.h>
 
 #define KERNEL_CS	0x08
 
-#define INT_GATE	0x8E
-#define TRAP_GATE	0x8F
-
 .macro set_trap vector, isr
 	movq \vector, %rdi
 	movq $TRAP_GATE, %rsi
@@ -308,6 +306,15 @@ page_fault:
 	jmp 1b
 	hlt
 
+	.globl lapic_tmr_isr
+lapic_tmr_isr:
+	KFENCE
+	push_frame 0x81
+	nop
+	pop_frame 0x81
+	KFENCE
+	iretq
+
 .section .data
 	.align 8
 IDT:
diff --git a/sys/arch/amd64/cpu/lapic.c b/sys/arch/amd64/cpu/lapic.c
index acf2573..076ddd3 100644
--- a/sys/arch/amd64/cpu/lapic.c
+++ b/sys/arch/amd64/cpu/lapic.c
@@ -43,6 +43,7 @@
 #include <md/i8254.h>
 #include <md/cpuid.h>
 #include <md/msr.h>
+#include <md/idt.h>
 
 #define dtrace(fmt, ...) \
 	trace("lapic: " fmt, ##__VA_ARGS__)
@@ -82,6 +83,7 @@
 /* Accessed via RDMSR/WRMSR */
 #define X2APIC_MSR_BASE 0x00000800
 
+extern void lapic_tmr_isr(void);
 static struct acpi_madt *madt;
 
 /*
@@ -337,6 +339,17 @@ lapic_enable(struct mcb *mcb)
 	lapic_write(mcb, LAPIC_REG_SVR, svr | 0xFF);
 }
 
+static void
+lapic_timer_oneshot(struct mcb *mcb, size_t count)
+{
+	if (mcb == NULL) {
+		return;
+	}
+
+	lapic_tmr_enable(mcb, LAPIC_TMR_ONESHOT);
+	lapic_write(mcb, LAPIC_REG_TICR, count);
+}
+
 uint32_t
 lapic_read_id(struct mcb *mcb)
 {
@@ -350,6 +363,16 @@ lapic_read_id(struct mcb *mcb)
 }
 
 void
+lapic_oneshot_usec(struct mcb *mcb, size_t usec)
+{
+	if (mcb == NULL) {
+		return;
+	}
+
+	lapic_timer_oneshot(mcb, (mcb->lapic_tmr_freq / 1000000) * usec);
+}
+
+void
 lapic_init(void)
 {
 	struct cpu_info *ci;
@@ -375,4 +398,5 @@ lapic_init(void)
 
 	lapic_enable(mcb);
 	mcb->lapic_tmr_freq = lapic_tmr_clbr(mcb);
+	idt_set_gate(LAPIC_TMR_VEC, INT_GATE, (uintptr_t)lapic_tmr_isr, 0);
 }
diff --git a/sys/arch/amd64/cpu/mmu.c b/sys/arch/amd64/cpu/mmu.c
index 128cfcf..ec6d071 100644
--- a/sys/arch/amd64/cpu/mmu.c
+++ b/sys/arch/amd64/cpu/mmu.c
@@ -35,6 +35,8 @@
 #include <vm/vm.h>
 #include <vm/phys.h>
 #include <md/vas.h>
+#include <lib/stdbool.h>
+#include <lib/string.h>
 
 /*
  * See Intel SDM Vol 3A, Section 4.5, Table 4-19
@@ -51,6 +53,143 @@
 #define PTE_GLOBAL	BIT(8)	/* Global / sticky map */
 #define PTE_NX		BIT(63)	/* Execute-disable */
 
+typedef enum {
+	PMAP_PML1,
+	PMAP_PML2,
+	PMAP_PML3,
+	PMAP_PML4
+} pagelevel_t;
+
+/*
+ * Invalidate a single TLB entry
+ */
+static inline void
+pmap_invlpg(uintptr_t va)
+{
+	__asm(
+		"invlpg (%0)"
+		:
+		: "r" (va)
+		: "memory"
+	);
+}
+
+/*
+ * Convert protection flags to page table flags
+ */
+static size_t
+pmap_prot_conv(uint16_t prot)
+{
+	size_t pte = PTE_P | PTE_NX;
+
+	if (ISSET(prot, PROT_WRITE))
+		pte |= PTE_RW;
+	if (ISSET(prot, PROT_EXEC))
+		pte &= ~PTE_NX;
+	if (ISSET(prot, PROT_USER))
+		pte |= PTE_US;
+
+	return pte;
+}
+
+/*
+ * Get the index into a pagemap level using a linear
+ * address and the desired level
+ */
+static inline size_t
+pmap_get_index(uintptr_t va, pagelevel_t level)
+{
+	switch (level) {
+	case PMAP_PML4:
+		return (va >> 39) & 0x1FF;
+	case PMAP_PML3:
+		return (va >> 30) & 0x1FF;
+	case PMAP_PML2:
+		return (va >> 21) & 0x1FF;
+	case PMAP_PML1:
+		return (va >> 12) & 0x1FF;
+	}
+
+	panic("vm: bad pagemap level in %s()\n", __func__);
+}
+
+/*
+ * Perform full or partial linear address translation
+ * via iterative descent.
+ *
+ * @vas: Virtual address space to target
+ * @va: Virtual address to translate
+ * @en_alloc: If true, allocate new levels if needed
+ * @lvl: Requested level
+ */
+static uintptr_t *
+pmap_get_level(struct mmu_vas *vas, uintptr_t va, bool en_alloc, pagelevel_t lvl)
+{
+	uintptr_t *pmap, phys;
+	uint8_t *tmp;
+	size_t index;
+	pagelevel_t curlvl;
+
+	if (vas == NULL) {
+		return NULL;
+	}
+
+	/* Start here */
+	phys = vas->cr3;
+	pmap = PHYS_TO_VIRT(phys);
+	curlvl = PMAP_PML4;
+
+	/* Start moving down */
+	while (curlvl > lvl) {
+		index = pmap_get_index(va, curlvl--);
+		if (ISSET(pmap[index], PTE_P)) {
+			pmap = PHYS_TO_VIRT(pmap[index] & PTE_ADDR_MASK);
+			continue;
+		}
+		if (!en_alloc) {
+			return NULL;
+		}
+
+		/* Allocate a new level */
+		phys = vm_phys_alloc(1);
+		if (phys == 0) {
+			return NULL;
+		}
+
+		/* Ensure it is zeroed */
+		tmp = PHYS_TO_VIRT(phys);
+		memset(tmp, 0, 4096);
+
+		pmap[index] = phys | (PTE_P | PTE_RW | PTE_US);
+		pmap = (uintptr_t *)tmp;
+	}
+
+	return pmap;
+}
+
+int
+mu_pmap_map(struct mmu_vas *vas, uintptr_t pa, uintptr_t va,
+    uint16_t prot, pagesize_t ps)
+{
+	uintptr_t *pgtbl;
+	size_t index, pte_flags;
+
+	if (vas == NULL || ps > PMAP_PML4) {
+		return -EINVAL;
+	}
+
+	pgtbl = pmap_get_level(vas, va, true, PMAP_PML1);
+	if (pgtbl == NULL) {
+		return -ENOMEM;
+	}
+
+	index = pmap_get_index(va, PMAP_PML1);
+	pte_flags = pmap_prot_conv(prot);
+	pgtbl[index] = pa | pte_flags;
+	pmap_invlpg(va);
+	return 0;
+}
+
 int
 mu_pmap_readvas(struct mmu_vas *vas)
 {
diff --git a/sys/arch/amd64/cpu/mp.c b/sys/arch/amd64/cpu/mp.c
index 5ab7815..c370906 100644
--- a/sys/arch/amd64/cpu/mp.c
+++ b/sys/arch/amd64/cpu/mp.c
@@ -27,6 +27,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/atomic.h>
 #include <sys/types.h>
 #include <sys/cdefs.h>
 #include <sys/errno.h>
@@ -101,6 +102,7 @@ struct mtrr_save {
 
 static struct ap_bootspace bs;
 static volatile size_t ap_sync = 0;
+static volatile uint32_t ap_count = 0;
 __section(".trampoline") static char ap_code[4096];
 
 static void
@@ -263,6 +265,7 @@ cpu_lm_entry(void)
 	);
 
 	cpu_loinit();
+	atomic_inc_int(&ap_count);
 	for (;;) {
 		__asmv("cli; hlt");
 	}
@@ -371,4 +374,10 @@ cpu_start_aps(struct cpu_info *ci)
 		cpu_lapic_cb,
 		lapic_read_id(mcb)
 	);
+
+	if (ap_count == 0) {
+		dtrace("cpu only has a single core\n");
+	} else {
+		dtrace("%d processor(s) up\n", ap_count);
+	}
 }
diff --git a/sys/arch/amd64/cpu/spinlock.c b/sys/arch/amd64/cpu/spinlock.c
index 8c037a9..1cfa088 100644
--- a/sys/arch/amd64/cpu/spinlock.c
+++ b/sys/arch/amd64/cpu/spinlock.c
@@ -16,15 +16,15 @@
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPKERNE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LKERNS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * PKERNSIBILITY OF SUCH DAMAGE.
+ * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
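
Note on the new LAPIC one-shot path (a sketch, not part of the commit): once
lapic_init() has calibrated lapic_tmr_freq and hooked lapic_tmr_isr at
LAPIC_TMR_VEC (0x81, judging by the push_frame operand), arming a single timer
interrupt could look like the snippet below. Only lapic_oneshot_usec() and
struct mcb come from the diff; the surrounding function is hypothetical.

	/* Hypothetical caller: request one LAPIC timer interrupt ~1 ms out */
	static void
	tick_in_one_ms(struct mcb *mcb)
	{
		/*
		 * lapic_oneshot_usec() scales the calibrated frequency
		 * (ticks/sec) down to the requested microsecond count and
		 * writes TICR once; the interrupt then arrives through
		 * lapic_tmr_isr on vector 0x81.
		 */
		lapic_oneshot_usec(mcb, 1000);
	}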
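
Likewise for the new mapping path (again a sketch, not from the commit):
mu_pmap_map() walks PML4 down to PML1 via pmap_get_level(), allocating any
missing intermediate tables, then writes the final PTE and invalidates the TLB
entry. The helper below is hypothetical, and the 0 passed as the pagesize_t
argument is an assumption for "base 4 KiB page"; the guard in mu_pmap_map()
only rejects values above PMAP_PML4.

	/* Hypothetical caller: back one 4 KiB region with a fresh frame */
	static int
	map_scratch_page(struct mmu_vas *vas, uintptr_t va)
	{
		uintptr_t pa;

		/* Grab a single physical frame to back `va' */
		pa = vm_phys_alloc(1);
		if (pa == 0)
			return -ENOMEM;

		/* Kernel read/write, non-executable mapping */
		return mu_pmap_map(vas, pa, va, PROT_WRITE, 0);
	}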
