Diffstat (limited to 'sys')
225 files changed, 42353 insertions, 819 deletions
diff --git a/sys/arch/aarch64/aarch64/acpi_machdep.c b/sys/arch/aarch64/aarch64/acpi_machdep.c new file mode 100644 index 0000000..f077de3 --- /dev/null +++ b/sys/arch/aarch64/aarch64/acpi_machdep.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <dev/acpi/acpi.h> +#include <dev/acpi/acpivar.h> + +int +acpi_init_madt(void) +{ + return 0; +} diff --git a/sys/arch/aarch64/aarch64/exception.c b/sys/arch/aarch64/aarch64/exception.c new file mode 100644 index 0000000..d6f1f97 --- /dev/null +++ b/sys/arch/aarch64/aarch64/exception.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/syslog.h> +#include <sys/param.h> +#include <sys/cdefs.h> +#include <machine/cdefs.h> +#include <machine/exception.h> + +#define pr_trace(fmt, ...) kprintf("exception: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +static inline void +log_esr_class(uint8_t class) +{ + switch (class) { + case EC_WF: + pr_error("trapped WF\n"); + break; + case EC_MCRMRC: + pr_error("trapped MCR/MRC\n"); + break; + case EC_MCRRC: + pr_trace("trapped MCRR/MRRC\n"); + break; + case EC_LDCSTC: + pr_error("trapped LDC/STC\n"); + break; + case EC_SVE: + pr_trace("trapped SVE/SIMD/FP operation\n"); + break; + case EC_BRE: + pr_error("ibt: bad branch target\n"); + break; + case EC_ILLX: + pr_error("illegal execution state\n"); + break; + case EC_SVC64: + /* TODO */ + pr_error("supervisor call (TODO)!!\n"); + break; + case EC_PCALIGN: + pr_error("PC alignment fault\n"); + break; + case EC_DABORT: + case EC_EDABORT: + pr_error("data abort\n"); + break; + case EC_SPALIGN: + pr_error("SP alignment fault\n"); + break; + case EC_SERR: + pr_error("system error\n"); + break; + default: + pr_error("unknown exception\n"); + } +} + +static void +regdump(struct trapframe *tf, uint64_t elr) +{ + kprintf(OMIT_TIMESTAMP + "X0=%p X1=%p X2=%p\n" + "X3=%p X4=%p X5=%p\n" + "X6=%p X7=%p X8=%p\n" + "X9=%p X10=%p X11=%p\n" + "X12=%p X13=%p X14=%p\n" + "X15=%p X16=%p X17=%p\n" + "X18=%p X19=%p X20=%p\n" + "X21=%p X22=%p X23=%p\n" + "X24=%p X25=%p X26=%p\n" + "X27=%p X28=%p X29=%p\n" + "X30=%p\n" + "ELR=%p\n", + tf->x0, tf->x1, tf->x2, tf->x3, + tf->x4, tf->x5, tf->x6, tf->x7, + tf->x8, tf->x9, tf->x10, tf->x11, + tf->x12, tf->x13, tf->x14, tf->x15, + tf->x16, tf->x17, tf->x18, tf->x19, + tf->x20, tf->x21, tf->x22, tf->x23, + tf->x24, tf->x25, tf->x26, tf->x27, + tf->x28, tf->x29, tf->x30, elr); +} + +/* + * Handle an exception + * + * @esr: Copy of the Exception Syndrome Register + */ +void +handle_exception(struct trapframe *tf) +{ + uint8_t class; + + class = (tf->esr >> 26) & 0x3F; + log_esr_class(class); + regdump(tf, tf->elr); + for (;;) { + md_hlt(); + } +} diff --git a/sys/arch/aarch64/aarch64/intr.c b/sys/arch/aarch64/aarch64/intr.c new file mode 100644 index 0000000..5fd2439 --- /dev/null +++ b/sys/arch/aarch64/aarch64/intr.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <machine/intr.h> + +void * +intr_register(const char *name, const struct intr_hand *ih) +{ + /* TODO: Stub */ + return NULL; +} diff --git a/sys/arch/amd64/isa/i8042.S b/sys/arch/aarch64/aarch64/locore.S index 123d3a5..2155991 100644 --- a/sys/arch/amd64/isa/i8042.S +++ b/sys/arch/aarch64/aarch64/locore.S @@ -27,11 +27,10 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include <machine/frameasm.h> - .text - .globl i8042_kb_isr -INTRENTRY(i8042_kb_isr, handle_kb) -handle_kb: - call i8042_kb_event - retq + .globl md_cpu_init +md_cpu_init: + ldr x0, =__vectab + msr vbar_el1, x0 + ret + diff --git a/sys/arch/aarch64/aarch64/machdep.c b/sys/arch/aarch64/aarch64/machdep.c new file mode 100644 index 0000000..33d7c42 --- /dev/null +++ b/sys/arch/aarch64/aarch64/machdep.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
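handle_exception() in exception.c above classifies a trap by shifting the EC field out of the saved ESR. As a sketch of that decoding under the architectural ESR_EL1 layout (EC in bits [31:26], ISS in bits [24:0]; the helper names are illustrative, not from the tree):

	static inline uint8_t
	esr_class(uint64_t esr)
	{
		return (esr >> 26) & 0x3F;	/* exception class, fed to log_esr_class() */
	}

	static inline uint32_t
	esr_iss(uint64_t esr)
	{
		return esr & 0x01FFFFFF;	/* class-specific syndrome bits */
	}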
+ */ + +#include <sys/syslog.h> +#include <sys/panic.h> +#include <machine/cpu.h> +#include <machine/sync.h> + +struct cpu_info g_bsp_ci = {0}; + +void md_cpu_init(void); + +void +cpu_halt_others(void) +{ + /* TODO: STUB */ + return; +} + +void +serial_init(void) +{ + /* TODO: STUB */ + return; +} + +void +md_backtrace(void) +{ + /* TODO: STUB */ + return; +} + +void +serial_putc(char c) +{ + /* TODO: STUB */ + return; +} + +int +md_sync_all(void) +{ + /* TODO: STUB */ + return 0; +} + +void +cpu_halt_all(void) +{ + /* TODO: Stub */ + for (;;); +} + +/* + * Get the descriptor for the currently + * running processor. + */ +struct cpu_info * +this_cpu(void) +{ + struct cpu_info *ci; + + __ASMV("mrs %0, tpidr_el1" : "=r" (ci)); + return ci; +} + +void +cpu_startup(struct cpu_info *ci) +{ + ci->self = ci; + __ASMV("msr tpidr_el1, %0" :: "r" (ci)); + md_cpu_init(); +} diff --git a/sys/arch/aarch64/aarch64/mp.c b/sys/arch/aarch64/aarch64/mp.c new file mode 100644 index 0000000..4e07566 --- /dev/null +++ b/sys/arch/aarch64/aarch64/mp.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <machine/cpu.h> + +#define pr_trace(fmt, ...) kprintf("cpu_mp: " fmt, ##__VA_ARGS__) + +void +mp_bootstrap_aps(struct cpu_info *ci) +{ + for (;;); +} diff --git a/sys/arch/aarch64/aarch64/pmap.c b/sys/arch/aarch64/aarch64/pmap.c new file mode 100644 index 0000000..870ef80 --- /dev/null +++ b/sys/arch/aarch64/aarch64/pmap.c @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <sys/param.h> +#include <sys/panic.h> +#include <machine/vas.h> +#include <vm/pmap.h> +#include <vm/physmem.h> +#include <vm/vm.h> + +/* Memory types for MAIR_ELx */ +#define MT_NORMAL 0x00 +#define MT_NORMAL_UC 0x02 +#define MT_DEVICE 0x03 + +/* Memory attributes */ +#define MEM_DEV_NGNRNE 0x00 +#define MEM_DEV_NVNRE 0x04 +#define MEM_NORMAL_UC 0x44 +#define MEM_NORMAL 0xFF + +#define MT_ATTR(idx, attr) ((attr) << (8 * (idx))) + +/* + * Descriptor bits for page table entries + * + * @PTE_VALID: Must be set to be valid + * @PTE_TABLE: Table (1), block (0) + * @PTE_USER: User access allowed + * @PTE_READONLY: Read-only + * @PTE_ISH: Inner sharable + * @PTE_AF: Accessed flag + * @PTE_XN: Execute never + */ +#define PTE_ADDR_MASK 0x0000FFFFFFFFF000 +#define PTE_VALID BIT(0) +#define PTE_TABLE BIT(1) +#define PTE_USER BIT(6) +#define PTE_READONLY BIT(7) +#define PTE_ISH (3 << 8) +#define PTE_AF BIT(10) +#define PTE_XN BIT(54) + +/* + * Write the EL1 Memory Attribute Indirection + * Register. + * + * @val: Value to write + * + * XXX: Refer to the ARMv8 Reference Manual section + * D7.2.70 + */ +static inline void +mair_el1_write(uint64_t val) +{ + __ASMV("msr mair_el1, %0" + : + : "r" (val) + : "memory" + ); +} + +static inline void +tlb_flush(vaddr_t va) +{ + __ASMV( + "tlbi vaae1is, %0\n" + "dsb ish\n" + "isb\n" + : + : "r" (va >> 12) + : "memory" + ); +} + +static uint64_t +pmap_prot_to_pte(vm_prot_t prot) +{ + uint64_t pte_flags = 0; + + pte_flags |= (PTE_VALID | PTE_TABLE | PTE_AF); + pte_flags |= (PTE_XN | PTE_READONLY | PTE_ISH); + + if (ISSET(prot, PROT_WRITE)) + pte_flags &= ~PTE_READONLY; + if (ISSET(prot, PROT_EXEC)) + pte_flags &= ~PTE_XN; + if (ISSET(prot, PROT_USER)) + pte_flags |= PTE_USER; + + return pte_flags; +} + +/* + * Returns an index for a specific page map + * label based on an input address. 
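+ *
+ * For example (illustrative address): with
+ * ia = (1UL << 39) | (2UL << 30) | (3UL << 21) | (4UL << 12),
+ * levels 0 through 3 yield the indices 1, 2, 3 and 4, each level
+ * consuming one 9-bit slice (512 entries per table with 4 KiB
+ * granules).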
+ */ +static size_t +pmap_level_idx(vaddr_t ia, uint8_t level) +{ + switch (level) { + case 0: return (ia >> 39) & 0x1FF; + case 1: return (ia >> 30) & 0x1FF; + case 2: return (ia >> 21) & 0x1FF; + case 3: return (ia >> 12) & 0x1FF; + default: panic("pmap_level_idx: bad index\n"); + } + + __builtin_unreachable(); +} + +/* + * Extract a level from a pagemap + * + * @level: Current pagemap level + * @ia: Input virtual address + * @pmap: Current level to extract from + * @alloc: Set to true to allocate new entries + * + * XXX: `level_idx' can be grabbed with pmap_level_idx(). + */ +static uintptr_t * +pmap_extract(uint8_t level, vaddr_t ia, vaddr_t *pmap, bool alloc) +{ + uintptr_t next, level_alloc; + uint8_t idx; + + if (pmap == NULL) { + return NULL; + } + + idx = pmap_level_idx(ia, level); + next = pmap[idx]; + + if (ISSET(next, PTE_VALID)) { + next = next & PTE_ADDR_MASK; + return PHYS_TO_VIRT(next); + } + + /* + * Nothing to grab at this point, we'll need to + * allocate our own entry. However, if we are + * told not to allocate anything, just return + * NULL. + */ + if (!alloc) { + return NULL; + } + + level_alloc = vm_alloc_frame(1); + if (level_alloc == 0) { + return NULL; + } + + pmap[idx] = (level_alloc | PTE_VALID | PTE_USER | PTE_TABLE); + return PHYS_TO_VIRT(level_alloc); +} + +/* + * Get the lowest pagemap table referring to a 4 KiB + * frame. + * + * @ttrb: Translation table base to use + * @ia: Input virtual address + * @alloc: If true, allocate new pagemap entries as needed + * @res: Result goes here + */ +static int +pmap_get_tbl(paddr_t ttbrn, vaddr_t ia, bool alloc, uintptr_t **res) +{ + vaddr_t *root; + uintptr_t *l1, *l2, *l3; + + root = PHYS_TO_VIRT(ttbrn); + + l1 = pmap_extract(0, ia, root, alloc); + if (l1 == NULL) { + return -1; + } + + l2 = pmap_extract(1, ia, l1, alloc); + if (l2 == NULL) { + return -1; + } + + l3 = pmap_extract(2, ia, l2, alloc); + if (l3 == NULL) { + return -1; + } + + *res = l3; + return 0; +} + +struct vas +pmap_read_vas(void) +{ + struct vas vas = {0}; + + __ASMV( + "mrs %0, ttbr0_el1\n" + "mrs %1, ttbr1_el1\n" + : "=r" (vas.ttbr0_el1), + "=r" (vas.ttbr1_el1) + : + : "memory" + ); + + return vas; +} + +void +pmap_switch_vas(struct vas vas) +{ + __ASMV( + "msr ttbr0_el1, %0\n" + "msr ttbr1_el1, %1\n" + : + : "r" (vas.ttbr0_el1), + "r" (vas.ttbr1_el1) + : "memory" + ); + return; +} + +int +pmap_map(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot) +{ + paddr_t ttbrn = vas.ttbr0_el1; + uint64_t pte_flags; + uintptr_t *tbl; + int error; + + if (va >= VM_HIGHER_HALF) { + ttbrn = vas.ttbr1_el1; + } + + if ((error = pmap_get_tbl(ttbrn, va, true, &tbl)) < 0) { + return error; + } + if (__unlikely(tbl == NULL)) { + return -1; + } + + pte_flags = pmap_prot_to_pte(prot); + tbl[pmap_level_idx(va, 3)] = pa | pte_flags; + tlb_flush(va); + return 0; +} + +int +pmap_unmap(struct vas vas, vaddr_t va) +{ + paddr_t ttbrn = vas.ttbr0_el1; + uintptr_t *tbl; + int error; + + if (va >= VM_HIGHER_HALF) { + ttbrn = vas.ttbr1_el1; + } + + if ((error = pmap_get_tbl(ttbrn, va, true, &tbl)) < 0) { + return error; + } + if (__unlikely(tbl == NULL)) { + return -1; + } + + tbl[pmap_level_idx(va, 3)] = 0; + tlb_flush(va); + return 0; +} + +void +pmap_destroy_vas(struct vas vas) +{ + /* TODO: STUB */ + return; +} + +bool +pmap_is_clean(struct vas vas, vaddr_t va) +{ + /* TODO: STUB */ + return false; +} + +void +pmap_mark_clean(struct vas vas, vaddr_t va) +{ + /* TODO: STUB */ + return; +} + +int +pmap_set_cache(struct vas vas, vaddr_t va, int type) +{ + /* TODO: STUB 
*/ + return 0; +} + +int +pmap_init(void) +{ + uint64_t mair; + + mair = MT_ATTR(MT_NORMAL, MEM_NORMAL) | + MT_ATTR(MT_NORMAL_UC, MEM_NORMAL_UC) | + MT_ATTR(MT_DEVICE, MEM_DEV_NGNRNE); + mair_el1_write(mair); + return 0; +} diff --git a/sys/arch/aarch64/aarch64/proc_machdep.c b/sys/arch/aarch64/aarch64/proc_machdep.c new file mode 100644 index 0000000..cc58af9 --- /dev/null +++ b/sys/arch/aarch64/aarch64/proc_machdep.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/proc.h> + +/* + * MD thread init code + * + * @p: New process. + * @parent: Parent of new process. + * @ip: Instruction pointer. + */ +int +md_spawn(struct proc *p, struct proc *parent, uintptr_t ip) +{ + /* TODO: STUB */ + return 0; +} + +uintptr_t +md_td_stackinit(struct proc *td, void *stack_top, struct exec_prog *prog) +{ + /* TODO: STUB */ + return 0; +} + +void +setregs(struct proc *td, struct exec_prog *prog, uintptr_t stack) +{ + /* TODO: STUB */ + return; +} + +/* + * Startup a user thread. + * + * @td: Thread to start up. + */ +void +md_td_kick(struct proc *td) +{ + /* TODO: STUB */ + for (;;); +} diff --git a/sys/arch/aarch64/aarch64/reboot.c b/sys/arch/aarch64/aarch64/reboot.c new file mode 100644 index 0000000..372012a --- /dev/null +++ b/sys/arch/aarch64/aarch64/reboot.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/reboot.h> +#include <sys/param.h> + +/* + * Typically the reset vector is at address 0 but this can + * be remapped if the vendor is feeling silly. + */ +void(*g_cpu_reboot)(void) = NULL; + +void +cpu_reboot(int method) +{ + g_cpu_reboot(); + for (;;); +} + +/* + * arg0: Method bits + */ +scret_t +sys_reboot(struct syscall_args *scargs) +{ + cpu_reboot(scargs->arg0); + __builtin_unreachable(); +} diff --git a/sys/arch/aarch64/aarch64/vector.S b/sys/arch/aarch64/aarch64/vector.S new file mode 100644 index 0000000..c8f77ca --- /dev/null +++ b/sys/arch/aarch64/aarch64/vector.S @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
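g_cpu_reboot above defaults to NULL so the indirect call in cpu_reboot() lands on the conventional reset vector at address zero. A board whose vendor remapped the vector would be expected to patch the pointer during early init; a sketch (hook name and address are hypothetical):

	extern void (*g_cpu_reboot)(void);

	static void
	board_reset_init(void)
	{
		/* hypothetical: reset vector remapped to 0x4000 on this board */
		g_cpu_reboot = (void (*)(void))0x4000;
	}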
+ */ + +#include <machine/frameasm.h> + +// Vector table entries are aligned at 128 bytes +// giving us 32 exception entries +.macro ventry label + .align 7 + b \label +.endm + + .text +x_sync_elx: + PUSH_XFRAME(TRAPNO_XSYNC) // Synchronous: sp+top @ X0 + bl handle_exception // Handle the exception + POP_XFRAME() // Pop the trapframe +1: hlt #0 // TODO + b 1b + +x_irq_elx: + PUSH_XFRAME(TRAPNO_XIRQ) // IRQ: sp+top @ X0 + bl handle_exception // Handle the exception + POP_XFRAME() // Pop the trapframe +1: hlt #0 // TODO + b 1b + +x_fiq_elx: + PUSH_XFRAME(TRAPNO_XFIQ) // FIQ: sp+top @ X0 + bl handle_exception // Handle the exception + POP_XFRAME() // Pop the trapframe +1: hlt #0 + b 1b + +x_serr_elx: + PUSH_XFRAME(TRAPNO_XSERR) // SERR: sp+top @ X0 + bl handle_exception // Handle the exception + POP_XFRAME() // Pop the trapframe +1: hlt #0 // TODO + b 1b + +x_unimpl: +1: hlt #0 + b 1b + + .align 11 // Table aligned @ 2 KiB + .globl __vectab +__vectab: + // From current EL (w/ SP_EL0) + ventry x_sync_elx + ventry x_irq_elx + ventry x_fiq_elx + ventry x_serr_elx + + // From current EL (w/ SP_ELx > 0) + ventry x_sync_elx + ventry x_irq_elx + ventry x_fiq_elx + ventry x_serr_elx + + // Lower EL with faulting code in AARCH64 + ventry x_sync_elx + ventry x_irq_elx + ventry x_fiq_elx + ventry x_serr_elx + + ventry x_unimpl + ventry x_unimpl + ventry x_unimpl + ventry x_unimpl diff --git a/sys/arch/aarch64/conf/GENERIC b/sys/arch/aarch64/conf/GENERIC new file mode 100644 index 0000000..eeb9d9d --- /dev/null +++ b/sys/arch/aarch64/conf/GENERIC @@ -0,0 +1,10 @@ +// Kernel options +option SERIAL_DEBUG yes // Enable kmsg serial logging +option USER_KMSG yes // Show kmsg in user consoles + +// Kernel constants +setval SCHED_NQUEUE 4 // Number of scheduler queues (for MLFQ) + +// Console attributes +setval CONSOLE_BG 0x000000 +setval CONSOLE_FG 0xB57614 diff --git a/sys/arch/aarch64/conf/link.ld b/sys/arch/aarch64/conf/link.ld new file mode 100644 index 0000000..2aa8c93 --- /dev/null +++ b/sys/arch/aarch64/conf/link.ld @@ -0,0 +1,81 @@ +/* Tell the linker that we want an aarch64 ELF64 output file */ +OUTPUT_FORMAT(elf64-littleaarch64) +OUTPUT_ARCH(aarch64) + +/* We want the symbol main to be our entry point */ +ENTRY(main) + +/* Define the program headers we want so the bootloader gives us the right */ +/* MMU permissions */ +PHDRS +{ + text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */ + rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */ + data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ + dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic PHDR for relocations */ +} + +SECTIONS +{ + /* We wanna be placed in the topmost 2GiB of the address space, for optimisations */ + /* and because that is what the Limine spec mandates. */ + /* Any address in this region will do, but often 0xffffffff80000000 is chosen as */ + /* that is the beginning of the region. */ + . = 0xffffffff80000000; + + .text : { + *(.text .text.*) + } :text + + /* Move to the next memory page for .rodata */ + . += CONSTANT(MAXPAGESIZE); + + .rodata : { + *(.rodata .rodata.*) + } :rodata + + .drivers : { + __drivers_init_start = .; + *(.drivers .drivers) + __drivers_init_end = .; + } :rodata + + .drivers.defer : { + __driversd_init_start = .; + *(.drivers.defer .drivers.defer) + __driversd_init_end = .; + } :rodata + + /* Move to the next memory page for .data */ + . 
+= CONSTANT(MAXPAGESIZE); + + .data : { + *(.data .data.*) + } :data + + /* Dynamic section for relocations, both in its own PHDR and inside data PHDR */ + .dynamic : { + *(.dynamic) + } :data :dynamic + + /* NOTE: .bss needs to be the last thing mapped to :data, otherwise lots of */ + /* unnecessary zeros will be written to the binary. */ + /* If you need, for example, .init_array and .fini_array, those should be placed */ + /* above this. */ + .bss : { + *(.bss .bss.*) + *(COMMON) + } :data + + /* -- Cache line alignment -- */ + . = ALIGN(64); + .data.cacheline_aligned : { + *(.data.cacheline_aligned) + } + + /* Discard .note.* and .eh_frame since they may cause issues on some hosts. */ + /DISCARD/ : { + *(.eh_frame) + *(.note .note.*) + } +} diff --git a/sys/arch/aarch64/pci/pci_machdep.c b/sys/arch/aarch64/pci/pci_machdep.c new file mode 100644 index 0000000..8de6cc9 --- /dev/null +++ b/sys/arch/aarch64/pci/pci_machdep.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <dev/pci/pci.h> + +/* + * Map a BAR into kernel memory. + * + * @dev: Device of BAR to map. + * @barno: BAR number to map. + * @vap: Resulting virtual address. + */ +int +pci_map_bar(struct pci_device *dev, uint8_t barno, void **vap) +{ + /* TODO: STUB */ + return 0; +} + +void +pci_msix_eoi(void) +{ + return; +} + +/* + * Enable MSI-X for a device and allocate an + * interrupt vector. + * + * @dev: Device to enable MSI-X for. + * @intr: MSI-X interrupt descriptor. 
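+ *
+ * (A full implementation would be expected to locate the MSI-X
+ * capability in the device's capability list, map the vector table
+ * through the BAR it names, and program a message address/data pair
+ * for the allocated vector; the routine below is still a stub.)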
+ */ +int +pci_enable_msix(struct pci_device *dev, const struct msi_intr *intr) +{ + /* TODO: STUB */ + return 0; +} diff --git a/sys/arch/amd64/amd64/gdt.c b/sys/arch/amd64/amd64/gdt.c index a8fe54d..40d8f48 100644 --- a/sys/arch/amd64/amd64/gdt.c +++ b/sys/arch/amd64/amd64/gdt.c @@ -29,50 +29,70 @@ #include <machine/gdt.h> -struct gdt_entry g_gdt_data[256] = { +/* + * The GDT should be cache line aligned, since it is accessed every time a + * segment selector is reloaded + */ +__cacheline_aligned struct gdt_entry g_gdt_data[GDT_ENTRY_COUNT] = { /* Null */ {0}, - /* Kernel code (0x8) */ + /* Kernel code (0x08) */ { - .limit = 0x0000, - .base_low = 0x0000, - .base_mid = 0x00, - .access = 0x9A, - .granularity = 0x20, - .base_hi = 0x00 + .limit = 0x0000, + .base_low = 0x0000, + .base_mid = 0x00, + .attributes = GDT_ATTRIBUTE_64BIT_CODE | GDT_ATTRIBUTE_PRESENT | + GDT_ATTRIBUTE_DPL0 | GDT_ATTRIBUTE_NONSYSTEM | + GDT_ATTRIBUTE_EXECUTABLE | GDT_ATTRIBUTE_READABLE, + .base_hi = 0x00 }, /* Kernel data (0x10) */ { - .limit = 0x0000, - .base_low = 0x0000, - .base_mid = 0x00, - .access = 0x92, - .granularity = 0x00, - .base_hi = 0x00 + .limit = 0x0000, + .base_low = 0x0000, + .base_mid = 0x00, + .attributes = GDT_ATTRIBUTE_PRESENT | GDT_ATTRIBUTE_DPL0 | + GDT_ATTRIBUTE_NONSYSTEM | GDT_ATTRIBUTE_WRITABLE, + .base_hi = 0x00 }, /* User code (0x18) */ { - .limit = 0x0000, - .base_low = 0x0000, - .base_mid = 0x00, - .access = 0xFA, - .granularity = 0xAF, - .base_hi = 0x00 + .limit = 0x0000, + .base_low = 0x0000, + .base_mid = 0x00, + .attributes = GDT_ATTRIBUTE_64BIT_CODE | GDT_ATTRIBUTE_PRESENT | + GDT_ATTRIBUTE_DPL3 | GDT_ATTRIBUTE_NONSYSTEM | + GDT_ATTRIBUTE_EXECUTABLE | GDT_ATTRIBUTE_READABLE, + .base_hi = 0x00 }, /* User data (0x20) */ { - .limit = 0x0000, - .base_low = 0x0000, - .base_mid = 0x00, - .access = 0xF2, - .granularity = 0x00, - .base_hi = 0x00 + .limit = 0x0000, + .base_low = 0x0000, + .base_mid = 0x00, + .attributes = GDT_ATTRIBUTE_PRESENT | GDT_ATTRIBUTE_DPL3 | + GDT_ATTRIBUTE_NONSYSTEM | GDT_ATTRIBUTE_WRITABLE, + .base_hi = 0x00 }, - /* TSS segment (0x28) */ - {0} + /* + * TSS segment (0x28) + * + * NOTE: 64-bit TSS descriptors are 16 bytes, equivalent to the size of two + * regular descriptor entries. + * See Intel SPG 3/25 Section 9.2.3 - TSS Descriptor in 64-bit mode. 
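+ * The two zeroed entries below reserve those 16 bytes, which is also
+ * why the __static_assert() further down can check against
+ * 8 * GDT_ENTRY_COUNT: the TSS slot counts as two 8-byte descriptors.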
+ */ + {0}, {0} +}; + +/* Verify that the GDT is of the correct size */ +__static_assert(sizeof(g_gdt_data) == (8 * GDT_ENTRY_COUNT)); + +const struct gdtr g_gdtr = { + .limit = sizeof(g_gdt_data) - 1, + .offset = (uintptr_t)&g_gdt_data[0] }; diff --git a/sys/arch/amd64/amd64/hpet.c b/sys/arch/amd64/amd64/hpet.c index 1670546..3b0ca46 100644 --- a/sys/arch/amd64/amd64/hpet.c +++ b/sys/arch/amd64/amd64/hpet.c @@ -47,6 +47,7 @@ #define CAP_CLK_PERIOD(caps) (caps >> 32) #define FSEC_PER_SECOND 1000000000000000ULL +#define NSEC_PER_SECOND 1000000000ULL #define USEC_PER_SECOND 1000000ULL static void *hpet_base = NULL; @@ -135,6 +136,20 @@ hpet_time_usec(void) } static size_t +hpet_time_nsec(void) +{ + uint64_t period, freq, caps; + uint64_t counter; + + caps = hpet_read(HPET_REG_CAPS); + period = CAP_CLK_PERIOD(caps); + freq = FSEC_PER_SECOND / period; + + counter = hpet_read(HPET_REG_MAIN_COUNTER); + return (counter * NSEC_PER_SECOND) / freq; +} + +static size_t hpet_time_sec(void) { return hpet_time_usec() / USEC_PER_SECOND; @@ -180,6 +195,7 @@ hpet_init(void) timer.usleep = hpet_usleep; timer.nsleep = hpet_nsleep; timer.get_time_usec = hpet_time_usec; + timer.get_time_nsec = hpet_time_nsec; timer.get_time_sec = hpet_time_sec; register_timer(TIMER_GP, &timer); return 0; diff --git a/sys/arch/amd64/amd64/intr.c b/sys/arch/amd64/amd64/intr.c index c31ee3c..685a16d 100644 --- a/sys/arch/amd64/amd64/intr.c +++ b/sys/arch/amd64/amd64/intr.c @@ -31,12 +31,19 @@ #include <sys/param.h> #include <sys/errno.h> #include <sys/panic.h> +#include <sys/cdefs.h> +#include <sys/syslog.h> #include <machine/intr.h> #include <machine/cpu.h> #include <machine/asm.h> +#include <machine/ioapic.h> #include <vm/dynalloc.h> +#include <string.h> -static struct intr_entry *intrs[256] = {0}; +#define pr_trace(fmt, ...) kprintf("intr: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +struct intr_hand *g_intrs[256] = {0}; int splraise(uint8_t s) @@ -67,35 +74,69 @@ splx(uint8_t s) ci->ipl = s; } -int -intr_alloc_vector(const char *name, uint8_t priority) +void * +intr_register(const char *name, const struct intr_hand *ih) { - size_t vec = MAX(priority << IPL_SHIFT, 0x20); - struct intr_entry *intr; + uint32_t vec = MAX(ih->priority << IPL_SHIFT, 0x20); + struct intr_hand *ih_new; + struct intr_data *idp_new; + const struct intr_data *idp; + size_t name_len; /* Sanity check */ - if (vec > NELEM(intrs)) { - return -1; + if (vec > NELEM(g_intrs) || name == NULL) { + return NULL; + } + + ih_new = dynalloc(sizeof(*ih_new)); + if (ih_new == NULL) { + pr_error("could not allocate new interrupt handler\n"); + return NULL; } /* * Try to allocate an interrupt vector. An IPL is made up * of 4 bits so there can be 16 vectors per IPL. + * + * XXX: Vector 0x20 is reserved for the Hyra scheduler, + * vector 0x21 is reserved for the CPU halt IPI, + * and vector 0x22 is reserved for TLB shootdowns. 
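+ *
+ * For example, with the 4-bit IPL field: a priority-2 handler starts
+ * scanning at MAX(2 << 4, 0x20) = 0x20 and can only land on vectors
+ * 0x23-0x2F once the three reserved vectors are skipped, while a
+ * priority-3 handler scans 0x30-0x3F.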
*/ for (int i = vec; i < vec + 16; ++i) { - if (intrs[i] != NULL) { + if (g_intrs[i] != NULL || i < 0x23) { continue; } - intr = dynalloc(sizeof(*intr)); - if (intr == NULL) { - return -ENOMEM; + /* Allocate memory for the name */ + name_len = strlen(name) + 1; + ih_new->name = dynalloc(name_len); + if (ih_new->name == NULL) { + dynfree(ih_new); + pr_trace("could not allocate interrupt name\n"); + return NULL; } - intr->priority = priority; - intrs[i] = intr; - return i; + memcpy(ih_new->name, name, name_len); + idp_new = &ih_new->data; + idp = &ih->data; + + /* Pass the interrupt data */ + idp_new->ihp = ih_new; + idp_new->data_u64 = idp->data_u64; + + /* Setup the new intr_hand */ + ih_new->func = ih->func; + ih_new->priority = ih->priority; + ih_new->irq = ih->irq; + ih_new->vector = i; + g_intrs[i] = ih_new; + + if (ih->irq >= 0) { + ioapic_set_vec(ih->irq, i); + ioapic_irq_unmask(ih->irq); + } + return ih_new; } - return -1; + return NULL; } diff --git a/sys/arch/amd64/amd64/lapic.c b/sys/arch/amd64/amd64/lapic.c index 70d36a5..022592c 100644 --- a/sys/arch/amd64/amd64/lapic.c +++ b/sys/arch/amd64/amd64/lapic.c @@ -340,7 +340,7 @@ lapic_init(void) /* Allocate a vector if needed */ if (lapic_timer_vec == 0) { - lapic_timer_vec = intr_alloc_vector("lapictmr", IPL_CLOCK); + lapic_timer_vec = (IPL_CLOCK << IPL_SHIFT) | 0x20; idt_set_desc(lapic_timer_vec, IDT_INT_GATE, ISR(lapic_tmr_isr), IST_SCHED); } diff --git a/sys/arch/amd64/amd64/lapic_intr.S b/sys/arch/amd64/amd64/lapic_intr.S index e22cbca..5ae8f39 100644 --- a/sys/arch/amd64/amd64/lapic_intr.S +++ b/sys/arch/amd64/amd64/lapic_intr.S @@ -34,6 +34,5 @@ INTRENTRY(lapic_tmr_isr, handle_lapic_tmr) handle_lapic_tmr: call sched_switch // Context switch per every timer IRQ - call i8042_sync // Sometimes needed depending on i8042 quirks call lapic_eoi // Done! Signal that we finished to the Local APIC retq diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c index 07d6cdd..8258f8e 100644 --- a/sys/arch/amd64/amd64/machdep.c +++ b/sys/arch/amd64/amd64/machdep.c @@ -42,24 +42,32 @@ #include <machine/uart.h> #include <machine/sync.h> #include <machine/intr.h> +#include <machine/cdefs.h> #include <machine/isa/i8042var.h> +#define pr_trace(fmt, ...) kprintf("cpu: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) +#define pr_trace_bsp(...) \ + if (!bsp_init) { \ + pr_trace(__VA_ARGS__); \ + } + +#define HALT_VECTOR 0x21 +#define TLB_VECTOR 0x22 + #if defined(__SPECTRE_IBRS) #define SPECTRE_IBRS __SPECTRE_IBRS #else #define SPECTRE_IBRS 0 #endif -static uint8_t halt_vector = 0; - int ibrs_enable(void); +int simd_init(void); void syscall_isr(void); +void pin_isr_load(void); struct cpu_info g_bsp_ci = {0}; -static struct gdtr bsp_gdtr = { - .limit = sizeof(struct gdt_entry) * 256 - 1, - .offset = (uintptr_t)&g_gdt_data[0] -}; +static bool bsp_init = false; __attribute__((__interrupt__)) static void @@ -69,13 +77,34 @@ cpu_halt_isr(void *p) __builtin_unreachable(); } +__attribute__((__interrupt__)) static void -setup_vectors(void) +tlb_shootdown_isr(void *p) { - if (halt_vector == 0) { - halt_vector = intr_alloc_vector("cpu-halt", IPL_HIGH); + struct cpu_info *ci; + int ipl; + + /* + * Get the current CPU and check if we even + * need a shootdown. If `tlb_shootdown' is + * unset, this is not for us. 
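+ *
+ * (The sender side, cpu_shootdown_tlb() in this file, stores
+ * shootdown_va and sets tlb_shootdown under the target CPU's lock
+ * before sending TLB_VECTOR; this ISR is the receiving half.)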
+ */ + ci = this_cpu(); + if (!ci->tlb_shootdown) { + return; } + ipl = splraise(IPL_HIGH); + __invlpg(ci->shootdown_va); + + ci->shootdown_va = 0; + ci->tlb_shootdown = 0; + splx(ipl); +} + +static void +setup_vectors(void) +{ idt_set_desc(0x0, IDT_TRAP_GATE, ISR(arith_err), 0); idt_set_desc(0x2, IDT_TRAP_GATE, ISR(nmi), 0); idt_set_desc(0x3, IDT_TRAP_GATE, ISR(breakpoint_handler), 0); @@ -89,7 +118,9 @@ setup_vectors(void) idt_set_desc(0xD, IDT_TRAP_GATE, ISR(general_prot), 0); idt_set_desc(0xE, IDT_TRAP_GATE, ISR(page_fault), 0); idt_set_desc(0x80, IDT_USER_INT_GATE, ISR(syscall_isr), 0); - idt_set_desc(halt_vector, IDT_INT_GATE, ISR(cpu_halt_isr), 0); + idt_set_desc(HALT_VECTOR, IDT_INT_GATE, ISR(cpu_halt_isr), 0); + idt_set_desc(TLB_VECTOR, IDT_INT_GATE, ISR(tlb_shootdown_isr), 0); + pin_isr_load(); } static inline void @@ -97,7 +128,7 @@ init_tss(struct cpu_info *ci) { struct tss_desc *desc; - desc = (struct tss_desc *)&g_gdt_data[GDT_TSS]; + desc = (struct tss_desc *)&g_gdt_data[GDT_TSS_INDEX]; write_tss(ci, desc); tss_load(); } @@ -133,6 +164,40 @@ backtrace_addr_to_name(uintptr_t addr, off_t *off) return NULL; } +static void +enable_simd(void) +{ + int retval; + + if ((retval = simd_init()) < 0) { + pr_trace_bsp("SIMD not supported\n"); + } + + if (retval == 1) { + pr_trace_bsp("SSE enabled but not AVX\n"); + } +} + +void +cpu_shootdown_tlb(vaddr_t va) +{ + uint32_t ncpu = cpu_count(); + struct cpu_info *cip; + + for (uint32_t i = 0; i < ncpu; ++i) { + cip = cpu_get(i); + if (cip == NULL) { + break; + } + + spinlock_acquire(&cip->lock); + cip->shootdown_va = va; + cip->tlb_shootdown = 1; + lapic_send_ipi(cip->apicid, IPI_SHORTHAND_NONE, TLB_VECTOR); + spinlock_release(&cip->lock); + } +} + void md_backtrace(void) { @@ -170,10 +235,25 @@ cpu_halt_all(void) } /* Send IPI to all cores */ - lapic_send_ipi(0, IPI_SHORTHAND_ALL, halt_vector); + lapic_send_ipi(0, IPI_SHORTHAND_ALL, HALT_VECTOR); for (;;); } +/* + * Same as cpu_halt_all() but for all other + * cores but ourselves. + */ +void +cpu_halt_others(void) +{ + if (rdmsr(IA32_GS_BASE) == 0) { + __ASMV("cli; hlt"); + } + + /* Send IPI to all cores */ + lapic_send_ipi(0, IPI_SHORTHAND_OTHERS, HALT_VECTOR); +} + void serial_init(void) { @@ -224,7 +304,7 @@ void cpu_startup(struct cpu_info *ci) { ci->self = ci; - gdt_load(&bsp_gdtr); + gdt_load(); idt_load(); setup_vectors(); @@ -233,5 +313,10 @@ cpu_startup(struct cpu_info *ci) init_tss(ci); try_mitigate_spectre(); + enable_simd(); lapic_init(); + + if (!bsp_init) { + bsp_init = true; + } } diff --git a/sys/arch/amd64/amd64/mp.c b/sys/arch/amd64/amd64/mp.c index a8a36c7..dbee32c 100644 --- a/sys/arch/amd64/amd64/mp.c +++ b/sys/arch/amd64/amd64/mp.c @@ -29,9 +29,12 @@ #include <sys/types.h> #include <sys/limine.h> +#include <sys/limits.h> #include <sys/syslog.h> +#include <sys/proc.h> #include <sys/spinlock.h> #include <sys/sched.h> +#include <sys/atomic.h> #include <machine/cpu.h> #include <vm/dynalloc.h> #include <assert.h> @@ -39,42 +42,66 @@ #define pr_trace(fmt, ...) 
kprintf("cpu_mp: " fmt, ##__VA_ARGS__) +extern struct proc g_proc0; static volatile struct limine_smp_request g_smp_req = { .id = LIMINE_SMP_REQUEST, .revision = 0 }; +static volatile uint32_t ncpu_up = 1; +static struct cpu_info *ci_list[CPU_MAX]; +static struct spinlock ci_list_lock = {0}; + static void ap_trampoline(struct limine_smp_info *si) { - static struct spinlock lock = {0}; struct cpu_info *ci; ci = dynalloc(sizeof(*ci)); __assert(ci != NULL); memset(ci, 0, sizeof(*ci)); - spinlock_acquire(&lock); cpu_startup(ci); + spinlock_acquire(&ci_list_lock); + ci_list[ncpu_up] = ci; + spinlock_release(&ci_list_lock); - spinlock_release(&lock); + atomic_inc_int(&ncpu_up); sched_enter(); - while (1); } +struct cpu_info * +cpu_get(uint32_t index) +{ + if (index >= ncpu_up) { + return NULL; + } + + return ci_list[index]; +} + +uint32_t +cpu_count(void) +{ + return ncpu_up; +} + void mp_bootstrap_aps(struct cpu_info *ci) { struct limine_smp_response *resp = g_smp_req.response; struct limine_smp_info **cpus; size_t cpu_init_counter; + uint32_t ncpu; /* Should not happen */ __assert(resp != NULL); cpus = resp->cpus; - cpu_init_counter = resp->cpu_count - 1; + ncpu = resp->cpu_count; + cpu_init_counter = ncpu - 1; + ci_list[0] = ci; if (resp->cpu_count == 1) { pr_trace("CPU has 1 core, no APs to bootstrap...\n"); @@ -90,4 +117,13 @@ mp_bootstrap_aps(struct cpu_info *ci) cpus[i]->goto_address = ap_trampoline; } + + /* Start up idle threads */ + pr_trace("kicking %d idle threads...\n", ncpu); + for (uint32_t i = 0; i < ncpu; ++i) { + spawn(&g_proc0, sched_enter, NULL, 0, NULL); + } + + /* Wait for all cores to be ready */ + while ((ncpu_up - 1) < cpu_init_counter); } diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c index 2e62a4b..6c6bfcd 100644 --- a/sys/arch/amd64/amd64/pmap.c +++ b/sys/arch/amd64/amd64/pmap.c @@ -33,6 +33,8 @@ #include <sys/errno.h> #include <machine/tlb.h> #include <machine/vas.h> +#include <machine/cpu.h> +#include <machine/cdefs.h> #include <vm/pmap.h> #include <vm/physmem.h> #include <vm/vm.h> @@ -52,7 +54,7 @@ #define PTE_PCD BIT(4) /* Page-level cache disable */ #define PTE_ACC BIT(5) /* Accessed */ #define PTE_DIRTY BIT(6) /* Dirty (written-to page) */ -#define PTE_PAT BIT(7) +#define PTE_PS BIT(7) /* Page size */ #define PTE_GLOBAL BIT(8) #define PTE_NX BIT(63) /* Execute-disable */ @@ -112,6 +114,16 @@ pmap_extract(uint8_t level, vaddr_t va, vaddr_t *pmap, bool alloc) return NULL; } + /* + * TODO: Support huge pages... For now, don't let the + * bootloader fuck us up with their pre-kernel + * mappings and tell huge pages to get the fuck. + * + */ + if (ISSET(pmap[idx], PTE_PS)) { + pmap[idx] = 0; + } + if (ISSET(pmap[idx], PTE_P)) { next = (pmap[idx] & PTE_ADDR_MASK); return PHYS_TO_VIRT(next); @@ -176,14 +188,15 @@ done: * @vas: Virtual address space. * @va: Target virtual address. * @val: Value to write. + * @alloc: True to alloc new paging entries. 
*/ static int -pmap_update_tbl(struct vas vas, vaddr_t va, uint64_t val) +pmap_update_tbl(struct vas vas, vaddr_t va, uint64_t val, bool alloc) { uintptr_t *tbl; int status; - if ((status = pmap_get_tbl(vas, va, true, &tbl)) != 0) { + if ((status = pmap_get_tbl(vas, va, alloc, &tbl)) != 0) { return status; } @@ -266,19 +279,21 @@ pmap_map(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot) { uint32_t flags = pmap_prot_to_pte(prot); - return pmap_update_tbl(vas, va, (pa | flags)); + return pmap_update_tbl(vas, va, (pa | flags), true); } int pmap_unmap(struct vas vas, vaddr_t va) { - return pmap_update_tbl(vas, va, 0); + return pmap_update_tbl(vas, va, 0, false); } int pmap_set_cache(struct vas vas, vaddr_t va, int type) { uintptr_t *tbl; + uint32_t flags; + paddr_t pa; int status; size_t idx; @@ -286,20 +301,62 @@ pmap_set_cache(struct vas vas, vaddr_t va, int type) return status; idx = pmap_get_level_index(1, va); + pa = tbl[idx] & PTE_ADDR_MASK; + flags = tbl[idx] & ~PTE_ADDR_MASK; /* Set the caching policy */ switch (type) { case VM_CACHE_UC: - tbl[idx] |= PTE_PCD; - tbl[idx] &= ~PTE_PWT; + flags |= PTE_PCD; + flags &= ~PTE_PWT; break; case VM_CACHE_WT: - tbl[idx] &= ~PTE_PCD; - tbl[idx] |= PTE_PWT; + flags &= ~PTE_PCD; + flags |= PTE_PWT; break; default: return -EINVAL; } + return pmap_update_tbl(vas, va, (pa | flags), false); +} + +bool +pmap_is_clean(struct vas vas, vaddr_t va) +{ + uintptr_t *tbl; + int status; + size_t idx; + + if ((status = pmap_get_tbl(vas, va, false, &tbl)) != 0) + return status; + + idx = pmap_get_level_index(1, va); + return ISSET(tbl[idx], PTE_DIRTY) == 0; +} + +void +pmap_mark_clean(struct vas vas, vaddr_t va) +{ + uintptr_t *tbl; + int status; + size_t idx; + + if ((status = pmap_get_tbl(vas, va, false, &tbl)) != 0) + return; + + idx = pmap_get_level_index(1, va); + tbl[idx] &= ~PTE_DIRTY; + + if (cpu_count() > 1) { + cpu_shootdown_tlb(va); + } else { + __invlpg(va); + } +} + +int +pmap_init(void) +{ return 0; } diff --git a/sys/arch/amd64/amd64/proc_machdep.c b/sys/arch/amd64/amd64/proc_machdep.c index 0be85fd..63604a4 100644 --- a/sys/arch/amd64/amd64/proc_machdep.c +++ b/sys/arch/amd64/amd64/proc_machdep.c @@ -40,7 +40,7 @@ #include <vm/map.h> #include <string.h> -void +uintptr_t md_td_stackinit(struct proc *td, void *stack_top, struct exec_prog *prog) { uintptr_t *sp = stack_top; @@ -97,6 +97,7 @@ md_td_stackinit(struct proc *td, void *stack_top, struct exec_prog *prog) STACK_PUSH(sp, argc); tfp = &td->tf; tfp->rsp = (uintptr_t)sp - VM_HIGHER_HALF; + return tfp->rsp; } void @@ -123,24 +124,31 @@ md_td_kick(struct proc *td) { struct trapframe *tfp; struct cpu_info *ci; + uint16_t ds = USER_DS | 3; tfp = &td->tf; ci = this_cpu(); ci->curtd = td; + td->flags &= ~PROC_KTD; __ASMV( - "push %0\n" + "mov %0, %%rax\n" "push %1\n" - "pushf\n" "push %2\n" "push %3\n" + "push %%rax\n" + "push %4\n" + "test $3, %%ax\n" + "jz 1f\n" "lfence\n" "swapgs\n" - "iretq" + "1:\n" + " iretq" : - : "i" (USER_DS | 3), + : "r" (tfp->cs), + "r" (ds), "r" (tfp->rsp), - "i" (USER_CS | 3), + "m" (tfp->rflags), "r" (tfp->rip) ); @@ -155,13 +163,14 @@ md_td_kick(struct proc *td) * @ip: Instruction pointer. 
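+ *
+ * (With rpl == 0 the new thread is a kernel thread: its stack stays in
+ * the higher half and the process is flagged PROC_KTD; user threads
+ * instead have their stack mapped with PROT_USER.)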
*/ int -md_fork(struct proc *p, struct proc *parent, uintptr_t ip) +md_spawn(struct proc *p, struct proc *parent, uintptr_t ip) { uintptr_t stack_base; struct trapframe *tfp; struct pcb *pcbp; uint8_t rpl = 0; int error; + vm_prot_t prot = PROT_READ | PROT_WRITE; tfp = &p->tf; @@ -201,9 +210,10 @@ md_fork(struct proc *p, struct proc *parent, uintptr_t ip) */ if (rpl == 0) { stack_base += VM_HIGHER_HALF; + p->flags |= PROC_KTD; } else { - vm_map(pcbp->addrsp, stack_base, stack_base, - PROT_READ | PROT_WRITE | PROT_USER, PROC_STACK_PAGES); + prot |= PROT_USER; + vm_map(pcbp->addrsp, stack_base, stack_base, prot, PROC_STACK_PAGES); } p->stack_base = stack_base; diff --git a/sys/arch/amd64/amd64/reboot.c b/sys/arch/amd64/amd64/reboot.c index b9df1c0..d47a352 100644 --- a/sys/arch/amd64/amd64/reboot.c +++ b/sys/arch/amd64/amd64/reboot.c @@ -32,10 +32,15 @@ #include <sys/cdefs.h> #include <machine/pio.h> #include <machine/cpu.h> +#include <dev/acpi/acpi.h> void cpu_reboot(int method) { + if (ISSET(method, REBOOT_POWEROFF)) { + acpi_sleep(ACPI_SLEEP_S5); + } + if (ISSET(method, REBOOT_HALT)) { cpu_halt_all(); } @@ -45,3 +50,13 @@ cpu_reboot(int method) outb(0x64, 0xFE); } } + +/* + * arg0: Method bits + */ +scret_t +sys_reboot(struct syscall_args *scargs) +{ + cpu_reboot(scargs->arg0); + __builtin_unreachable(); +} diff --git a/sys/arch/amd64/amd64/simd.S b/sys/arch/amd64/amd64/simd.S new file mode 100644 index 0000000..23fe461 --- /dev/null +++ b/sys/arch/amd64/amd64/simd.S @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + .text + .globl simd_init +simd_init: + /* + * Enable SIMD, if SSE and AVX is supported, + * a value of zero is returned. If SSE is + * supported yet AVX is not, a value of one + * is returned. However, if none are supported, + * this routine returns -1. + */ + + // Do we support SSE? 
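+	// (CPUID leaf 1: EDX bit 25 = SSE; ECX bit 27 = OSXSAVE, bit 28 = AVX)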
+ mov $1, %eax + cpuid + bt $25, %edx + jnc .sse_not_sup + + mov %cr0, %rax // Old CR0 -> EAX + and $0xFFFB, %ax // Disable co-processor emulation + or $0x02, %ax // Enable co-processor monitoring + mov %rax, %cr0 // Update CR0 with new flags + + mov %cr4, %rax // Old CR4 -> EAX + or $0x200, %ax // Enable FXSAVE/FXRSTOR + or $0x400, %ax // Enable SIMD FP exceptions + mov %rax, %cr4 // Update CR4 with new flags + + mov $1, %eax // LEAF 1 + cpuid // Bit 28 of ECX indicates AVX support + mov $3, %eax // We need to check two bits + shl $27, %eax // Which are ECX.OSXSAVE and ECX.AVX + test %eax, %ecx // Are XSAVE and AVX supported? + jnc .avx_not_sup // Nope, just continue + + // Enable AVX + xor %rcx, %rcx // Select XCR0 + xgetbv // Load extended control register + or $0x07, %eax // Set AVX + SSE bits + xsetbv // Store new flags + xor %rax, %rax // Everything is good + retq // Return back to caller (RETURN) +.sse_not_sup: + mov $-1, %rax + retq +.avx_not_sup: + mov $1, %rax + retq diff --git a/sys/arch/amd64/amd64/trap.c b/sys/arch/amd64/amd64/trap.c index 9a3a7ba..c57b5d2 100644 --- a/sys/arch/amd64/amd64/trap.c +++ b/sys/arch/amd64/amd64/trap.c @@ -60,6 +60,17 @@ static const char *trap_type[] = { [TRAP_SS] = "stack-segment fault" }; +/* Page-fault flags */ +static const char pf_flags[] = { + 'p', /* Present */ + 'w', /* Write */ + 'u', /* User */ + 'r', /* Reserved write */ + 'x', /* Instruction fetch */ + 'k', /* Protection key violation */ + 's' /* Shadow stack access */ +}; + static inline uintptr_t pf_faultaddr(void) { @@ -69,7 +80,24 @@ pf_faultaddr(void) } static void -regdump(struct trapframe *tf) +pf_code(uint64_t error_code) +{ + char tab[8] = { + '-', '-', '-', + '-', '-', '-', + '-', '\0' + }; + + for (int i = 0; i < 7; ++i) { + if (ISSET(error_code, BIT(i))) { + tab[i] = pf_flags[i]; + } + } + kprintf("code=[%s]\n", tab); +} + +__dead static void +trap_fatal(struct trapframe *tf) { uintptr_t cr3, cr2 = pf_faultaddr(); @@ -79,11 +107,16 @@ regdump(struct trapframe *tf) : "memory" ); - kprintf(OMIT_TIMESTAMP + if (tf->trapno == TRAP_PAGEFLT) { + pf_code(tf->error_code); + } + + panic("got fatal trap\n\n" + "-- DUMPING PROCESSOR STATE --\n" "RAX=%p RCX=%p RDX=%p\n" "RBX=%p RSI=%p RDI=%p\n" "RFL=%p CR2=%p CR3=%p\n" - "RBP=%p RSP=%p RIP=%p\n", + "RBP=%p RSP=%p RIP=%p\n\n", tf->rax, tf->rcx, tf->rdx, tf->rbx, tf->rsi, tf->rdi, tf->rflags, cr2, cr3, @@ -101,6 +134,9 @@ trap_user(struct trapframe *tf) switch (tf->trapno) { case TRAP_PROTFLT: case TRAP_PAGEFLT: + if (tf->trapno == TRAP_PAGEFLT) { + pf_code(tf->error_code); + } sigaddset(&sigset, SIGSEGV); break; case TRAP_ARITH_ERR: @@ -120,20 +156,6 @@ trap_user(struct trapframe *tf) dispatch_signals(td); } -static void -trap_quirks(struct cpu_info *ci) -{ - static uint8_t count; - - if (ISSET(ci->irq_mask, CPU_IRQ(1)) && count < 1) { - ++count; - pr_error("detected buggy i8042\n"); - pr_error("applying I8042_HOSTILE quirk\n"); - i8042_quirk(I8042_HOSTILE); - return; - } -} - void trap_syscall(struct trapframe *tf) { @@ -155,24 +177,22 @@ trap_syscall(struct trapframe *tf) void trap_handler(struct trapframe *tf) { - struct cpu_info *ci; - - splraise(IPL_HIGH); + int ipl; + ipl = splraise(IPL_HIGH); if (tf->trapno >= NELEM(trap_type)) { panic("got unknown trap %d\n", tf->trapno); } pr_error("got %s\n", trap_type[tf->trapno]); - ci = this_cpu(); - trap_quirks(ci); /* Handle traps from userland */ if (ISSET(tf->cs, 3)) { + splx(ipl); trap_user(tf); return; } - regdump(tf); - panic("fatal trap - halting\n"); + trap_fatal(tf); + 
__builtin_unreachable(); } diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S new file mode 100644 index 0000000..c820a41 --- /dev/null +++ b/sys/arch/amd64/amd64/vector.S @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <machine/frameasm.h> + +#define IDT_INT_GATE 0x8E + +.macro IDT_SET_VEC vec, sym + mov $\vec, %rdi + mov $IDT_INT_GATE, %rsi + lea \sym(%rip), %rdx + xor %rcx, %rcx + call idt_set_desc +.endm + + .text + ALIGN_TEXT +ioapic_common_func: + xor %rcx, %rcx // Clear counter +.walk: // Walk the handlers + lea g_intrs(%rip), %rbx // Grab table to RBX + lea (%rbx, %rcx, 8), %rbx // g_intrs + (8 * rcx) + mov (%rbx), %rdx // Grab the intr_hand + or %rdx, %rdx // No more? + jz 1f // Nope, return + + mov (%rdx), %rbx // intr_hand.func + add $8, %rdx // Get interrupt data + mov %rdx, %rdi // Pass the interrupt data + push %rcx // Save our counter + call *%rbx // Call the handler + pop %rcx // Restore our counter + or %rax, %rax // Was it theirs? (RET >= 1) + jnz done // Yes, we are done. +1: inc %rcx // Next + cmp $256, %rcx // Did we reach the end? 
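+	                        // g_intrs holds one 8-byte slot per vector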
+	jl .walk                // Nope, keep going
+done:
+	call lapic_eoi
+	retq
+
+	.globl pin_isr_load
+pin_isr_load:
+	IDT_SET_VEC 35, ioapic_edge_0
+	IDT_SET_VEC 36, ioapic_edge_1
+	IDT_SET_VEC 37, ioapic_edge_2
+	IDT_SET_VEC 38, ioapic_edge_3
+	IDT_SET_VEC 39, ioapic_edge_4
+	IDT_SET_VEC 40, ioapic_edge_5
+	IDT_SET_VEC 41, ioapic_edge_6
+	IDT_SET_VEC 42, ioapic_edge_7
+	IDT_SET_VEC 43, ioapic_edge_8
+	IDT_SET_VEC 44, ioapic_edge_9
+	IDT_SET_VEC 45, ioapic_edge_10
+	IDT_SET_VEC 46, ioapic_edge_11
+	IDT_SET_VEC 47, ioapic_edge_12
+	IDT_SET_VEC 48, ioapic_edge_13
+	IDT_SET_VEC 49, ioapic_edge_14
+	IDT_SET_VEC 50, ioapic_edge_15
+	IDT_SET_VEC 51, ioapic_edge_16
+	IDT_SET_VEC 52, ioapic_edge_17
+	IDT_SET_VEC 53, ioapic_edge_18
+	IDT_SET_VEC 54, ioapic_edge_19
+	IDT_SET_VEC 55, ioapic_edge_20
+	IDT_SET_VEC 56, ioapic_edge_21
+	IDT_SET_VEC 57, ioapic_edge_22
+	IDT_SET_VEC 58, ioapic_edge_23
+	IDT_SET_VEC 59, ioapic_edge_24
+	IDT_SET_VEC 60, ioapic_edge_25
+	IDT_SET_VEC 61, ioapic_edge_26
+	IDT_SET_VEC 62, ioapic_edge_27
+	IDT_SET_VEC 63, ioapic_edge_28
+	IDT_SET_VEC 64, ioapic_edge_29
+	IDT_SET_VEC 65, ioapic_edge_30
+	IDT_SET_VEC 66, ioapic_edge_31
+	IDT_SET_VEC 67, ioapic_edge_32
+	IDT_SET_VEC 68, ioapic_edge_33
+	IDT_SET_VEC 69, ioapic_edge_34
+	IDT_SET_VEC 70, ioapic_edge_35
+	IDT_SET_VEC 71, ioapic_edge_36
+	IDT_SET_VEC 72, ioapic_edge_37
+	IDT_SET_VEC 73, ioapic_edge_38
+	IDT_SET_VEC 74, ioapic_edge_39
+	IDT_SET_VEC 75, ioapic_edge_40
+	IDT_SET_VEC 76, ioapic_edge_41
+	IDT_SET_VEC 77, ioapic_edge_42
+	IDT_SET_VEC 78, ioapic_edge_43
+	IDT_SET_VEC 79, ioapic_edge_44
+	IDT_SET_VEC 80, ioapic_edge_45
+	IDT_SET_VEC 81, ioapic_edge_46
+	IDT_SET_VEC 82, ioapic_edge_47
+	IDT_SET_VEC 83, ioapic_edge_48
+	IDT_SET_VEC 84, ioapic_edge_49
+	IDT_SET_VEC 85, ioapic_edge_50
+	IDT_SET_VEC 86, ioapic_edge_51
+	IDT_SET_VEC 87, ioapic_edge_52
+	IDT_SET_VEC 88, ioapic_edge_53
+	IDT_SET_VEC 89, ioapic_edge_54
+	IDT_SET_VEC 90, ioapic_edge_55
+	IDT_SET_VEC 91, ioapic_edge_56
+	IDT_SET_VEC 92, ioapic_edge_57
+	IDT_SET_VEC 93, ioapic_edge_58
+	IDT_SET_VEC 94, ioapic_edge_59
+	IDT_SET_VEC 95, ioapic_edge_60
+	IDT_SET_VEC 96, ioapic_edge_61
+	IDT_SET_VEC 97, ioapic_edge_62
+	IDT_SET_VEC 98, ioapic_edge_63
+	ret
+
+/* I/O APIC edge ISRs */
+INTRENTRY(ioapic_edge_0, ioapic_common_func)
+INTRENTRY(ioapic_edge_1, ioapic_common_func)
+INTRENTRY(ioapic_edge_2, ioapic_common_func)
+INTRENTRY(ioapic_edge_3, ioapic_common_func)
+INTRENTRY(ioapic_edge_4, ioapic_common_func)
+INTRENTRY(ioapic_edge_5, ioapic_common_func)
+INTRENTRY(ioapic_edge_6, ioapic_common_func)
+INTRENTRY(ioapic_edge_7, ioapic_common_func)
+INTRENTRY(ioapic_edge_8, ioapic_common_func)
+INTRENTRY(ioapic_edge_9, ioapic_common_func)
+INTRENTRY(ioapic_edge_10, ioapic_common_func)
+INTRENTRY(ioapic_edge_11, ioapic_common_func)
+INTRENTRY(ioapic_edge_12, ioapic_common_func)
+INTRENTRY(ioapic_edge_13, ioapic_common_func)
+INTRENTRY(ioapic_edge_14, ioapic_common_func)
+INTRENTRY(ioapic_edge_15, ioapic_common_func)
+INTRENTRY(ioapic_edge_16, ioapic_common_func)
+INTRENTRY(ioapic_edge_17, ioapic_common_func)
+INTRENTRY(ioapic_edge_18, ioapic_common_func)
+INTRENTRY(ioapic_edge_19, ioapic_common_func)
+INTRENTRY(ioapic_edge_20, ioapic_common_func)
+INTRENTRY(ioapic_edge_21, ioapic_common_func)
+INTRENTRY(ioapic_edge_22, ioapic_common_func)
+INTRENTRY(ioapic_edge_23, ioapic_common_func)
+INTRENTRY(ioapic_edge_24, ioapic_common_func)
+INTRENTRY(ioapic_edge_25, ioapic_common_func)
+INTRENTRY(ioapic_edge_26, ioapic_common_func)
+INTRENTRY(ioapic_edge_27, ioapic_common_func)
+INTRENTRY(ioapic_edge_28, ioapic_common_func) +INTRENTRY(ioapic_edge_29, ioapic_common_func) +INTRENTRY(ioapic_edge_30, ioapic_common_func) +INTRENTRY(ioapic_edge_31, ioapic_common_func) +INTRENTRY(ioapic_edge_32, ioapic_common_func) +INTRENTRY(ioapic_edge_33, ioapic_common_func) +INTRENTRY(ioapic_edge_34, ioapic_common_func) +INTRENTRY(ioapic_edge_35, ioapic_common_func) +INTRENTRY(ioapic_edge_36, ioapic_common_func) +INTRENTRY(ioapic_edge_37, ioapic_common_func) +INTRENTRY(ioapic_edge_38, ioapic_common_func) +INTRENTRY(ioapic_edge_39, ioapic_common_func) +INTRENTRY(ioapic_edge_40, ioapic_common_func) +INTRENTRY(ioapic_edge_41, ioapic_common_func) +INTRENTRY(ioapic_edge_42, ioapic_common_func) +INTRENTRY(ioapic_edge_43, ioapic_common_func) +INTRENTRY(ioapic_edge_44, ioapic_common_func) +INTRENTRY(ioapic_edge_45, ioapic_common_func) +INTRENTRY(ioapic_edge_46, ioapic_common_func) +INTRENTRY(ioapic_edge_47, ioapic_common_func) +INTRENTRY(ioapic_edge_48, ioapic_common_func) +INTRENTRY(ioapic_edge_49, ioapic_common_func) +INTRENTRY(ioapic_edge_50, ioapic_common_func) +INTRENTRY(ioapic_edge_51, ioapic_common_func) +INTRENTRY(ioapic_edge_52, ioapic_common_func) +INTRENTRY(ioapic_edge_53, ioapic_common_func) +INTRENTRY(ioapic_edge_54, ioapic_common_func) +INTRENTRY(ioapic_edge_55, ioapic_common_func) +INTRENTRY(ioapic_edge_56, ioapic_common_func) +INTRENTRY(ioapic_edge_57, ioapic_common_func) +INTRENTRY(ioapic_edge_58, ioapic_common_func) +INTRENTRY(ioapic_edge_59, ioapic_common_func) +INTRENTRY(ioapic_edge_60, ioapic_common_func) +INTRENTRY(ioapic_edge_61, ioapic_common_func) +INTRENTRY(ioapic_edge_62, ioapic_common_func) +INTRENTRY(ioapic_edge_63, ioapic_common_func) diff --git a/sys/arch/amd64/conf/GENERIC b/sys/arch/amd64/conf/GENERIC index 19c9a62..69e071a 100644 --- a/sys/arch/amd64/conf/GENERIC +++ b/sys/arch/amd64/conf/GENERIC @@ -1,6 +1,17 @@ +// // Kernel options -option SPECTRE_IBRS no -option SERIAL_DEBUG yes +// +// XXX: Indirect branch restricted speculation (SPECTRE_IBRS) +// is disabled by default as it can lead to significant +// performance degradation. +// +option SPECTRE_IBRS no // Enable the IBRS CPU feature +option SERIAL_DEBUG yes // Enable kmsg serial logging +option USER_KMSG no // Show kmsg in user consoles // Kernel constants -setval SCHED_NQUEUE 4 +setval SCHED_NQUEUE 4 // Number of scheduler queues (for MLFQ) + +// Console attributes +setval CONSOLE_BG 0x000000 +setval CONSOLE_FG 0xB57614 diff --git a/sys/arch/amd64/conf/link.ld b/sys/arch/amd64/conf/link.ld index 9c47a81..a43824f 100644 --- a/sys/arch/amd64/conf/link.ld +++ b/sys/arch/amd64/conf/link.ld @@ -29,6 +29,12 @@ SECTIONS __drivers_init_end = .; } :rodata + .drivers.defer : { + __driversd_init_start = .; + *(.drivers.defer .drivers.defer) + __driversd_init_end = .; + } :rodata + . += CONSTANT(MAXPAGESIZE); .data : { diff --git a/sys/arch/amd64/isa/i8042.c b/sys/arch/amd64/isa/i8042.c index ea4fc65..eb8960c 100644 --- a/sys/arch/amd64/isa/i8042.c +++ b/sys/arch/amd64/isa/i8042.c @@ -39,6 +39,7 @@ #include <dev/acpi/acpi.h> #include <dev/timer.h> #include <dev/cons/cons.h> +#include <dev/dmi/dmi.h> #include <machine/cpu.h> #include <machine/pio.h> #include <machine/isa/i8042var.h> @@ -57,15 +58,6 @@ #define pr_error(...) 
pr_trace(__VA_ARGS__) #define IO_NOP() inb(0x80) -#define OBUF_WAIT() do { \ - i8042_statpoll(I8042_OBUFF, false, NULL); \ - IO_NOP(); \ - } while (0); - -#define IBUF_WAIT() do { \ - i8042_statpoll(I8042_IBUFF, false, NULL); \ - IO_NOP(); \ - } while (0); static struct spinlock data_lock; static struct spinlock isr_lock; @@ -77,6 +69,7 @@ static struct proc polltd; static struct timer tmr; static bool is_init = false; +static void i8042_ibuf_wait(void); static int dev_send(bool aux, uint8_t data); static int i8042_kb_getc(uint8_t sc, char *chr); static void i8042_drain(void); @@ -112,43 +105,30 @@ kbd_set_leds(uint8_t mask) dev_send(false, mask); } -/* - * Poll the i8042 status register - * - * @bits: Status bits. - * @pollset: True to poll if set - * @io: Routine to invoke per iter (NULL if none) - * @flush: True to flush i8042 data per iter - */ -static int -i8042_statpoll(uint8_t bits, bool pollset, bool flush) +static void +i8042_obuf_wait(void) { - size_t usec_start, usec; - size_t elapsed_msec; - uint8_t val; - bool tmp; + uint8_t status; - usec_start = tmr.get_time_usec(); for (;;) { - val = inb(I8042_STATUS); - tmp = (pollset) ? ISSET(val, bits) : !ISSET(val, bits); - usec = tmr.get_time_usec(); - elapsed_msec = (usec - usec_start) / 1000; - - IO_NOP(); - - /* If tmp is set, the register updated in time */ - if (tmp) { - break; + status = inb(I8042_STATUS); + if (ISSET(status, I8042_OBUFF)) { + return; } + } +} - /* Exit with an error if we timeout */ - if (elapsed_msec > I8042_DELAY) { - return -ETIME; +static void +i8042_ibuf_wait(void) +{ + uint8_t status; + + for (;;) { + status = inb(I8042_STATUS); + if (!ISSET(status, I8042_IBUFF)) { + return; } } - - return val; } /* @@ -158,7 +138,9 @@ static void i8042_drain(void) { spinlock_acquire(&data_lock); - i8042_statpoll(I8042_OBUFF, false, true); + while (ISSET(inb(I8042_STATUS), I8042_OBUFF)) { + inb(I8042_DATA); + } spinlock_release(&data_lock); } @@ -171,33 +153,47 @@ i8042_drain(void) static void i8042_write(uint16_t port, uint8_t val) { - IBUF_WAIT(); + i8042_ibuf_wait(); outb(port, val); } /* - * Read the i8042 config register + * Read from an i8042 register. + * + * @port: I/O port + */ +static uint8_t +i8042_read(uint16_t port) +{ + i8042_obuf_wait(); + return inb(port); +} + +/* + * Read the i8042 controller configuration + * byte. */ static uint8_t i8042_read_conf(void) { - i8042_drain(); + uint8_t conf; + i8042_write(I8042_CMD, I8042_GET_CONFB); - OBUF_WAIT(); - return inb(I8042_DATA); + i8042_obuf_wait(); + conf = i8042_read(I8042_DATA); + return conf; } /* - * Write the i8042 config register + * Write a new value to the i8042 controller + * configuration byte. 
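+ *
+ * The write is a two-step sequence: the set-config
+ * command goes to the command port, then the new byte
+ * goes to the data port.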
*/ static void -i8042_write_conf(uint8_t value) +i8042_write_conf(uint8_t conf) { - i8042_drain(); - IBUF_WAIT(); i8042_write(I8042_CMD, I8042_SET_CONFB); - IBUF_WAIT(); - i8042_write(I8042_DATA, value); + i8042_ibuf_wait(); + i8042_write(I8042_DATA, conf); } /* @@ -213,16 +209,16 @@ dev_send(bool aux, uint8_t data) i8042_write(I8042_CMD, I8042_PORT1_SEND); } - IBUF_WAIT(); i8042_write(I8042_DATA, data); - OBUF_WAIT(); + i8042_obuf_wait(); return inb(I8042_DATA); } -void -i8042_kb_event(void) +static int +i8042_kb_event(void *sp) { struct cpu_info *ci; + struct cons_input input; uint8_t data; char c; @@ -235,53 +231,35 @@ i8042_kb_event(void) /* No data useful */ goto done; } - cons_putch(&g_root_scr, c); - /* TODO */ + input.scancode = data; + input.chr = c; + cons_ibuf_push(&g_root_scr, input); done: - ci->irq_mask &= CPU_IRQ(1); + ci->irq_mask &= ~CPU_IRQ(1); spinlock_release(&isr_lock); - lapic_eoi(); + return 1; /* handled */ } static void i8042_en_intr(void) { + struct intr_hand ih; uint8_t conf; - int vec; - pr_trace("ENTER -> i8042_en_intr\n"); - i8042_write(I8042_CMD, I8042_DISABLE_PORT0); - pr_trace("port 0 disabled\n"); + ih.func = i8042_kb_event; + ih.priority = IPL_BIO; + ih.irq = KB_IRQ; + intr_register("i8042-kb", &ih); - vec = intr_alloc_vector("i8042-kb", IPL_BIO); - idt_set_desc(vec, IDT_INT_GATE, ISR(i8042_kb_isr), IST_HW_IRQ); - ioapic_set_vec(KB_IRQ, vec); - ioapic_irq_unmask(KB_IRQ); - pr_trace("irq 1 -> vec[%x]\n", vec); - - /* Setup config bits */ + /* + * Enable the clock of PS/2 port 0 and tell + * the controller that we are accepting + * interrupts. + */ conf = i8042_read_conf(); + conf &= ~I8042_PORT0_CLK; conf |= I8042_PORT0_INTR; - conf &= ~I8042_PORT1_INTR; i8042_write_conf(conf); - pr_trace("conf written\n"); - - i8042_write(I8042_CMD, I8042_ENABLE_PORT0); - pr_trace("port 0 enabled\n"); -} - -static void -esckey_reboot(void) -{ - syslock(); - kprintf("** Machine going down for a reboot"); - - for (size_t i = 0; i < 3; ++i) { - kprintf(OMIT_TIMESTAMP "."); - tmr.msleep(1000); - } - - cpu_reboot(0); } /* @@ -298,10 +276,6 @@ i8042_kb_getc(uint8_t sc, char *chr) bool release = ISSET(sc, BIT(7)); switch (sc) { - /* Left alt [press] */ - case 0x38: - esckey_reboot(); - break; /* Caps lock [press] */ case 0x3A: /* @@ -357,42 +331,35 @@ i8042_kb_getc(uint8_t sc, char *chr) return 0; } -static void -i8042_sync_loop(void) -{ - for (;;) { - i8042_sync(); - md_pause(); - } -} - /* * Grabs a key from the keyboard, used typically * for syncing the machine however can be used - * to bypass IRQs in case of buggy EC. + * to bypass IRQs to prevent lost bytes. 
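+ *
+ * On machines flagged I8042_HOSTILE, IRQ 1 stays off
+ * and the i8042_sync_loop() thread polls this routine
+ * instead.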
*/ void i8042_sync(void) { static struct spinlock lock; - uint8_t data; + struct cons_input input; + uint8_t data, status; char c; if (spinlock_try_acquire(&lock)) { return; } - if (ISSET(quirks, I8042_HOSTILE) && is_init) { - if (i8042_statpoll(I8042_OBUFF, true, NULL) < 0) { - /* No data ready */ + if (is_init) { + status = inb(I8042_STATUS); + if (!ISSET(status, I8042_OBUFF)) { goto done; } - data = inb(I8042_DATA); + data = inb(I8042_DATA); if (i8042_kb_getc(data, &c) == 0) { - cons_putch(&g_root_scr, c); + input.scancode = data; + input.chr = c; + cons_ibuf_push(&g_root_scr, input); } - md_pause(); } done: spinlock_release(&lock); @@ -404,9 +371,20 @@ i8042_quirk(int mask) quirks |= mask; } +static void +i8042_sync_loop(void) +{ + for (;;) { + i8042_obuf_wait(); + i8042_sync(); + } +} + static int i8042_init(void) { + const char *prodver = NULL; + /* Try to request a general purpose timer */ if (req_timer(TIMER_GP, &tmr) != TMRR_SUCCESS) { pr_error("failed to fetch general purpose timer\n"); @@ -425,6 +403,9 @@ i8042_init(void) return -ENODEV; } + i8042_write(I8042_CMD, I8042_DISABLE_PORT0); + i8042_write(I8042_CMD, I8042_DISABLE_PORT1); + /* * On some thinkpads, e.g., the T420s, the EC implementing * the i8042 logic likes to play cop and throw NMIs at us @@ -432,11 +413,14 @@ i8042_init(void) * etc... As of now, treat the i8042 like a fucking bomb * if this bit is set. */ - if (strcmp(acpi_oemid(), "LENOVO") == 0) { + if ((prodver = dmi_prodver()) == NULL) { + prodver = "None"; + } + if (strcmp(prodver, "ThinkPad T420s") == 0) { quirks |= I8042_HOSTILE; - pr_trace("lenovo device, assuming hostile\n"); + pr_trace("ThinkPad T420s detected, assuming hostile\n"); pr_trace("disabling irq 1, polling as fallback\n"); - fork1(&polltd, 0, i8042_sync_loop, NULL); + spawn(&polltd, i8042_sync_loop, NULL, 0, NULL); } if (!ISSET(quirks, I8042_HOSTILE)) { @@ -445,11 +429,8 @@ i8042_init(void) i8042_en_intr(); } - if (dev_send(false, 0xFF) == 0xFC) { - pr_error("kbd self test failure\n"); - return -EIO; - } - + i8042_write(I8042_CMD, I8042_ENABLE_PORT0); + i8042_drain(); is_init = true; return 0; } diff --git a/sys/arch/amd64/isa/mc1468.c b/sys/arch/amd64/isa/mc1468.c new file mode 100644 index 0000000..bbaa3d1 --- /dev/null +++ b/sys/arch/amd64/isa/mc1468.c @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/driver.h>
+#include <sys/device.h>
+#include <sys/syslog.h>
+#include <fs/devfs.h>
+#include <machine/pio.h>
+#include <machine/cdefs.h>
+#include <string.h>
+
+#define MC1468_REGSEL 0x70
+#define MC1468_DATA 0x71
+
+/* Register A flags */
+#define MC1468_UPDATING BIT(7)
+
+/* Register B flags */
+#define MC1468_DAYSAVE BIT(1)
+#define MC1468_CLOCK24 BIT(2)
+
+static struct cdevsw mc1468_cdevsw;
+
+/*
+ * Convert a binary value to binary-coded-decimal using
+ * the double-dabble algorithm: before each shift, add 3
+ * to any BCD nibble that is >= 5, then shift in the next
+ * binary digit.
+ */
+static uint8_t
+bin_dabble(uint8_t bin)
+{
+    uint8_t retval = 0;
+    uint8_t nibble;
+
+    for (int i = 7; i >= 0; --i) {
+        for (int j = 0; j < 2; ++j) {
+            nibble = (retval >> (4 * j)) & 0x0F;
+            if (nibble >= 5) {
+                retval += 0x03 << (4 * j);
+            }
+        }
+
+        retval <<= 1;
+        if (bin & (1 << i)) {
+            retval |= 1;
+        }
+    }
+
+    return retval;
+}
+
+/*
+ * Read a byte from an MC1468XX register.
+ */
+static uint8_t
+mc1468_read(uint8_t reg)
+{
+    outb(MC1468_REGSEL, reg);
+    return inb(MC1468_DATA);
+}
+
+/*
+ * Write a byte to the MC1468XX register.
+ */
+static void
+mc1468_write(uint8_t reg, uint8_t val)
+{
+    outb(MC1468_REGSEL, reg);
+    outb(MC1468_DATA, val);
+}
+
+/*
+ * Returns true if the MC1468XX is updating
+ * its time registers.
+ */
+static bool
+mc1468_updating(void)
+{
+    uint8_t reg_a;
+
+    /* The update-in-progress bit lives in register A */
+    reg_a = mc1468_read(0x0A);
+    return ISSET(reg_a, MC1468_UPDATING) != 0;
+}
+
+/*
+ * Check if date `a' and date `b' are synced.
+ * Used to make sure a bogus date caused by a
+ * read right before an MC1468XX register
+ * update doesn't occur.
+ */
+static bool
+mc1468_date_synced(struct date *a, struct date *b)
+{
+    if (a->year != b->year)
+        return false;
+    if (a->month != b->month)
+        return false;
+    if (a->day != b->day)
+        return false;
+    if (a->sec != b->sec)
+        return false;
+    if (a->min != b->min)
+        return false;
+    if (a->hour != b->hour)
+        return false;
+
+    return true;
+}
+
+/*
+ * Sometimes the clock chip may encode the
+ * date in binary-coded-decimal. This function
+ * converts a date in BCD format to plain binary.
+ */
+static void
+mc1468_bcd_conv(struct date *dp)
+{
+    uint8_t pm_bit = dp->hour & 0x80;
+
+    dp->year = (dp->year & 0x0F) + ((dp->year / 16) * 10);
+    dp->month = (dp->month & 0x0F) + ((dp->month / 16) * 10);
+    dp->day = (dp->day & 0x0F) + ((dp->day / 16) * 10);
+    dp->sec = (dp->sec & 0x0F) + ((dp->sec / 16) * 10);
+    dp->min = (dp->min & 0x0F) + ((dp->min / 16) * 10);
+    dp->hour = (dp->hour & 0x0F) + (((dp->hour & 0x70) / 16) * 10);
+    dp->hour |= pm_bit;     /* keep the 12-hour PM bit */
+}
+
+/*
+ * Read the time from the clock without syncing
+ * it up.
+ *
+ * XXX: Please use mc1468_get_date() instead as
+ *      this function may return inconsistent
+ *      values if not used correctly.
+ */
+static void
+__mc1468_get_time(struct date *dp)
+{
+    dp->year = mc1468_read(0x09);
+    dp->month = mc1468_read(0x08);
+    dp->day = mc1468_read(0x07);
+    dp->sec = mc1468_read(0x00);
+    dp->min = mc1468_read(0x02);
+    dp->hour = mc1468_read(0x04);
+}
+
+/*
+ * Write a new time/date to the chip.
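+ *
+ * Fields are converted to BCD with bin_dabble() first;
+ * the chip expects BCD unless the data-mode bit in
+ * register B says otherwise.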
+ */ +static void +mc1468_set_date(const struct date *dp) +{ + while (mc1468_updating()) { + md_pause(); + } + + mc1468_write(0x08, bin_dabble(dp->month)); + mc1468_write(0x07, bin_dabble(dp->day)); + mc1468_write(0x04, bin_dabble(dp->hour)); + mc1468_write(0x02, bin_dabble(dp->min)); + mc1468_write(0x00, bin_dabble(dp->sec)); +} + +static int +mc1468_get_date(struct date *dp) +{ + struct date date_cur, date_last; + uint8_t reg_b = mc1468_read(0x0B); + + while (mc1468_updating()) { + __mc1468_get_time(&date_last); + } + + /* + * Get the current date and time. + * + * XXX: The date and time returned by __mc1468_get_time() + * may at times be out of sync, read it twice to + * make sure everything is synced up. + */ + do { + while (mc1468_updating()) { + md_pause(); + } + __mc1468_get_time(&date_last); + date_cur.year = date_last.year; + date_cur.month = date_last.month; + date_cur.day = date_last.day; + date_cur.sec = date_last.sec; + date_cur.min = date_last.min; + date_cur.hour = date_last.hour; + } while (!mc1468_date_synced(&date_cur, &date_last)); + + /* Is this in BCD? */ + if (!ISSET(reg_b, 0x04)) { + mc1468_bcd_conv(&date_cur); + } + + /* 24-hour mode? */ + if (ISSET(reg_b, MC1468_CLOCK24)) { + date_cur.hour = ((date_cur.hour & 0x7F) + 12) % 24; + } + + date_cur.year += 2000; + *dp = date_cur; + return 0; +} + +static int +mc1468_dev_read(dev_t dev, struct sio_txn *sio, int flags) +{ + struct date d; + size_t len = sizeof(d); + + if (sio->len > len) { + sio->len = len; + } + + mc1468_get_date(&d); + memcpy(sio->buf, &d, sio->len); + return sio->len; +} + +static int +mc1468_dev_write(dev_t dev, struct sio_txn *sio, int flags) +{ + struct date d; + size_t len = sizeof(d); + + if (sio->len > len) { + sio->len = len; + } + + memcpy(&d, sio->buf, sio->len); + mc1468_set_date(&d); + return sio->len; +} + +static int +mc1468_init(void) +{ + char devname[] = "rtc"; + devmajor_t major; + dev_t dev; + + major = dev_alloc_major(); + dev = dev_alloc(major); + dev_register(major, dev, &mc1468_cdevsw); + devfs_create_entry(devname, major, dev, 0444); + return 0; +} + +static struct cdevsw mc1468_cdevsw = { + .read = mc1468_dev_read, + .write = mc1468_dev_write, +}; + +DRIVER_EXPORT(mc1468_init); diff --git a/sys/arch/amd64/isa/spkr.c b/sys/arch/amd64/isa/spkr.c index b1bd2a2..b2f63b0 100644 --- a/sys/arch/amd64/isa/spkr.c +++ b/sys/arch/amd64/isa/spkr.c @@ -30,14 +30,60 @@ #include <sys/cdefs.h> #include <sys/errno.h> #include <sys/param.h> +#include <sys/device.h> +#include <sys/driver.h> +#include <fs/devfs.h> #include <dev/timer.h> #include <machine/isa/spkr.h> #include <machine/isa/i8254.h> #include <machine/pio.h> +#include <string.h> #define DIVIDEND 1193180 #define CTRL_PORT 0x61 +static struct cdevsw beep_cdevsw; + +/* + * Write to the pcspkr + * + * Bits 15:0 - frequency (hz) + * Bits 31:16 - duration (msec) + */ +static int +dev_write(dev_t dev, struct sio_txn *sio, int flags) +{ + uint32_t payload = 0; + uint16_t hz; + uint16_t duration; + size_t len = sizeof(payload); + + if (sio->len < len) { + return -EINVAL; + } + + memcpy(&payload, sio->buf, len); + hz = payload & 0xFFFF; + duration = (payload >> 16) & 0xFFFF; + pcspkr_tone(hz, duration); + return sio->len; +} + +static int +beep_init(void) +{ + char devname[] = "beep"; + devmajor_t major; + dev_t dev; + + /* Register the device here */ + major = dev_alloc_major(); + dev = dev_alloc(major); + dev_register(major, dev, &beep_cdevsw); + devfs_create_entry(devname, major, dev, 0666); + return 0; +} + int pcspkr_tone(uint16_t freq, 
uint32_t msec) { @@ -67,3 +113,10 @@ pcspkr_tone(uint16_t freq, uint32_t msec) outb(CTRL_PORT, tmp & ~3); return 0; } + +static struct cdevsw beep_cdevsw = { + .read = noread, + .write = dev_write +}; + +DRIVER_EXPORT(beep_init); diff --git a/sys/arch/amd64/pci/pci_machdep.c b/sys/arch/amd64/pci/pci_machdep.c index 43065b0..5b49a78 100644 --- a/sys/arch/amd64/pci/pci_machdep.c +++ b/sys/arch/amd64/pci/pci_machdep.c @@ -33,6 +33,7 @@ #include <sys/mmio.h> #include <dev/pci/pci.h> #include <dev/pci/pciregs.h> +#include <machine/pci/pci.h> #include <machine/pio.h> #include <machine/bus.h> #include <machine/cpu.h> @@ -73,8 +74,8 @@ pci_get_barreg(struct pci_device *dev, uint8_t bar) } } -pcireg_t -pci_readl(struct pci_device *dev, uint32_t offset) +__weak pcireg_t +md_pci_readl(struct pci_device *dev, uint32_t offset) { uint32_t address; @@ -83,8 +84,8 @@ pci_readl(struct pci_device *dev, uint32_t offset) return inl(0xCFC) >> ((offset & 3) * 8); } -void -pci_writel(struct pci_device *dev, uint32_t offset, pcireg_t val) +__weak void +md_pci_writel(struct pci_device *dev, uint32_t offset, pcireg_t val) { uint32_t address; @@ -163,6 +164,7 @@ pci_enable_msix(struct pci_device *dev, const struct msi_intr *intr) { volatile uint64_t *tbl; struct cpu_info *ci; + struct intr_hand ih, *ih_res; uint32_t data, msg_ctl; uint64_t msg_addr, tmp; uint16_t tbl_off; @@ -184,9 +186,14 @@ pci_enable_msix(struct pci_device *dev, const struct msi_intr *intr) tbl = (void *)((dev->bar[bir] & PCI_BAR_MEMMASK) + MMIO_OFFSET); tbl = (void *)((char *)tbl + tbl_off); - /* Get the vector and setup handler */ - vector = intr_alloc_vector(intr->name, IPL_BIO); - idt_set_desc(vector, IDT_INT_GATE, ISR(intr->handler), 0); + ih.func = intr->handler; + ih.priority = IPL_BIO; + ih.irq = -1; + ih_res = intr_register(intr->name, &ih); + if (ih_res == NULL) { + return -EIO; + } + vector = ih_res->vector; /* * Setup the message data at bits 95:64 of the message diff --git a/sys/dev/acpi/acpi_init.c b/sys/dev/acpi/acpi_init.c index ecfb129..67eed29 100644 --- a/sys/dev/acpi/acpi_init.c +++ b/sys/dev/acpi/acpi_init.c @@ -46,6 +46,7 @@ static char oemid[OEMID_SIZE]; static struct acpi_root_sdt *root_sdt = NULL; static size_t root_sdt_entries = 0; +static paddr_t rsdp_pa = 0; static volatile struct limine_rsdp_request rsdp_req = { .id = LIMINE_RSDP_REQUEST, .revision = 0 @@ -99,6 +100,12 @@ acpi_oemid(void) return oemid; } +paddr_t +acpi_rsdp(void) +{ + return rsdp_pa; +} + void acpi_init(void) { @@ -112,6 +119,7 @@ acpi_init(void) rsdp = rsdp_req.response->address; acpi_print_oemid("RSDP", rsdp->oemid); memcpy(oemid, rsdp->oemid, OEMID_SIZE); + rsdp_pa = VIRT_TO_PHYS(rsdp); /* Fetch the root SDT */ if (rsdp->revision >= 2) { diff --git a/sys/dev/acpi/acpi_sleep.c b/sys/dev/acpi/acpi_sleep.c new file mode 100644 index 0000000..5c72031 --- /dev/null +++ b/sys/dev/acpi/acpi_sleep.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/errno.h> +#include <sys/syslog.h> +#include <machine/cdefs.h> +#include <machine/cpu.h> +#include <dev/acpi/acpi.h> +#include <uacpi/sleep.h> + +#define pr_trace(fmt, ...) kprintf("acpi: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +int +acpi_sleep(int type) +{ + uacpi_status error; + uacpi_sleep_state state; + const uacpi_char *error_str; + + switch (type) { + case ACPI_SLEEP_S5: + state = UACPI_SLEEP_STATE_S5; + break; + default: + return -EINVAL; + } + + error = uacpi_prepare_for_sleep_state(state); + if (uacpi_unlikely_error(error)) { + error_str = uacpi_status_to_string(error); + pr_error("failed to prep sleep: %s\n", error_str); + return -EIO; + } + + /* + * If we are entering the S5 sleep state, bring + * everything down first. + */ + if (type == ACPI_SLEEP_S5) { + pr_trace("powering off, halting all cores...\n"); + cpu_halt_others(); + md_intoff(); + } + + error = uacpi_enter_sleep_state(UACPI_SLEEP_STATE_S5); + if (uacpi_unlikely_error(error)) { + error_str = uacpi_status_to_string(error); + pr_error("could not enter S5 state: %s\n", error_str); + return -EIO; + } + + return 0; +} diff --git a/sys/dev/acpi/uacpi.c b/sys/dev/acpi/uacpi.c new file mode 100644 index 0000000..612f23b --- /dev/null +++ b/sys/dev/acpi/uacpi.c @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/spinlock.h> +#include <sys/proc.h> +#include <sys/param.h> +#include <sys/syslog.h> +#include <sys/panic.h> +#include <dev/timer.h> +#include <uacpi/kernel_api.h> +#include <uacpi/platform/arch_helpers.h> +#include <uacpi/types.h> +#include <uacpi/event.h> +#include <uacpi/sleep.h> +#include <machine/cdefs.h> +#include <machine/pio.h> +#include <machine/cpu.h> +#if defined(__x86_64__) +#include <machine/idt.h> +#include <machine/ioapic.h> +#include <machine/intr.h> +#endif /* __x86_64__ */ +#include <dev/acpi/uacpi.h> +#include <dev/acpi/acpi.h> +#include <dev/pci/pci.h> +#include <vm/dynalloc.h> +#include <vm/vm.h> +#include <string.h> + +typedef struct { + uacpi_io_addr base; + uacpi_size length; +} io_range_t; + +/* + * TODO: Schedule a system shutdown + */ +static uacpi_interrupt_ret +power_button_handler(uacpi_handle ctx) +{ + md_intoff(); + kprintf("power button pressed\n"); + kprintf("halting machine...\n"); + cpu_halt_all(); + return UACPI_INTERRUPT_HANDLED; +} + +void * +uacpi_kernel_alloc(uacpi_size size) +{ + return dynalloc(size); +} + +void +uacpi_kernel_free(void *mem) +{ + dynfree(mem); +} + +uacpi_status +uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address) +{ + paddr_t pa; + + pa = acpi_rsdp(); + if (pa == 0) { + return UACPI_STATUS_NOT_FOUND; + } + + *out_rsdp_address = pa; + return UACPI_STATUS_OK; +} + +/* TODO: Actual mutex */ +uacpi_handle +uacpi_kernel_create_mutex(void) +{ + struct spinlock *lp; + + lp = dynalloc(sizeof(*lp)); + if (lp == NULL) { + return NULL; + } + memset(lp, 0, sizeof(*lp)); + return lp; +} + +void +uacpi_kernel_free_mutex(uacpi_handle handle) +{ + dynfree(handle); +} + +uacpi_status +uacpi_kernel_acquire_mutex(uacpi_handle handle, [[maybe_unused]] uacpi_u16 timeout) +{ + spinlock_acquire((struct spinlock *)handle); + return UACPI_STATUS_OK; +} + +void +uacpi_kernel_release_mutex(uacpi_handle handle) +{ + spinlock_release((struct spinlock *)handle); +} + +uacpi_thread_id +uacpi_kernel_get_thread_id(void) +{ + struct proc *td = this_td(); + + if (td == NULL) { + return 0; /* PID 0 */ + } + + return &td->pid; +} + +uacpi_status +uacpi_kernel_handle_firmware_request(uacpi_firmware_request *request) +{ + switch (request->type) { + case UACPI_FIRMWARE_REQUEST_TYPE_FATAL: + panic("uacpi: fatal firmware request\n"); + break; + } + + return UACPI_STATUS_OK; +} + +uacpi_handle +uacpi_kernel_create_spinlock(void) +{ + struct spinlock *lp; + + lp = dynalloc(sizeof(*lp)); + if (lp == NULL) { + return NULL; + } + memset(lp, 0, sizeof(*lp)); + return lp; +} + +void +uacpi_kernel_free_spinlock(uacpi_handle lock) +{ + dynfree(lock); +} + +uacpi_cpu_flags +uacpi_kernel_lock_spinlock(uacpi_handle lock) +{ + struct spinlock *lp = lock; + + return __atomic_test_and_set(&lp->lock, __ATOMIC_ACQUIRE); +} + +void +uacpi_kernel_unlock_spinlock(uacpi_handle lock, uacpi_cpu_flags interrupt_state) +{ + spinlock_release((struct spinlock *)lock); +} + +uacpi_handle +uacpi_kernel_create_event(void) +{ + size_t *counter; 
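+    /* uACPI events are modeled as plain signal counters */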
+ + counter = dynalloc(sizeof(*counter)); + if (counter == NULL) { + return NULL; + } + + *counter = 0; + return counter; +} + +void +uacpi_kernel_free_event(uacpi_handle handle) +{ + dynfree(handle); +} + +uacpi_bool +uacpi_kernel_wait_for_event(uacpi_handle handle, uacpi_u16 timeout) +{ + size_t *counter = (size_t *)handle; + struct timer tmr; + size_t usec_start, usec; + size_t elapsed_msec; + + if (timeout == 0xFFFF) { + while (*counter != 0) { + md_pause(); + } + return UACPI_TRUE; + } + + req_timer(TIMER_GP, &tmr); + usec_start = tmr.get_time_usec(); + + for (;;) { + if (*counter == 0) { + return UACPI_TRUE; + } + + usec = tmr.get_time_usec(); + elapsed_msec = (usec - usec_start) / 1000; + if (elapsed_msec >= timeout) { + break; + } + + md_pause(); + } + + __atomic_fetch_sub((size_t *)handle, 1, __ATOMIC_SEQ_CST); + return UACPI_FALSE; +} + +void +uacpi_kernel_signal_event(uacpi_handle handle) +{ + __atomic_fetch_add((size_t *)handle, 1, __ATOMIC_SEQ_CST); +} + +void +uacpi_kernel_reset_event(uacpi_handle handle) +{ + __atomic_store_n((size_t *)handle, 0, __ATOMIC_SEQ_CST); +} + +uacpi_status +uacpi_kernel_install_interrupt_handler(uacpi_u32 irq, uacpi_interrupt_handler fn, + uacpi_handle ctx, uacpi_handle *out_irq_handle) +{ + struct intr_hand ih; + + ih.func = (void *)fn; + ih.priority = IPL_HIGH; + ih.irq = irq; + if (intr_register("acpi", &ih) == NULL) { + return UACPI_STATUS_INTERNAL_ERROR; + } + + return UACPI_STATUS_OK; +} + +uacpi_status +uacpi_kernel_uninstall_interrupt_handler([[maybe_unused]] uacpi_interrupt_handler fn, uacpi_handle irq_handle) +{ + return UACPI_STATUS_UNIMPLEMENTED; +} + +uacpi_status +uacpi_kernel_schedule_work(uacpi_work_type, uacpi_work_handler, uacpi_handle ctx) +{ + return UACPI_STATUS_UNIMPLEMENTED; +} + +uacpi_status +uacpi_kernel_wait_for_work_completion(void) +{ + return UACPI_STATUS_UNIMPLEMENTED; +} + +void uacpi_kernel_stall(uacpi_u8 usec) +{ + /* XXX: STUB */ + (void)usec; +} + +void +uacpi_kernel_sleep(uacpi_u64 msec) +{ + struct timer tmr; + + req_timer(TIMER_GP, &tmr); + tmr.msleep(msec); +} + +void * +uacpi_kernel_map(uacpi_phys_addr addr, [[maybe_unused]] uacpi_size len) +{ + return PHYS_TO_VIRT(addr); +} + +void +uacpi_kernel_unmap([[maybe_unused]] void *addr, [[maybe_unused]] uacpi_size len) +{ + /* XXX: no-op */ + (void)addr; + (void)len; +} + +uacpi_status +uacpi_kernel_io_read8(uacpi_handle handle, uacpi_size offset, uacpi_u8 *out_value) +{ + io_range_t *rp = (io_range_t *)handle; + + if (offset >= rp->length) { + return UACPI_STATUS_INVALID_ARGUMENT; + } + + *out_value = inb(rp->base + offset); + return UACPI_STATUS_OK; +} + +uacpi_status +uacpi_kernel_io_read16(uacpi_handle handle, uacpi_size offset, uacpi_u16 *out_value) +{ + io_range_t *rp = (io_range_t *)handle; + + if (offset >= rp->length) { + return UACPI_STATUS_INVALID_ARGUMENT; + } + + *out_value = inw(rp->base + offset); + return UACPI_STATUS_OK; +} + +uacpi_status +uacpi_kernel_io_read32(uacpi_handle handle, uacpi_size offset, uacpi_u32 *out_value) +{ + io_range_t *rp = (io_range_t *)handle; + + if (offset >= rp->length) { + return UACPI_STATUS_INVALID_ARGUMENT; + } + + *out_value = inl(rp->base + offset); + return UACPI_STATUS_OK; +} + +uacpi_status +uacpi_kernel_io_write8(uacpi_handle handle, uacpi_size offset, uacpi_u8 in_value) +{ + io_range_t *rp = (io_range_t *)handle; + + if (offset >= rp->length) { + return UACPI_STATUS_INVALID_ARGUMENT; + } + + outb(rp->base + offset, in_value); + return UACPI_STATUS_OK; +} + +uacpi_status +uacpi_kernel_io_write16(uacpi_handle 
handle, uacpi_size offset, uacpi_u16 in_value)
+{
+    io_range_t *rp = (io_range_t *)handle;
+
+    if (offset >= rp->length) {
+        return UACPI_STATUS_INVALID_ARGUMENT;
+    }
+
+    outw(rp->base + offset, in_value);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_write32(uacpi_handle handle, uacpi_size offset, uacpi_u32 in_value)
+{
+    io_range_t *rp = (io_range_t *)handle;
+
+    if (offset >= rp->length) {
+        return UACPI_STATUS_INVALID_ARGUMENT;
+    }
+
+    outl(rp->base + offset, in_value);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_map(uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle)
+{
+    io_range_t *rp;
+
+    rp = dynalloc(sizeof(*rp));
+    if (rp == NULL) {
+        return UACPI_STATUS_OUT_OF_MEMORY;
+    }
+
+    rp->base = base;
+    rp->length = len;
+    *out_handle = rp;
+    return UACPI_STATUS_OK;
+}
+
+void
+uacpi_kernel_io_unmap(uacpi_handle handle)
+{
+    dynfree(handle);
+}
+
+void
+uacpi_kernel_pci_device_close([[maybe_unused]] uacpi_handle handle)
+{
+    /* XXX: no-op */
+    (void)handle;
+}
+
+uacpi_status
+uacpi_kernel_pci_device_open(uacpi_pci_address address, uacpi_handle *out_handle)
+{
+    struct pci_device *devp;
+
+    devp = dynalloc(sizeof(*devp));
+    if (devp == NULL) {
+        return UACPI_STATUS_OUT_OF_MEMORY;
+    }
+
+    devp->segment = address.segment;
+    devp->bus = address.bus;
+    devp->slot = address.device;
+    devp->func = address.function;
+    pci_add_device(devp);
+
+    *out_handle = devp;
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_read8(uacpi_handle handle, uacpi_size offset, uacpi_u8 *out_value)
+{
+    struct pci_device *devp = handle;
+
+    /* pci_readl() already shifts the addressed byte into the low bits */
+    *out_value = pci_readl(devp, offset) & MASK(8);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_read16(uacpi_handle handle, uacpi_size offset, uacpi_u16 *out_value)
+{
+    struct pci_device *devp = handle;
+
+    *out_value = pci_readl(devp, offset) & MASK(16);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_kernel_pci_read32(uacpi_handle handle, uacpi_size offset, uacpi_u32 *out_value)
+{
+    struct pci_device *devp = handle;
+    *out_value = pci_readl(devp, offset);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_write8(uacpi_handle handle, uacpi_size offset, uacpi_u8 in_value)
+{
+    struct pci_device *devp = handle;
+    uint32_t shift = (offset & 3) * 8;
+    uint32_t v;
+
+    /* RMW the dword-aligned register containing this byte */
+    v = pci_readl(devp, offset & ~3);
+    v &= ~((uint32_t)MASK(8) << shift);
+    v |= (uint32_t)in_value << shift;
+    pci_writel(devp, offset & ~3, v);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_write16(uacpi_handle handle, uacpi_size offset, uacpi_u16 in_value)
+{
+    struct pci_device *devp = handle;
+    uint32_t shift = (offset & 2) * 8;
+    uint32_t v;
+
+    /* RMW the dword-aligned register containing this word */
+    v = pci_readl(devp, offset & ~3);
+    v &= ~((uint32_t)MASK(16) << shift);
+    v |= (uint32_t)in_value << shift;
+    pci_writel(devp, offset & ~3, v);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_write32(uacpi_handle handle, uacpi_size offset, uacpi_u32 in_value)
+{
+    struct pci_device *devp = handle;
+
+    pci_writel(devp, offset, in_value);
+    return UACPI_STATUS_OK;
+}
+
+uacpi_u64
+uacpi_kernel_get_nanoseconds_since_boot(void)
+{
+    static uacpi_u64 time = 0;
+    static struct timer tmr = {0};
+    tmrr_status_t tmr_error;
+
+    if (time == 0) {
+        tmr_error = req_timer(TIMER_GP, &tmr);
+        if (tmr_error != TMRR_SUCCESS) {
+            time += 1000000;
+            return time;
+        }
+    }
+
+    time = tmr.get_time_nsec();
+    return time;
+}
+
+void
+uacpi_kernel_log(uacpi_log_level level, const uacpi_char *p)
+{
+    kprintf(p);
+}
+
+int
+uacpi_init(void) +{ + uacpi_status ret; + + ret = uacpi_initialize(0); + if (uacpi_unlikely_error(ret)) { + kprintf("uacpi init error: %s\n", uacpi_status_to_string(ret)); + return -1; + } + + ret = uacpi_namespace_load(); + if (uacpi_unlikely_error(ret)) { + kprintf("uacpi namespace load error: %s\n", uacpi_status_to_string(ret)); + return -1; + } + + ret = uacpi_namespace_initialize(); + if (uacpi_unlikely_error(ret)) { + kprintf("uacpi namespace init error: %s\n", uacpi_status_to_string(ret)); + return -1; + } + + ret = uacpi_finalize_gpe_initialization(); + if (uacpi_unlikely_error(ret)) { + kprintf("uacpi GPE init error: %s\n", uacpi_status_to_string(ret)); + return -1; + } + + ret = uacpi_install_fixed_event_handler( + UACPI_FIXED_EVENT_POWER_BUTTON, + power_button_handler, UACPI_NULL + ); + + if (uacpi_unlikely_error(ret)) { + kprintf("failed to install power button event: %s\n", + uacpi_status_to_string(ret) + ); + return -1; + } + + return 0; +} diff --git a/sys/dev/acpi/uacpi/default_handlers.c b/sys/dev/acpi/uacpi/default_handlers.c new file mode 100644 index 0000000..32259d6 --- /dev/null +++ b/sys/dev/acpi/uacpi/default_handlers.c @@ -0,0 +1,336 @@ +#include <uacpi/internal/opregion.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/helpers.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/io.h> +#include <uacpi/kernel_api.h> +#include <uacpi/uacpi.h> + +#ifndef UACPI_BAREBONES_MODE + +#define PCI_ROOT_PNP_ID "PNP0A03" +#define PCI_EXPRESS_ROOT_PNP_ID "PNP0A08" + +static uacpi_namespace_node *find_pci_root(uacpi_namespace_node *node) +{ + static const uacpi_char *pci_root_ids[] = { + PCI_ROOT_PNP_ID, + PCI_EXPRESS_ROOT_PNP_ID, + UACPI_NULL + }; + uacpi_namespace_node *parent = node->parent; + + while (parent != uacpi_namespace_root()) { + if (uacpi_device_matches_pnp_id(parent, pci_root_ids)) { + uacpi_trace( + "found a PCI root node %.4s controlling region %.4s\n", + parent->name.text, node->name.text + ); + return parent; + } + + parent = parent->parent; + } + + uacpi_trace_region_error( + node, "unable to find PCI root controlling", + UACPI_STATUS_NOT_FOUND + ); + return node; +} + +static uacpi_status pci_region_attach(uacpi_region_attach_data *data) +{ + uacpi_namespace_node *node, *pci_root, *device; + uacpi_pci_address address = { 0 }; + uacpi_u64 value; + uacpi_status ret; + + node = data->region_node; + pci_root = find_pci_root(node); + + /* + * Find the actual device object that is supposed to be controlling + * this operation region. 
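+     * Walk up from the region node; the first ancestor of
+     * type UACPI_OBJECT_DEVICE is the one we want.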
+ */ + device = node; + while (device) { + uacpi_object_type type; + + ret = uacpi_namespace_node_type(device, &type); + if (uacpi_unlikely_error(ret)) + return ret; + + if (type == UACPI_OBJECT_DEVICE) + break; + + device = device->parent; + } + + if (uacpi_unlikely(device == UACPI_NULL)) { + ret = UACPI_STATUS_NOT_FOUND; + uacpi_trace_region_error( + node, "unable to find device responsible for", ret + ); + return ret; + } + + ret = uacpi_eval_simple_integer(device, "_ADR", &value); + if (ret == UACPI_STATUS_OK) { + address.function = (value >> 0) & 0xFF; + address.device = (value >> 16) & 0xFF; + } + + ret = uacpi_eval_simple_integer(pci_root, "_SEG", &value); + if (ret == UACPI_STATUS_OK) + address.segment = value; + + ret = uacpi_eval_simple_integer(pci_root, "_BBN", &value); + if (ret == UACPI_STATUS_OK) + address.bus = value; + + uacpi_trace( + "detected PCI device %.4s@%04X:%02X:%02X:%01X\n", + device->name.text, address.segment, address.bus, + address.device, address.function + ); + + return uacpi_kernel_pci_device_open(address, &data->out_region_context); +} + +static uacpi_status pci_region_detach(uacpi_region_detach_data *data) +{ + uacpi_kernel_pci_device_close(data->region_context); + return UACPI_STATUS_OK; +} + +static uacpi_status pci_region_do_rw( + uacpi_region_op op, uacpi_region_rw_data *data +) +{ + uacpi_handle dev = data->region_context; + uacpi_u8 width; + uacpi_size offset; + + offset = data->offset; + width = data->byte_width; + + return op == UACPI_REGION_OP_READ ? + uacpi_pci_read(dev, offset, width, &data->value) : + uacpi_pci_write(dev, offset, width, data->value); +} + +static uacpi_status handle_pci_region(uacpi_region_op op, uacpi_handle op_data) +{ + switch (op) { + case UACPI_REGION_OP_ATTACH: + return pci_region_attach(op_data); + case UACPI_REGION_OP_DETACH: + return pci_region_detach(op_data); + case UACPI_REGION_OP_READ: + case UACPI_REGION_OP_WRITE: + return pci_region_do_rw(op, op_data); + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } +} + +struct memory_region_ctx { + uacpi_phys_addr phys; + uacpi_u8 *virt; + uacpi_size size; +}; + +static uacpi_status memory_region_attach(uacpi_region_attach_data *data) +{ + struct memory_region_ctx *ctx; + uacpi_status ret = UACPI_STATUS_OK; + + ctx = uacpi_kernel_alloc(sizeof(*ctx)); + if (ctx == UACPI_NULL) + return UACPI_STATUS_OUT_OF_MEMORY; + + ctx->size = data->generic_info.length; + + // FIXME: this really shouldn't try to map everything at once + ctx->phys = data->generic_info.base; + ctx->virt = uacpi_kernel_map(ctx->phys, ctx->size); + + if (uacpi_unlikely(ctx->virt == UACPI_NULL)) { + ret = UACPI_STATUS_MAPPING_FAILED; + uacpi_trace_region_error(data->region_node, "unable to map", ret); + uacpi_free(ctx, sizeof(*ctx)); + goto out; + } + + data->out_region_context = ctx; +out: + return ret; +} + +static uacpi_status memory_region_detach(uacpi_region_detach_data *data) +{ + struct memory_region_ctx *ctx = data->region_context; + + uacpi_kernel_unmap(ctx->virt, ctx->size); + uacpi_free(ctx, sizeof(*ctx)); + return UACPI_STATUS_OK; +} + +struct io_region_ctx { + uacpi_io_addr base; + uacpi_handle handle; +}; + +static uacpi_status io_region_attach(uacpi_region_attach_data *data) +{ + struct io_region_ctx *ctx; + uacpi_generic_region_info *info = &data->generic_info; + uacpi_status ret; + + ctx = uacpi_kernel_alloc(sizeof(*ctx)); + if (ctx == UACPI_NULL) + return UACPI_STATUS_OUT_OF_MEMORY; + + ctx->base = info->base; + + ret = uacpi_kernel_io_map(ctx->base, info->length, &ctx->handle); + if 
(uacpi_unlikely_error(ret)) { + uacpi_trace_region_error( + data->region_node, "unable to map an IO", ret + ); + uacpi_free(ctx, sizeof(*ctx)); + return ret; + } + + data->out_region_context = ctx; + return ret; +} + +static uacpi_status io_region_detach(uacpi_region_detach_data *data) +{ + struct io_region_ctx *ctx = data->region_context; + + uacpi_kernel_io_unmap(ctx->handle); + uacpi_free(ctx, sizeof(*ctx)); + return UACPI_STATUS_OK; +} + +static uacpi_status memory_region_do_rw( + uacpi_region_op op, uacpi_region_rw_data *data +) +{ + struct memory_region_ctx *ctx = data->region_context; + uacpi_size offset; + + offset = data->address - ctx->phys; + + return op == UACPI_REGION_OP_READ ? + uacpi_system_memory_read(ctx->virt, offset, data->byte_width, &data->value) : + uacpi_system_memory_write(ctx->virt, offset, data->byte_width, data->value); +} + +static uacpi_status handle_memory_region(uacpi_region_op op, uacpi_handle op_data) +{ + switch (op) { + case UACPI_REGION_OP_ATTACH: + return memory_region_attach(op_data); + case UACPI_REGION_OP_DETACH: + return memory_region_detach(op_data); + case UACPI_REGION_OP_READ: + case UACPI_REGION_OP_WRITE: + return memory_region_do_rw(op, op_data); + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } +} + +static uacpi_status table_data_region_do_rw( + uacpi_region_op op, uacpi_region_rw_data *data +) +{ + void *addr = UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)data->offset); + + return op == UACPI_REGION_OP_READ ? + uacpi_system_memory_read(addr, 0, data->byte_width, &data->value) : + uacpi_system_memory_write(addr, 0, data->byte_width, data->value); +} + +static uacpi_status handle_table_data_region(uacpi_region_op op, uacpi_handle op_data) +{ + switch (op) { + case UACPI_REGION_OP_ATTACH: + case UACPI_REGION_OP_DETACH: + return UACPI_STATUS_OK; + case UACPI_REGION_OP_READ: + case UACPI_REGION_OP_WRITE: + return table_data_region_do_rw(op, op_data); + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } +} + +static uacpi_status io_region_do_rw( + uacpi_region_op op, uacpi_region_rw_data *data +) +{ + struct io_region_ctx *ctx = data->region_context; + uacpi_u8 width; + uacpi_size offset; + + offset = data->offset - ctx->base; + width = data->byte_width; + + return op == UACPI_REGION_OP_READ ? 
+ uacpi_system_io_read(ctx->handle, offset, width, &data->value) : + uacpi_system_io_write(ctx->handle, offset, width, data->value); +} + +static uacpi_status handle_io_region(uacpi_region_op op, uacpi_handle op_data) +{ + switch (op) { + case UACPI_REGION_OP_ATTACH: + return io_region_attach(op_data); + case UACPI_REGION_OP_DETACH: + return io_region_detach(op_data); + case UACPI_REGION_OP_READ: + case UACPI_REGION_OP_WRITE: + return io_region_do_rw(op, op_data); + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } +} + +void uacpi_install_default_address_space_handlers(void) +{ + uacpi_namespace_node *root; + + root = uacpi_namespace_root(); + + uacpi_install_address_space_handler_with_flags( + root, UACPI_ADDRESS_SPACE_SYSTEM_MEMORY, + handle_memory_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT + ); + + uacpi_install_address_space_handler_with_flags( + root, UACPI_ADDRESS_SPACE_SYSTEM_IO, + handle_io_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT + ); + + uacpi_install_address_space_handler_with_flags( + root, UACPI_ADDRESS_SPACE_PCI_CONFIG, + handle_pci_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT + ); + + uacpi_install_address_space_handler_with_flags( + root, UACPI_ADDRESS_SPACE_TABLE_DATA, + handle_table_data_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT + ); +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/event.c b/sys/dev/acpi/uacpi/event.c new file mode 100644 index 0000000..0c58372 --- /dev/null +++ b/sys/dev/acpi/uacpi/event.c @@ -0,0 +1,2449 @@ +#include <uacpi/internal/event.h> +#include <uacpi/internal/registers.h> +#include <uacpi/internal/context.h> +#include <uacpi/internal/io.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/interpreter.h> +#include <uacpi/internal/notify.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/acpi.h> + +#define UACPI_EVENT_DISABLED 0 +#define UACPI_EVENT_ENABLED 1 + +#if !defined(UACPI_REDUCED_HARDWARE) && !defined(UACPI_BAREBONES_MODE) + +static uacpi_handle g_gpe_state_slock; +static struct uacpi_recursive_lock g_event_lock; +static uacpi_bool g_gpes_finalized; + +struct fixed_event { + uacpi_u8 enable_field; + uacpi_u8 status_field; + uacpi_u16 enable_mask; + uacpi_u16 status_mask; +}; + +struct fixed_event_handler { + uacpi_interrupt_handler handler; + uacpi_handle ctx; +}; + +static const struct fixed_event fixed_events[UACPI_FIXED_EVENT_MAX + 1] = { + [UACPI_FIXED_EVENT_GLOBAL_LOCK] = { + .status_field = UACPI_REGISTER_FIELD_GBL_STS, + .enable_field = UACPI_REGISTER_FIELD_GBL_EN, + .enable_mask = ACPI_PM1_EN_GBL_EN_MASK, + .status_mask = ACPI_PM1_STS_GBL_STS_MASK, + }, + [UACPI_FIXED_EVENT_TIMER_STATUS] = { + .status_field = UACPI_REGISTER_FIELD_TMR_STS, + .enable_field = UACPI_REGISTER_FIELD_TMR_EN, + .enable_mask = ACPI_PM1_EN_TMR_EN_MASK, + .status_mask = ACPI_PM1_STS_TMR_STS_MASK, + }, + [UACPI_FIXED_EVENT_POWER_BUTTON] = { + .status_field = UACPI_REGISTER_FIELD_PWRBTN_STS, + .enable_field = UACPI_REGISTER_FIELD_PWRBTN_EN, + .enable_mask = ACPI_PM1_EN_PWRBTN_EN_MASK, + .status_mask = ACPI_PM1_STS_PWRBTN_STS_MASK, + }, + [UACPI_FIXED_EVENT_SLEEP_BUTTON] = { + .status_field = UACPI_REGISTER_FIELD_SLPBTN_STS, + .enable_field = UACPI_REGISTER_FIELD_SLPBTN_EN, + .enable_mask = ACPI_PM1_EN_SLPBTN_EN_MASK, + .status_mask = ACPI_PM1_STS_SLPBTN_STS_MASK, + }, + [UACPI_FIXED_EVENT_RTC] = { + .status_field = UACPI_REGISTER_FIELD_RTC_STS, + 
.enable_field = UACPI_REGISTER_FIELD_RTC_EN, + .enable_mask = ACPI_PM1_EN_RTC_EN_MASK, + .status_mask = ACPI_PM1_STS_RTC_STS_MASK, + }, +}; + +static struct fixed_event_handler +fixed_event_handlers[UACPI_FIXED_EVENT_MAX + 1]; + +static uacpi_status initialize_fixed_events(void) +{ + uacpi_size i; + + for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) { + uacpi_write_register_field( + fixed_events[i].enable_field, UACPI_EVENT_DISABLED + ); + } + + return UACPI_STATUS_OK; +} + +static uacpi_status set_event(uacpi_u8 event, uacpi_u8 value) +{ + uacpi_status ret; + uacpi_u64 raw_value; + const struct fixed_event *ev = &fixed_events[event]; + + ret = uacpi_write_register_field(ev->enable_field, value); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_read_register_field(ev->enable_field, &raw_value); + if (uacpi_unlikely_error(ret)) + return ret; + + if (raw_value != value) { + uacpi_error("failed to %sable fixed event %d\n", + value ? "en" : "dis", event); + return UACPI_STATUS_HARDWARE_TIMEOUT; + } + + uacpi_trace("fixed event %d %sabled successfully\n", + event, value ? "en" : "dis"); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + /* + * Attempting to enable an event that doesn't have a handler is most likely + * an error, don't allow it. + */ + if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL)) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + } + + ret = set_event(event, UACPI_EVENT_ENABLED); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = set_event(event, UACPI_EVENT_DISABLED); + + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event) +{ + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + return uacpi_write_register_field( + fixed_events[event].status_field, ACPI_PM1_STS_CLEAR + ); +} + +static uacpi_interrupt_ret dispatch_fixed_event( + const struct fixed_event *ev, uacpi_fixed_event event +) +{ + uacpi_status ret; + struct fixed_event_handler *evh = &fixed_event_handlers[event]; + + ret = uacpi_write_register_field(ev->status_field, ACPI_PM1_STS_CLEAR); + if (uacpi_unlikely_error(ret)) + return UACPI_INTERRUPT_NOT_HANDLED; + + if (uacpi_unlikely(evh->handler == UACPI_NULL)) { + uacpi_warn( + "fixed event %d fired but no handler installed, disabling...\n", + event + ); + uacpi_write_register_field(ev->enable_field, UACPI_EVENT_DISABLED); + return UACPI_INTERRUPT_NOT_HANDLED; + } + + return evh->handler(evh->ctx); +} 
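+
+/*
+ * handle_fixed_events(): read PM1 status and enable once,
+ * then dispatch every fixed event whose status and enable
+ * bits are both set.
+ */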
+ +static uacpi_interrupt_ret handle_fixed_events(void) +{ + uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED; + uacpi_status ret; + uacpi_u64 enable_mask, status_mask; + uacpi_size i; + + ret = uacpi_read_register(UACPI_REGISTER_PM1_STS, &status_mask); + if (uacpi_unlikely_error(ret)) + return int_ret; + + ret = uacpi_read_register(UACPI_REGISTER_PM1_EN, &enable_mask); + if (uacpi_unlikely_error(ret)) + return int_ret; + + for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) + { + const struct fixed_event *ev = &fixed_events[i]; + + if (!(status_mask & ev->status_mask) || + !(enable_mask & ev->enable_mask)) + continue; + + int_ret |= dispatch_fixed_event(ev, i); + } + + return int_ret; +} + +struct gpe_native_handler { + uacpi_gpe_handler cb; + uacpi_handle ctx; + + /* + * Preserved values to be used for state restoration if this handler is + * removed at any point. + */ + uacpi_handle previous_handler; + uacpi_u8 previous_triggering : 1; + uacpi_u8 previous_handler_type : 3; + uacpi_u8 previously_enabled : 1; +}; + +struct gpe_implicit_notify_handler { + struct gpe_implicit_notify_handler *next; + uacpi_namespace_node *device; +}; + +#define EVENTS_PER_GPE_REGISTER 8 + +/* + * NOTE: + * This API and these handler types are inspired by ACPICA; let's not + * reinvent the wheel, and instead follow a similar path that people ended + * up finding useful after years of dealing with ACPI. Obviously credit + * goes to them for inventing "implicit notify" and other neat APIs. + */ +enum gpe_handler_type { + GPE_HANDLER_TYPE_NONE = 0, + GPE_HANDLER_TYPE_AML_HANDLER = 1, + GPE_HANDLER_TYPE_NATIVE_HANDLER = 2, + GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW = 3, + GPE_HANDLER_TYPE_IMPLICIT_NOTIFY = 4, +}; + +struct gp_event { + union { + struct gpe_native_handler *native_handler; + struct gpe_implicit_notify_handler *implicit_handler; + uacpi_namespace_node *aml_handler; + uacpi_handle *any_handler; + }; + + struct gpe_register *reg; + uacpi_u16 idx; + + // "reference count" of the number of times this event has been enabled + uacpi_u8 num_users; + + uacpi_u8 handler_type : 3; + uacpi_u8 triggering : 1; + uacpi_u8 wake : 1; + uacpi_u8 block_interrupts : 1; +}; + +struct gpe_register { + uacpi_mapped_gas status; + uacpi_mapped_gas enable; + + uacpi_u8 runtime_mask; + uacpi_u8 wake_mask; + uacpi_u8 masked_mask; + uacpi_u8 current_mask; + + uacpi_u16 base_idx; +}; + +struct gpe_block { + struct gpe_block *prev, *next; + + /* + * Technically this can only refer to \_GPE, but there's also apparently a + * "GPE Block Device" with id "ACPI0006", which is not used by anyone. We + * still keep it as a possibility that someone might eventually use it, so + * it is supported here.
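+ * + * (Illustrative note: for the two FADT-defined GPE blocks created by + * initialize_gpes() further below this is the predefined \_GPE scope node, + * while uacpi_install_gpe_block() is the path that would tie a block to an + * actual GPE block device.)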
 + */ + uacpi_namespace_node *device_node; + + struct gpe_register *registers; + struct gp_event *events; + struct gpe_interrupt_ctx *irq_ctx; + + uacpi_u16 num_registers; + uacpi_u16 num_events; + uacpi_u16 base_idx; +}; + +struct gpe_interrupt_ctx { + struct gpe_interrupt_ctx *prev, *next; + + struct gpe_block *gpe_head; + uacpi_handle irq_handle; + uacpi_u32 irq; +}; +static struct gpe_interrupt_ctx *g_gpe_interrupt_head; + +static uacpi_u8 gpe_get_mask(struct gp_event *event) +{ + return 1 << (event->idx - event->reg->base_idx); +} + +enum gpe_state { + GPE_STATE_ENABLED, + GPE_STATE_ENABLED_CONDITIONALLY, + GPE_STATE_DISABLED, +}; + +static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state) +{ + uacpi_status ret; + struct gpe_register *reg = event->reg; + uacpi_u64 enable_mask; + uacpi_u8 event_bit; + uacpi_cpu_flags flags; + + event_bit = gpe_get_mask(event); + if (state != GPE_STATE_DISABLED && (reg->masked_mask & event_bit)) + return UACPI_STATUS_OK; + + if (state == GPE_STATE_ENABLED_CONDITIONALLY) { + if (!(reg->current_mask & event_bit)) + return UACPI_STATUS_OK; + + state = GPE_STATE_ENABLED; + } + + flags = uacpi_kernel_lock_spinlock(g_gpe_state_slock); + + ret = uacpi_gas_read_mapped(&reg->enable, &enable_mask); + if (uacpi_unlikely_error(ret)) + goto out; + + switch (state) { + case GPE_STATE_ENABLED: + enable_mask |= event_bit; + break; + case GPE_STATE_DISABLED: + enable_mask &= ~event_bit; + break; + default: + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + ret = uacpi_gas_write_mapped(&reg->enable, enable_mask); +out: + uacpi_kernel_unlock_spinlock(g_gpe_state_slock, flags); + return ret; +} + +static uacpi_status clear_gpe(struct gp_event *event) +{ + struct gpe_register *reg = event->reg; + + return uacpi_gas_write_mapped(&reg->status, gpe_get_mask(event)); +} + +static uacpi_status restore_gpe(struct gp_event *event) +{ + uacpi_status ret; + + if (event->triggering == UACPI_GPE_TRIGGERING_LEVEL) { + ret = clear_gpe(event); + if (uacpi_unlikely_error(ret)) + return ret; + } + + ret = set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY); + event->block_interrupts = UACPI_FALSE; + + return ret; +} + +static void async_restore_gpe(uacpi_handle opaque) +{ + uacpi_status ret; + struct gp_event *event = opaque; + + ret = restore_gpe(event); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to restore GPE(%02X): %s\n", + event->idx, uacpi_status_to_string(ret)); + } +} + +static void async_run_gpe_handler(uacpi_handle opaque) +{ + uacpi_status ret; + struct gp_event *event = opaque; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) + goto out_no_unlock; + + switch (event->handler_type) { + case GPE_HANDLER_TYPE_AML_HANDLER: { + uacpi_object *method_obj; + uacpi_object_name name; + + method_obj = uacpi_namespace_node_get_object_typed( + event->aml_handler, UACPI_OBJECT_METHOD_BIT + ); + if (uacpi_unlikely(method_obj == UACPI_NULL)) { + uacpi_error("GPE(%02X) AML handler gone\n", event->idx); + break; + } + + name = uacpi_namespace_node_name(event->aml_handler); + uacpi_trace( + "executing GPE(%02X) handler %.4s\n", + event->idx, name.text + ); + + ret = uacpi_execute_control_method( + event->aml_handler, method_obj->method, UACPI_NULL, UACPI_NULL + ); + if (uacpi_unlikely_error(ret)) { + uacpi_error( + "error while executing GPE(%02X) handler %.4s: %s\n", + event->idx, event->aml_handler->name.text, + uacpi_status_to_string(ret) + ); + } + break; + } + + case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: { + struct
gpe_implicit_notify_handler *handler; + + handler = event->implicit_handler; + while (handler) { + /* + * 2 - Device Wake. Used to notify OSPM that the device has signaled + * its wake event, and that OSPM needs to notify OSPM native device + * driver for the device. + */ + uacpi_notify_all(handler->device, 2); + handler = handler->next; + } + break; + } + + default: + break; + } + + uacpi_namespace_write_unlock(); + +out_no_unlock: + /* + * We schedule the work as NOTIFICATION to make sure all other notifications + * finish before this GPE is re-enabled. + */ + ret = uacpi_kernel_schedule_work( + UACPI_WORK_NOTIFICATION, async_restore_gpe, event + ); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to schedule GPE(%02X) restore: %s\n", + event->idx, uacpi_status_to_string(ret)); + async_restore_gpe(event); + } +} + +static uacpi_interrupt_ret dispatch_gpe( + uacpi_namespace_node *device_node, struct gp_event *event +) +{ + uacpi_status ret; + uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED; + + /* + * For raw handlers we don't do any management whatsoever; we just let the + * handler know a GPE has triggered and let it handle disable/enable as + * well as clearing. + */ + if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) { + return event->native_handler->cb( + event->native_handler->ctx, device_node, event->idx + ); + } + + ret = set_gpe_state(event, GPE_STATE_DISABLED); + if (uacpi_unlikely_error(ret)) { + uacpi_error("failed to disable GPE(%02X): %s\n", + event->idx, uacpi_status_to_string(ret)); + return int_ret; + } + + event->block_interrupts = UACPI_TRUE; + + if (event->triggering == UACPI_GPE_TRIGGERING_EDGE) { + ret = clear_gpe(event); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to clear GPE(%02X): %s\n", + event->idx, uacpi_status_to_string(ret)); + set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY); + return int_ret; + } + } + + switch (event->handler_type) { + case GPE_HANDLER_TYPE_NATIVE_HANDLER: + int_ret = event->native_handler->cb( + event->native_handler->ctx, device_node, event->idx + ); + if (!(int_ret & UACPI_GPE_REENABLE)) + break; + + ret = restore_gpe(event); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to restore GPE(%02X): %s\n", + event->idx, uacpi_status_to_string(ret)); + } + break; + + case GPE_HANDLER_TYPE_AML_HANDLER: + case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: + ret = uacpi_kernel_schedule_work( + UACPI_WORK_GPE_EXECUTION, async_run_gpe_handler, event + ); + if (uacpi_unlikely_error(ret)) { + uacpi_warn( + "unable to schedule GPE(%02X) for execution: %s\n", + event->idx, uacpi_status_to_string(ret) + ); + } + break; + + default: + uacpi_warn("GPE(%02X) fired but no handler, keeping disabled\n", + event->idx); + break; + } + + return UACPI_INTERRUPT_HANDLED; +} + +static uacpi_interrupt_ret detect_gpes(struct gpe_block *block) +{ + uacpi_status ret; + uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED; + struct gpe_register *reg; + struct gp_event *event; + uacpi_u64 status, enable; + uacpi_size i, j; + + while (block) { + for (i = 0; i < block->num_registers; ++i) { + reg = &block->registers[i]; + + if (!reg->runtime_mask && !reg->wake_mask) + continue; + + ret = uacpi_gas_read_mapped(&reg->status, &status); + if (uacpi_unlikely_error(ret)) + return int_ret; + + ret = uacpi_gas_read_mapped(&reg->enable, &enable); + if (uacpi_unlikely_error(ret)) + return int_ret; + + if (status == 0) + continue; + + for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) { + if (!((status & enable) & (1ull << j))) + continue; +
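+ /* + * (Illustrative note: each GPE register covers 8 events, so bit j + * of register i maps to the flat event index + * j + i * EVENTS_PER_GPE_REGISTER within this block.) + */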
+ event = &block->events[j + i * EVENTS_PER_GPE_REGISTER]; + int_ret |= dispatch_gpe(block->device_node, event); + } + } + + block = block->next; + } + + return int_ret; +} + +static uacpi_status maybe_dispatch_gpe( + uacpi_namespace_node *gpe_device, struct gp_event *event +) +{ + uacpi_status ret; + struct gpe_register *reg = event->reg; + uacpi_u64 status; + + ret = uacpi_gas_read_mapped(&reg->status, &status); + if (uacpi_unlikely_error(ret)) + return ret; + + if (!(status & gpe_get_mask(event))) + return ret; + + dispatch_gpe(gpe_device, event); + return ret; +} + +static uacpi_interrupt_ret handle_gpes(uacpi_handle opaque) +{ + struct gpe_interrupt_ctx *ctx = opaque; + + if (uacpi_unlikely(ctx == UACPI_NULL)) + return UACPI_INTERRUPT_NOT_HANDLED; + + return detect_gpes(ctx->gpe_head); +} + +static uacpi_status find_or_create_gpe_interrupt_ctx( + uacpi_u32 irq, struct gpe_interrupt_ctx **out_ctx +) +{ + uacpi_status ret; + struct gpe_interrupt_ctx *entry = g_gpe_interrupt_head; + + while (entry) { + if (entry->irq == irq) { + *out_ctx = entry; + return UACPI_STATUS_OK; + } + + entry = entry->next; + } + + entry = uacpi_kernel_alloc_zeroed(sizeof(*entry)); + if (uacpi_unlikely(entry == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + /* + * SCI interrupt is installed by other code and is responsible for more + * things than just the GPE handling. Don't install it here. + */ + if (irq != g_uacpi_rt_ctx.fadt.sci_int) { + ret = uacpi_kernel_install_interrupt_handler( + irq, handle_gpes, entry, &entry->irq_handle + ); + if (uacpi_unlikely_error(ret)) { + uacpi_free(entry, sizeof(*entry)); + return ret; + } + } + + entry->irq = irq; + entry->next = g_gpe_interrupt_head; + g_gpe_interrupt_head = entry; + + *out_ctx = entry; + return UACPI_STATUS_OK; +} + +static void gpe_release_implicit_notify_handlers(struct gp_event *event) +{ + struct gpe_implicit_notify_handler *handler, *next_handler; + + handler = event->implicit_handler; + while (handler) { + next_handler = handler->next; + uacpi_free(handler, sizeof(*handler)); + handler = next_handler; + } + + event->implicit_handler = UACPI_NULL; +} + +enum gpe_block_action +{ + GPE_BLOCK_ACTION_DISABLE_ALL, + GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME, + GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE, + GPE_BLOCK_ACTION_CLEAR_ALL, +}; + +static uacpi_status gpe_block_apply_action( + struct gpe_block *block, enum gpe_block_action action +) +{ + uacpi_status ret; + uacpi_size i; + uacpi_u8 value; + struct gpe_register *reg; + + for (i = 0; i < block->num_registers; ++i) { + reg = &block->registers[i]; + + switch (action) { + case GPE_BLOCK_ACTION_DISABLE_ALL: + value = 0; + break; + case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME: + value = reg->runtime_mask & ~reg->masked_mask; + break; + case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE: + value = reg->wake_mask; + break; + case GPE_BLOCK_ACTION_CLEAR_ALL: + ret = uacpi_gas_write_mapped(&reg->status, 0xFF); + if (uacpi_unlikely_error(ret)) + return ret; + continue; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + reg->current_mask = value; + ret = uacpi_gas_write_mapped(&reg->enable, value); + if (uacpi_unlikely_error(ret)) + return ret; + } + + return UACPI_STATUS_OK; +} + +static void gpe_block_mask_safe(struct gpe_block *block) +{ + uacpi_size i; + struct gpe_register *reg; + + for (i = 0; i < block->num_registers; ++i) { + reg = &block->registers[i]; + + // No need to flush or do anything if it's not currently enabled + if (!reg->current_mask) + continue; + + // 1.
Mask the GPEs; this makes sure their state is no longer modifiable + reg->masked_mask = 0xFF; + + /* + * 2. Wait for in-flight work & IRQs to finish; these might already + * be past the respective "if (masked)" check and therefore may + * try to re-enable a masked GPE. + */ + uacpi_kernel_wait_for_work_completion(); + + /* + * 3. Now that this GPE's state is unmodifiable and we know that + * currently in-flight IRQs will see the masked state, we can + * safely disable all events knowing they won't be re-enabled by + * a racing IRQ. + */ + uacpi_gas_write_mapped(&reg->enable, 0x00); + + /* + * 4. Wait for the last possible IRQ to finish, now that this event is + * disabled. + */ + uacpi_kernel_wait_for_work_completion(); + } +} + +static void uninstall_gpe_block(struct gpe_block *block) +{ + if (block->registers != UACPI_NULL) { + struct gpe_register *reg; + uacpi_size i; + + gpe_block_mask_safe(block); + + for (i = 0; i < block->num_registers; ++i) { + reg = &block->registers[i]; + + if (reg->enable.total_bit_width) + uacpi_unmap_gas_nofree(&reg->enable); + if (reg->status.total_bit_width) + uacpi_unmap_gas_nofree(&reg->status); + } + } + + if (block->prev) + block->prev->next = block->next; + + if (block->irq_ctx) { + struct gpe_interrupt_ctx *ctx = block->irq_ctx; + + // Are we the first GPE block? + if (block == ctx->gpe_head) { + ctx->gpe_head = ctx->gpe_head->next; + } else { + struct gpe_block *prev_block = ctx->gpe_head; + + // We're not, so do a search + while (prev_block) { + if (prev_block->next == block) { + prev_block->next = block->next; + break; + } + + prev_block = prev_block->next; + } + } + + // This GPE block was the last user of this interrupt context; remove it + if (ctx->gpe_head == UACPI_NULL) { + if (ctx->prev) + ctx->prev->next = ctx->next; + + if (ctx->irq != g_uacpi_rt_ctx.fadt.sci_int) { + uacpi_kernel_uninstall_interrupt_handler( + handle_gpes, ctx->irq_handle + ); + } + + uacpi_free(block->irq_ctx, sizeof(*block->irq_ctx)); + } + } + + if (block->events != UACPI_NULL) { + uacpi_size i; + struct gp_event *event; + + for (i = 0; i < block->num_events; ++i) { + event = &block->events[i]; + + switch (event->handler_type) { + case GPE_HANDLER_TYPE_NONE: + case GPE_HANDLER_TYPE_AML_HANDLER: + break; + + case GPE_HANDLER_TYPE_NATIVE_HANDLER: + case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW: + uacpi_free(event->native_handler, + sizeof(*event->native_handler)); + break; + + case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: { + gpe_release_implicit_notify_handlers(event); + break; + } + + default: + break; + } + } + + } + + uacpi_free(block->registers, + sizeof(*block->registers) * block->num_registers); + uacpi_free(block->events, + sizeof(*block->events) * block->num_events); + uacpi_free(block, sizeof(*block)); +} + +static struct gp_event *gpe_from_block(struct gpe_block *block, uacpi_u16 idx) +{ + uacpi_u16 offset; + + if (idx < block->base_idx) + return UACPI_NULL; + + offset = idx - block->base_idx; + if (offset > block->num_events) + return UACPI_NULL; + + return &block->events[offset]; +} + +struct gpe_match_ctx { + struct gpe_block *block; + uacpi_u32 matched_count; + uacpi_bool post_dynamic_table_load; +}; + +static uacpi_iteration_decision do_match_gpe_methods( + uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth +) +{ + uacpi_status ret; + struct gpe_match_ctx *ctx = opaque; + struct gp_event *event; + uacpi_u8 triggering; + uacpi_u64 idx; + + UACPI_UNUSED(depth); + + if (node->name.text[0] != '_') + return UACPI_ITERATION_DECISION_CONTINUE; + + switch
(node->name.text[1]) { + case 'L': + triggering = UACPI_GPE_TRIGGERING_LEVEL; + break; + case 'E': + triggering = UACPI_GPE_TRIGGERING_EDGE; + break; + default: + return UACPI_ITERATION_DECISION_CONTINUE; + } + + ret = uacpi_string_to_integer(&node->name.text[2], 2, UACPI_BASE_HEX, &idx); + if (uacpi_unlikely_error(ret)) { + uacpi_trace("invalid GPE method name %.4s, ignored\n", node->name.text); + return UACPI_ITERATION_DECISION_CONTINUE; + } + + event = gpe_from_block(ctx->block, idx); + if (event == UACPI_NULL) + return UACPI_ITERATION_DECISION_CONTINUE; + + switch (event->handler_type) { + /* + * This had implicit notify configured but this is no longer needed as we + * now have an actual AML handler. Free the implicit notify list and switch + * this handler to AML mode. + */ + case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: + gpe_release_implicit_notify_handlers(event); + UACPI_FALLTHROUGH; + case GPE_HANDLER_TYPE_NONE: + event->aml_handler = node; + event->handler_type = GPE_HANDLER_TYPE_AML_HANDLER; + break; + + case GPE_HANDLER_TYPE_AML_HANDLER: + // This is okay, since we're re-running the detection code + if (!ctx->post_dynamic_table_load) { + uacpi_warn( + "GPE(%02X) already matched %.4s, skipping %.4s\n", + (uacpi_u32)idx, event->aml_handler->name.text, node->name.text + ); + } + return UACPI_ITERATION_DECISION_CONTINUE; + + case GPE_HANDLER_TYPE_NATIVE_HANDLER: + case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW: + uacpi_trace( + "not assigning GPE(%02X) to %.4s, override " + "installed by user\n", (uacpi_u32)idx, node->name.text + ); + UACPI_FALLTHROUGH; + default: + return UACPI_ITERATION_DECISION_CONTINUE; + } + + uacpi_trace("assigned GPE(%02X) -> %.4s\n", + (uacpi_u32)idx, node->name.text); + event->triggering = triggering; + ctx->matched_count++; + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +void uacpi_events_match_post_dynamic_table_load(void) +{ + struct gpe_match_ctx match_ctx = { + .post_dynamic_table_load = UACPI_TRUE, + }; + struct gpe_interrupt_ctx *irq_ctx; + + uacpi_namespace_write_unlock(); + + if (uacpi_unlikely_error(uacpi_recursive_lock_acquire(&g_event_lock))) + goto out; + + irq_ctx = g_gpe_interrupt_head; + + while (irq_ctx) { + match_ctx.block = irq_ctx->gpe_head; + + while (match_ctx.block) { + uacpi_namespace_do_for_each_child( + match_ctx.block->device_node, do_match_gpe_methods, UACPI_NULL, + UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx + ); + match_ctx.block = match_ctx.block->next; + } + + irq_ctx = irq_ctx->next; + } + + if (match_ctx.matched_count) { + uacpi_info("matched %u additional GPEs post dynamic table load\n", + match_ctx.matched_count); + } + +out: + uacpi_recursive_lock_release(&g_event_lock); + uacpi_namespace_write_lock(); +} + +static uacpi_status create_gpe_block( + uacpi_namespace_node *device_node, uacpi_u32 irq, uacpi_u16 base_idx, + uacpi_u64 address, uacpi_u8 address_space_id, uacpi_u16 num_registers +) +{ + uacpi_status ret = UACPI_STATUS_OUT_OF_MEMORY; + struct gpe_match_ctx match_ctx = { 0 }; + struct gpe_block *block; + struct gpe_register *reg; + struct gp_event *event; + struct acpi_gas tmp_gas = { 0 }; + uacpi_size i, j; + + tmp_gas.address_space_id = address_space_id; + tmp_gas.register_bit_width = 8; + + block = uacpi_kernel_alloc_zeroed(sizeof(*block)); + if (uacpi_unlikely(block == UACPI_NULL)) + return ret; + + block->device_node = device_node; + block->base_idx = base_idx; + + block->num_registers = num_registers; + block->registers = uacpi_kernel_alloc_zeroed( + 
num_registers * sizeof(*block->registers) + ); + if (uacpi_unlikely(block->registers == UACPI_NULL)) + goto error_out; + + block->num_events = num_registers * EVENTS_PER_GPE_REGISTER; + block->events = uacpi_kernel_alloc_zeroed( + block->num_events * sizeof(*block->events) + ); + if (uacpi_unlikely(block->events == UACPI_NULL)) + goto error_out; + + for (reg = block->registers, event = block->events, i = 0; + i < num_registers; ++i, ++reg) { + + /* + * Initialize this register pair as well as all the events within it. + * + * Each register has two sub-registers: status & enable, 8 bits each. + * Each bit corresponds to one event that we initialize below. + */ + reg->base_idx = base_idx + (i * EVENTS_PER_GPE_REGISTER); + + tmp_gas.address = address + i; + ret = uacpi_map_gas_noalloc(&tmp_gas, &reg->status); + if (uacpi_unlikely_error(ret)) + goto error_out; + + tmp_gas.address += num_registers; + ret = uacpi_map_gas_noalloc(&tmp_gas, &reg->enable); + if (uacpi_unlikely_error(ret)) + goto error_out; + + for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j, ++event) { + event->idx = reg->base_idx + j; + event->reg = reg; + } + + /* + * Disable all GPEs in this register & clear anything that might be + * pending from earlier. + */ + ret = uacpi_gas_write_mapped(&reg->enable, 0x00); + if (uacpi_unlikely_error(ret)) + goto error_out; + + ret = uacpi_gas_write_mapped(&reg->status, 0xFF); + if (uacpi_unlikely_error(ret)) + goto error_out; + } + + ret = find_or_create_gpe_interrupt_ctx(irq, &block->irq_ctx); + if (uacpi_unlikely_error(ret)) + goto error_out; + + block->next = block->irq_ctx->gpe_head; + block->irq_ctx->gpe_head = block; + match_ctx.block = block; + + uacpi_namespace_do_for_each_child( + device_node, do_match_gpe_methods, UACPI_NULL, + UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx + ); + + uacpi_trace("initialized GPE block %.4s[%d->%d], %d AML handlers (IRQ %d)\n", + device_node->name.text, base_idx, base_idx + block->num_events, + match_ctx.matched_count, irq); + return UACPI_STATUS_OK; + +error_out: + uninstall_gpe_block(block); + return ret; +} + +typedef uacpi_iteration_decision (*gpe_block_iteration_callback) + (struct gpe_block*, uacpi_handle); + +static void for_each_gpe_block( + gpe_block_iteration_callback cb, uacpi_handle handle +) +{ + uacpi_iteration_decision decision; + struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head; + struct gpe_block *block; + + while (irq_ctx) { + block = irq_ctx->gpe_head; + + while (block) { + decision = cb(block, handle); + if (decision == UACPI_ITERATION_DECISION_BREAK) + return; + + block = block->next; + } + + irq_ctx = irq_ctx->next; + } +} + +struct gpe_search_ctx { + uacpi_namespace_node *gpe_device; + uacpi_u16 idx; + struct gpe_block *out_block; + struct gp_event *out_event; +}; + +static uacpi_iteration_decision do_find_gpe( + struct gpe_block *block, uacpi_handle opaque +) +{ + struct gpe_search_ctx *ctx = opaque; + + if (block->device_node != ctx->gpe_device) + return UACPI_ITERATION_DECISION_CONTINUE; + + ctx->out_block = block; + ctx->out_event = gpe_from_block(block, ctx->idx); + if (ctx->out_event == UACPI_NULL) + return UACPI_ITERATION_DECISION_CONTINUE; + + return UACPI_ITERATION_DECISION_BREAK; +} + +static struct gp_event *get_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + struct gpe_search_ctx ctx = { 0 }; + + ctx.gpe_device = gpe_device; + ctx.idx = idx; + + for_each_gpe_block(do_find_gpe, &ctx); + return ctx.out_event; +} + +static void
gp_event_toggle_masks(struct gp_event *event, uacpi_bool set_on) +{ + uacpi_u8 this_mask; + struct gpe_register *reg = event->reg; + + this_mask = gpe_get_mask(event); + + if (set_on) { + reg->runtime_mask |= this_mask; + reg->current_mask = reg->runtime_mask; + return; + } + + reg->runtime_mask &= ~this_mask; + reg->current_mask = reg->runtime_mask; +} + +static uacpi_status gpe_remove_user(struct gp_event *event) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (uacpi_unlikely(event->num_users == 0)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (--event->num_users == 0) { + gp_event_toggle_masks(event, UACPI_FALSE); + + ret = set_gpe_state(event, GPE_STATE_DISABLED); + if (uacpi_unlikely_error(ret)) { + gp_event_toggle_masks(event, UACPI_TRUE); + event->num_users++; + } + } + + return ret; +} + +enum event_clear_if_first { + EVENT_CLEAR_IF_FIRST_YES, + EVENT_CLEAR_IF_FIRST_NO, +}; + +static uacpi_status gpe_add_user( + struct gp_event *event, enum event_clear_if_first clear_if_first +) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (uacpi_unlikely(event->num_users == 0xFF)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (++event->num_users == 1) { + if (clear_if_first == EVENT_CLEAR_IF_FIRST_YES) + clear_gpe(event); + + gp_event_toggle_masks(event, UACPI_TRUE); + + ret = set_gpe_state(event, GPE_STATE_ENABLED); + if (uacpi_unlikely_error(ret)) { + gp_event_toggle_masks(event, UACPI_FALSE); + event->num_users--; + } + } + + return ret; +} + +const uacpi_char *uacpi_gpe_triggering_to_string( + uacpi_gpe_triggering triggering +) +{ + switch (triggering) { + case UACPI_GPE_TRIGGERING_EDGE: + return "edge"; + case UACPI_GPE_TRIGGERING_LEVEL: + return "level"; + default: + return "invalid"; + } +} + +static uacpi_bool gpe_needs_polling(struct gp_event *event) +{ + return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE; +} + +static uacpi_status gpe_mask_unmask( + struct gp_event *event, uacpi_bool should_mask +) +{ + struct gpe_register *reg; + uacpi_u8 mask; + + reg = event->reg; + mask = gpe_get_mask(event); + + if (should_mask) { + if (reg->masked_mask & mask) + return UACPI_STATUS_INVALID_ARGUMENT; + + // 1. Mask the GPE; this makes sure its state is no longer modifiable + reg->masked_mask |= mask; + + /* + * 2. Wait for in-flight work & IRQs to finish; these might already + * be past the respective "if (masked)" check and therefore may + * try to re-enable a masked GPE. + */ + uacpi_kernel_wait_for_work_completion(); + + /* + * 3. Now that this GPE's state is unmodifiable and we know that currently + * in-flight IRQs will see the masked state, we can safely disable this + * event knowing it won't be re-enabled by a racing IRQ. + */ + set_gpe_state(event, GPE_STATE_DISABLED); + + /* + * 4. Wait for the last possible IRQ to finish, now that this event is + * disabled. + */ + uacpi_kernel_wait_for_work_completion(); + + return UACPI_STATUS_OK; + } + + if (!(reg->masked_mask & mask)) + return UACPI_STATUS_INVALID_ARGUMENT; + + reg->masked_mask &= ~mask; + if (!event->block_interrupts && event->num_users) + set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY); + + return UACPI_STATUS_OK; +} + +/* + * Safely mask the event before we modify its handlers. + * + * This makes sure we can't get an IRQ in the middle of modifying this + * event's structures.
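+ * + * (Illustrative pairing, as used by do_install_gpe_handler() further below: + * + *     did_mask = gpe_mask_safe(event); + *     ... swap the handler fields ... + *     if (did_mask) + *         gpe_mask_unmask(event, UACPI_FALSE); + * + * so the event is only re-enabled if it was enabled to begin with.)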
+ */ +static uacpi_bool gpe_mask_safe(struct gp_event *event) +{ + // No need to flush or do anything if it's not currently enabled + if (!(event->reg->current_mask & gpe_get_mask(event))) + return UACPI_FALSE; + + gpe_mask_unmask(event, UACPI_TRUE); + return UACPI_TRUE; +} + +static uacpi_iteration_decision do_initialize_gpe_block( + struct gpe_block *block, uacpi_handle opaque +) +{ + uacpi_status ret; + uacpi_bool *poll_blocks = opaque; + uacpi_size i, j, count_enabled = 0; + struct gp_event *event; + + for (i = 0; i < block->num_registers; ++i) { + for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) { + event = &block->events[j + i * EVENTS_PER_GPE_REGISTER]; + + if (event->wake || + event->handler_type != GPE_HANDLER_TYPE_AML_HANDLER) + continue; + + ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO); + if (uacpi_unlikely_error(ret)) { + uacpi_warn("failed to enable GPE(%02X): %s\n", + event->idx, uacpi_status_to_string(ret)); + continue; + } + + *poll_blocks |= gpe_needs_polling(event); + count_enabled++; + } + } + + if (count_enabled) { + uacpi_info( + "enabled %zu GPEs in block %.4s@[%d->%d]\n", + count_enabled, block->device_node->name.text, + block->base_idx, block->base_idx + block->num_events + ); + } + return UACPI_ITERATION_DECISION_CONTINUE; +} + +uacpi_status uacpi_finalize_gpe_initialization(void) +{ + uacpi_status ret; + uacpi_bool poll_blocks = UACPI_FALSE; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + if (g_gpes_finalized) + goto out; + + g_gpes_finalized = UACPI_TRUE; + + for_each_gpe_block(do_initialize_gpe_block, &poll_blocks); + if (poll_blocks) + detect_gpes(g_gpe_interrupt_head->gpe_head); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +static uacpi_status sanitize_device_and_find_gpe( + uacpi_namespace_node **gpe_device, uacpi_u16 idx, + struct gp_event **out_event +) +{ + if (*gpe_device == UACPI_NULL) { + *gpe_device = uacpi_namespace_get_predefined( + UACPI_PREDEFINED_NAMESPACE_GPE + ); + } + + *out_event = get_gpe(*gpe_device, idx); + if (*out_event == UACPI_NULL) + return UACPI_STATUS_NOT_FOUND; + + return UACPI_STATUS_OK; +} + +static uacpi_status do_install_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, enum gpe_handler_type type, + uacpi_gpe_handler handler, uacpi_handle ctx +) +{ + uacpi_status ret; + struct gp_event *event; + struct gpe_native_handler *native_handler; + uacpi_bool did_mask; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER || + event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + native_handler = uacpi_kernel_alloc(sizeof(*native_handler)); + if (uacpi_unlikely(native_handler == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + native_handler->cb = handler; + native_handler->ctx = ctx; + native_handler->previous_handler = event->any_handler; + native_handler->previous_handler_type = event->handler_type; + native_handler->previous_triggering = 
event->triggering; + native_handler->previously_enabled = UACPI_FALSE; + + did_mask = gpe_mask_safe(event); + + if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER || + event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) && + event->num_users != 0) { + native_handler->previously_enabled = UACPI_TRUE; + gpe_remove_user(event); + + if (uacpi_unlikely(event->triggering != triggering)) { + uacpi_warn( + "GPE(%02X) user handler claims %s triggering, originally " + "configured as %s\n", idx, + uacpi_gpe_triggering_to_string(triggering), + uacpi_gpe_triggering_to_string(event->triggering) + ); + } + } + + event->native_handler = native_handler; + event->handler_type = type; + event->triggering = triggering; + + if (did_mask) + gpe_mask_unmask(event, UACPI_FALSE); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_install_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, + uacpi_handle ctx +) +{ + return do_install_gpe_handler( + gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER, + handler, ctx + ); +} + +uacpi_status uacpi_install_gpe_handler_raw( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, + uacpi_handle ctx +) +{ + return do_install_gpe_handler( + gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW, + handler, ctx + ); +} + +uacpi_status uacpi_uninstall_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_handler handler +) +{ + uacpi_status ret; + struct gp_event *event; + struct gpe_native_handler *native_handler; + uacpi_bool did_mask; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER && + event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } + + native_handler = event->native_handler; + if (uacpi_unlikely(native_handler->cb != handler)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + did_mask = gpe_mask_safe(event); + + event->aml_handler = native_handler->previous_handler; + event->triggering = native_handler->previous_triggering; + event->handler_type = native_handler->previous_handler_type; + + if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER || + event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) && + native_handler->previously_enabled) { + gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO); + } + + uacpi_free(native_handler, sizeof(*native_handler)); + + if (did_mask) + gpe_mask_unmask(event, UACPI_FALSE); + + if (gpe_needs_polling(event)) + maybe_dispatch_gpe(gpe_device, event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_enable_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + uacpi_status ret; + struct gp_event *event; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE)) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + 
} + + ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_YES); + if (uacpi_unlikely_error(ret)) + goto out; + + if (gpe_needs_polling(event)) + maybe_dispatch_gpe(gpe_device, event); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_disable_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + uacpi_status ret; + struct gp_event *event; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + ret = gpe_remove_user(event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_clear_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + uacpi_status ret; + struct gp_event *event; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + ret = clear_gpe(event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +static uacpi_status gpe_suspend_resume( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, enum gpe_state state +) +{ + uacpi_status ret; + struct gp_event *event; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + event->block_interrupts = state == GPE_STATE_DISABLED; + ret = set_gpe_state(event, state); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_suspend_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + return gpe_suspend_resume(gpe_device, idx, GPE_STATE_DISABLED); +} + +uacpi_status uacpi_resume_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + return gpe_suspend_resume(gpe_device, idx, GPE_STATE_ENABLED); +} + +uacpi_status uacpi_finish_handling_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + uacpi_status ret; + struct gp_event *event; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + event = get_gpe(gpe_device, idx); + if (uacpi_unlikely(event == UACPI_NULL)) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } + + ret = restore_gpe(event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; + +} + +static uacpi_status gpe_get_mask_unmask( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool should_mask +) +{ + uacpi_status ret; + struct gp_event *event; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + ret = gpe_mask_unmask(event, should_mask); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_mask_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx 
+) +{ + return gpe_get_mask_unmask(gpe_device, idx, UACPI_TRUE); +} + +uacpi_status uacpi_unmask_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + return gpe_get_mask_unmask(gpe_device, idx, UACPI_FALSE); +} + +uacpi_status uacpi_setup_gpe_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_namespace_node *wake_device +) +{ + uacpi_status ret; + struct gp_event *event; + uacpi_bool did_mask; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + if (wake_device != UACPI_NULL) { + uacpi_bool is_dev = wake_device == uacpi_namespace_root(); + + if (!is_dev) { + ret = uacpi_namespace_node_is(wake_device, UACPI_OBJECT_DEVICE, &is_dev); + if (uacpi_unlikely_error(ret)) + return ret; + } + + if (!is_dev) + return UACPI_STATUS_INVALID_ARGUMENT; + } + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + did_mask = gpe_mask_safe(event); + + if (wake_device != UACPI_NULL) { + switch (event->handler_type) { + case GPE_HANDLER_TYPE_NONE: + event->handler_type = GPE_HANDLER_TYPE_IMPLICIT_NOTIFY; + event->triggering = UACPI_GPE_TRIGGERING_LEVEL; + break; + + case GPE_HANDLER_TYPE_AML_HANDLER: + /* + * An AML handler already exists, we expect it to call Notify() as + * it sees fit. For now just make sure this event is disabled if it + * had been enabled automatically previously during initialization. + */ + gpe_remove_user(event); + break; + + case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW: + case GPE_HANDLER_TYPE_NATIVE_HANDLER: + uacpi_warn( + "not configuring implicit notify for GPE(%02X) -> %.4s: " + " a user handler already installed\n", event->idx, + wake_device->name.text + ); + break; + + // We will re-check this below + case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: + break; + + default: + uacpi_warn("invalid GPE(%02X) handler type: %d\n", + event->idx, event->handler_type); + ret = UACPI_STATUS_INTERNAL_ERROR; + goto out_unmask; + } + + /* + * This GPE has no known AML handler, so we configure it to receive + * implicit notifications for wake devices when we get a corresponding + * GPE triggered. Usually it's the job of a matching AML handler, but + * we didn't find any. 
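+ * + * (Illustrative note: "implicit notify" means that when this GPE fires, + * async_run_gpe_handler() above walks the list built here and calls + * uacpi_notify_all(device, 2) -- the ACPI "Device Wake" notification + * value -- for every wake device attached to this event.)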
 + */ + if (event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) { + struct gpe_implicit_notify_handler *implicit_handler; + + implicit_handler = event->implicit_handler; + while (implicit_handler) { + if (implicit_handler->device == wake_device) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out_unmask; + } + + implicit_handler = implicit_handler->next; + } + + implicit_handler = uacpi_kernel_alloc(sizeof(*implicit_handler)); + if (uacpi_likely(implicit_handler != UACPI_NULL)) { + implicit_handler->device = wake_device; + implicit_handler->next = event->implicit_handler; + event->implicit_handler = implicit_handler; + } else { + uacpi_warn( + "unable to configure implicit wake for GPE(%02X) -> %.4s: " + "out of memory\n", event->idx, wake_device->name.text + ); + } + } + } + + event->wake = UACPI_TRUE; + +out_unmask: + if (did_mask) + gpe_mask_unmask(event, UACPI_FALSE); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +static uacpi_status gpe_enable_disable_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool enabled +) +{ + uacpi_status ret; + struct gp_event *event; + struct gpe_register *reg; + uacpi_u8 mask; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (!event->wake) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + reg = event->reg; + mask = gpe_get_mask(event); + + if (enabled) + reg->wake_mask |= mask; + else + reg->wake_mask &= ~mask; + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_enable_gpe_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_TRUE); +} + +uacpi_status uacpi_disable_gpe_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +) +{ + return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_FALSE); +} + +struct do_for_all_gpes_ctx { + enum gpe_block_action action; + uacpi_status ret; +}; + +static uacpi_iteration_decision do_for_all_gpes( + struct gpe_block *block, uacpi_handle opaque +) +{ + struct do_for_all_gpes_ctx *ctx = opaque; + + ctx->ret = gpe_block_apply_action(block, ctx->action); + if (uacpi_unlikely_error(ctx->ret)) + return UACPI_ITERATION_DECISION_BREAK; + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +static uacpi_status for_all_gpes_locked(struct do_for_all_gpes_ctx *ctx) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + for_each_gpe_block(do_for_all_gpes, ctx); + + uacpi_recursive_lock_release(&g_event_lock); + return ctx->ret; +} + +uacpi_status uacpi_disable_all_gpes(void) +{ + struct do_for_all_gpes_ctx ctx = { + .action = GPE_BLOCK_ACTION_DISABLE_ALL, + }; + return for_all_gpes_locked(&ctx); +} + +uacpi_status uacpi_enable_all_runtime_gpes(void) +{ + struct do_for_all_gpes_ctx ctx = { + .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME, + }; + return for_all_gpes_locked(&ctx); +} + +uacpi_status uacpi_enable_all_wake_gpes(void) +{ + struct do_for_all_gpes_ctx ctx = { + .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE, + }; + return for_all_gpes_locked(&ctx); +} + +static uacpi_status initialize_gpes(void) +{ + uacpi_status ret; + uacpi_namespace_node *gpe_node; + struct
acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt; + uacpi_u8 gpe0_regs = 0, gpe1_regs = 0; + + gpe_node = uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_GPE); + + if (fadt->x_gpe0_blk.address && fadt->gpe0_blk_len) { + gpe0_regs = fadt->gpe0_blk_len / 2; + + ret = create_gpe_block( + gpe_node, fadt->sci_int, 0, fadt->x_gpe0_blk.address, + fadt->x_gpe0_blk.address_space_id, gpe0_regs + ); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to create FADT GPE block 0: %s\n", + uacpi_status_to_string(ret)); + } + } + + if (fadt->x_gpe1_blk.address && fadt->gpe1_blk_len) { + gpe1_regs = fadt->gpe1_blk_len / 2; + + if (uacpi_unlikely((gpe0_regs * EVENTS_PER_GPE_REGISTER) > + fadt->gpe1_base)) { + uacpi_error( + "FADT GPE block 1 [%d->%d] collides with GPE block 0 " + "[%d->%d], ignoring\n", + 0, gpe0_regs * EVENTS_PER_GPE_REGISTER, fadt->gpe1_base, + gpe1_regs * EVENTS_PER_GPE_REGISTER + ); + gpe1_regs = 0; + goto out; + } + + ret = create_gpe_block( + gpe_node, fadt->sci_int, fadt->gpe1_base, fadt->x_gpe1_blk.address, + fadt->x_gpe1_blk.address_space_id, gpe1_regs + ); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to create FADT GPE block 1: %s\n", + uacpi_status_to_string(ret)); + } + } + + if (gpe0_regs == 0 && gpe1_regs == 0) + uacpi_trace("platform has no FADT GPE events\n"); + +out: + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_install_gpe_block( + uacpi_namespace_node *gpe_device, uacpi_u64 address, + uacpi_address_space address_space, uacpi_u16 num_registers, uacpi_u32 irq +) +{ + uacpi_status ret; + uacpi_bool is_dev; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev); + if (uacpi_unlikely_error(ret)) + return ret; + if (!is_dev) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + if (uacpi_unlikely(get_gpe(gpe_device, 0) != UACPI_NULL)) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + ret = create_gpe_block( + gpe_device, irq, 0, address, address_space, num_registers + ); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_uninstall_gpe_block( + uacpi_namespace_node *gpe_device +) +{ + uacpi_status ret; + uacpi_bool is_dev; + struct gpe_search_ctx search_ctx = { 0 }; + + search_ctx.idx = 0; + search_ctx.gpe_device = gpe_device; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev); + if (uacpi_unlikely_error(ret)) + return ret; + if (!is_dev) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + for_each_gpe_block(do_find_gpe, &search_ctx); + if (search_ctx.out_block == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } + + uninstall_gpe_block(search_ctx.out_block); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +static uacpi_interrupt_ret handle_global_lock(uacpi_handle ctx) +{ + uacpi_cpu_flags flags; + UACPI_UNUSED(ctx); + + if (uacpi_unlikely(!g_uacpi_rt_ctx.has_global_lock)) { + uacpi_warn("platform has no global lock but a release event " + "was fired anyway?\n"); + return UACPI_INTERRUPT_HANDLED; + } + + flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock); + if (!g_uacpi_rt_ctx.global_lock_pending) { + uacpi_trace("spurious firmware global lock release notification\n"); + goto 
out; + } + + uacpi_trace("received a firmware global lock release notification\n"); + + uacpi_kernel_signal_event(g_uacpi_rt_ctx.global_lock_event); + g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE; + +out: + uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags); + return UACPI_INTERRUPT_HANDLED; +} + +static uacpi_interrupt_ret handle_sci(uacpi_handle ctx) +{ + uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED; + + int_ret |= handle_fixed_events(); + int_ret |= handle_gpes(ctx); + + return int_ret; +} + +uacpi_status uacpi_initialize_events_early(void) +{ + uacpi_status ret; + + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + g_gpe_state_slock = uacpi_kernel_create_spinlock(); + if (uacpi_unlikely(g_gpe_state_slock == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = uacpi_recursive_lock_init(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = initialize_fixed_events(); + if (uacpi_unlikely_error(ret)) + return ret; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_initialize_events(void) +{ + uacpi_status ret; + + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + ret = initialize_gpes(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_kernel_install_interrupt_handler( + g_uacpi_rt_ctx.fadt.sci_int, handle_sci, g_gpe_interrupt_head, + &g_uacpi_rt_ctx.sci_handle + ); + if (uacpi_unlikely_error(ret)) { + uacpi_error( + "unable to install SCI interrupt handler: %s\n", + uacpi_status_to_string(ret) + ); + return ret; + } + g_uacpi_rt_ctx.sci_handle_valid = UACPI_TRUE; + + g_uacpi_rt_ctx.global_lock_event = uacpi_kernel_create_event(); + if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_event == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + g_uacpi_rt_ctx.global_lock_spinlock = uacpi_kernel_create_spinlock(); + if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_spinlock == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = uacpi_install_fixed_event_handler( + UACPI_FIXED_EVENT_GLOBAL_LOCK, handle_global_lock, UACPI_NULL + ); + if (uacpi_likely_success(ret)) { + if (uacpi_unlikely(g_uacpi_rt_ctx.facs == UACPI_NULL)) { + uacpi_uninstall_fixed_event_handler(UACPI_FIXED_EVENT_GLOBAL_LOCK); + uacpi_warn("platform has global lock but no FACS was provided\n"); + return ret; + } + g_uacpi_rt_ctx.has_global_lock = UACPI_TRUE; + } else if (ret == UACPI_STATUS_HARDWARE_TIMEOUT) { + // has_global_lock remains set to false + uacpi_trace("platform has no global lock\n"); + ret = UACPI_STATUS_OK; + } + + return ret; +} + +void uacpi_deinitialize_events(void) +{ + struct gpe_interrupt_ctx *ctx, *next_ctx = g_gpe_interrupt_head; + uacpi_size i; + + g_gpes_finalized = UACPI_FALSE; + + if (g_uacpi_rt_ctx.sci_handle_valid) { + uacpi_kernel_uninstall_interrupt_handler( + handle_sci, g_uacpi_rt_ctx.sci_handle + ); + g_uacpi_rt_ctx.sci_handle_valid = UACPI_FALSE; + } + + while (next_ctx) { + struct gpe_block *block, *next_block; + + ctx = next_ctx; + next_ctx = ctx->next; + + next_block = ctx->gpe_head; + while (next_block) { + block = next_block; + next_block = block->next; + uninstall_gpe_block(block); + } + } + + for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) { + if (fixed_event_handlers[i].handler) + uacpi_uninstall_fixed_event_handler(i); + } + + if (g_gpe_state_slock != UACPI_NULL) { + uacpi_kernel_free_spinlock(g_gpe_state_slock); + g_gpe_state_slock = UACPI_NULL; + } + + uacpi_recursive_lock_deinit(&g_event_lock); + + g_gpe_interrupt_head = UACPI_NULL; +} + +uacpi_status 
uacpi_install_fixed_event_handler( + uacpi_fixed_event event, uacpi_interrupt_handler handler, + uacpi_handle user +) +{ + uacpi_status ret; + struct fixed_event_handler *ev; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ev = &fixed_event_handlers[event]; + + if (ev->handler != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + ev->handler = handler; + ev->ctx = user; + + ret = set_event(event, UACPI_EVENT_ENABLED); + if (uacpi_unlikely_error(ret)) { + ev->handler = UACPI_NULL; + ev->ctx = UACPI_NULL; + } + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_uninstall_fixed_event_handler( + uacpi_fixed_event event +) +{ + uacpi_status ret; + struct fixed_event_handler *ev; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ev = &fixed_event_handlers[event]; + + ret = set_event(event, UACPI_EVENT_DISABLED); + if (uacpi_unlikely_error(ret)) + goto out; + + uacpi_kernel_wait_for_work_completion(); + + ev->handler = UACPI_NULL; + ev->ctx = UACPI_NULL; + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_fixed_event_info( + uacpi_fixed_event event, uacpi_event_info *out_info +) +{ + uacpi_status ret; + const struct fixed_event *ev; + uacpi_u64 raw_value; + uacpi_event_info info = 0; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_NOT_FOUND; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + if (fixed_event_handlers[event].handler != UACPI_NULL) + info |= UACPI_EVENT_INFO_HAS_HANDLER; + + ev = &fixed_events[event]; + + ret = uacpi_read_register_field(ev->enable_field, &raw_value); + if (uacpi_unlikely_error(ret)) + goto out; + if (raw_value) + info |= UACPI_EVENT_INFO_ENABLED | UACPI_EVENT_INFO_HW_ENABLED; + + ret = uacpi_read_register_field(ev->status_field, &raw_value); + if (uacpi_unlikely_error(ret)) + goto out; + if (raw_value) + info |= UACPI_EVENT_INFO_HW_STATUS; + + *out_info = info; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_gpe_info( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_event_info *out_info +) +{ + uacpi_status ret; + struct gp_event *event; + struct gpe_register *reg; + uacpi_u8 mask; + uacpi_u64 raw_value; + uacpi_event_info info = 0; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (event->handler_type != GPE_HANDLER_TYPE_NONE) + info |= UACPI_EVENT_INFO_HAS_HANDLER; + + mask = gpe_get_mask(event); + reg = event->reg; + + if 
(reg->runtime_mask & mask) + info |= UACPI_EVENT_INFO_ENABLED; + if (reg->masked_mask & mask) + info |= UACPI_EVENT_INFO_MASKED; + if (reg->wake_mask & mask) + info |= UACPI_EVENT_INFO_ENABLED_FOR_WAKE; + + ret = uacpi_gas_read_mapped(&reg->enable, &raw_value); + if (uacpi_unlikely_error(ret)) + goto out; + if (raw_value & mask) + info |= UACPI_EVENT_INFO_HW_ENABLED; + + ret = uacpi_gas_read_mapped(&reg->status, &raw_value); + if (uacpi_unlikely_error(ret)) + goto out; + if (raw_value & mask) + info |= UACPI_EVENT_INFO_HW_STATUS; + + *out_info = info; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +#define PM1_STATUS_BITS ( \ + ACPI_PM1_STS_TMR_STS_MASK | \ + ACPI_PM1_STS_BM_STS_MASK | \ + ACPI_PM1_STS_GBL_STS_MASK | \ + ACPI_PM1_STS_PWRBTN_STS_MASK | \ + ACPI_PM1_STS_SLPBTN_STS_MASK | \ + ACPI_PM1_STS_RTC_STS_MASK | \ + ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK | \ + ACPI_PM1_STS_WAKE_STS_MASK \ +) + +uacpi_status uacpi_clear_all_events(void) +{ + uacpi_status ret; + struct do_for_all_gpes_ctx ctx = { + .action = GPE_BLOCK_ACTION_CLEAR_ALL, + }; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS); + if (uacpi_unlikely_error(ret)) + goto out; + + for_each_gpe_block(do_for_all_gpes, &ctx); + ret = ctx.ret; + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +#endif // !UACPI_REDUCED_HARDWARE && !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/interpreter.c b/sys/dev/acpi/uacpi/interpreter.c new file mode 100644 index 0000000..8ffb8d5 --- /dev/null +++ b/sys/dev/acpi/uacpi/interpreter.c @@ -0,0 +1,6053 @@ +#include <uacpi/internal/types.h> +#include <uacpi/internal/interpreter.h> +#include <uacpi/internal/dynamic_array.h> +#include <uacpi/internal/opcodes.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/context.h> +#include <uacpi/internal/shareable.h> +#include <uacpi/internal/tables.h> +#include <uacpi/internal/helpers.h> +#include <uacpi/kernel_api.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/opregion.h> +#include <uacpi/internal/io.h> +#include <uacpi/internal/notify.h> +#include <uacpi/internal/resources.h> +#include <uacpi/internal/event.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/internal/osi.h> + +#ifndef UACPI_BAREBONES_MODE + +enum item_type { + ITEM_NONE = 0, + ITEM_NAMESPACE_NODE, + ITEM_OBJECT, + ITEM_EMPTY_OBJECT, + ITEM_PACKAGE_LENGTH, + ITEM_IMMEDIATE, +}; + +struct package_length { + uacpi_u32 begin; + uacpi_u32 end; +}; + +struct item { + uacpi_u8 type; + union { + uacpi_handle handle; + uacpi_object *obj; + struct uacpi_namespace_node *node; + struct package_length pkg; + uacpi_u64 immediate; + uacpi_u8 immediate_bytes[8]; + }; +}; + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(item_array, struct item, 8) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(item_array, struct item, static) + +struct op_context { + uacpi_u8 pc; + uacpi_bool preempted; + + /* + * == 0 -> none + * >= 1 -> item[idx - 1] + */ + uacpi_u8 tracked_pkg_idx; + + uacpi_aml_op switched_from; + + const struct uacpi_op_spec *op; + struct item_array items; +}; + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(op_context_array, struct op_context, 8) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + op_context_array, struct op_context, static +) + +static struct op_context *op_context_array_one_before_last( + struct op_context_array
*arr +) +{ + uacpi_size size; + + size = op_context_array_size(arr); + + if (size < 2) + return UACPI_NULL; + + return op_context_array_at(arr, size - 2); +} + +enum code_block_type { + CODE_BLOCK_IF = 1, + CODE_BLOCK_ELSE = 2, + CODE_BLOCK_WHILE = 3, + CODE_BLOCK_SCOPE = 4, +}; + +struct code_block { + enum code_block_type type; + uacpi_u32 begin, end; + union { + struct uacpi_namespace_node *node; + uacpi_u64 expiration_point; + }; +}; + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(code_block_array, struct code_block, 8) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + code_block_array, struct code_block, static +) + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(held_mutexes_array, uacpi_mutex*, 8) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + held_mutexes_array, uacpi_mutex*, static +) + +static uacpi_status held_mutexes_array_push( + struct held_mutexes_array *arr, uacpi_mutex *mutex +) +{ + uacpi_mutex **slot; + + slot = held_mutexes_array_alloc(arr); + if (uacpi_unlikely(slot == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + *slot = mutex; + uacpi_shareable_ref(mutex); + return UACPI_STATUS_OK; +} + +static void held_mutexes_array_remove_idx( + struct held_mutexes_array *arr, uacpi_size i +) +{ + uacpi_size size; + + size = held_mutexes_array_inline_capacity(arr); + + // Only the dynamic array part is affected + if (i >= size) { + i -= size; + size = arr->size_including_inline - size; + size -= i + 1; + + uacpi_memmove( + &arr->dynamic_storage[i], &arr->dynamic_storage[i + 1], + size * sizeof(arr->inline_storage[0]) + ); + + held_mutexes_array_pop(arr); + return; + } + + size = UACPI_MIN(held_mutexes_array_inline_capacity(arr), + arr->size_including_inline); + size -= i + 1; + uacpi_memmove( + &arr->inline_storage[i], &arr->inline_storage[i + 1], + size * sizeof(arr->inline_storage[0]) + ); + + size = held_mutexes_array_size(arr); + i = held_mutexes_array_inline_capacity(arr); + + /* + * This array has dynamic storage as well, now we have to take the first + * dynamic item, move it to the top of inline storage, and then shift all + * dynamic items backward by 1 as well. + */ + if (size > i) { + arr->inline_storage[i - 1] = arr->dynamic_storage[0]; + size -= i + 1; + + uacpi_memmove( + &arr->dynamic_storage[0], &arr->dynamic_storage[1], + size * sizeof(arr->inline_storage[0]) + ); + } + + held_mutexes_array_pop(arr); +} + +enum force_release { + FORCE_RELEASE_NO, + FORCE_RELEASE_YES, +}; + +static uacpi_status held_mutexes_array_remove_and_release( + struct held_mutexes_array *arr, uacpi_mutex *mutex, + enum force_release force +) +{ + uacpi_mutex *item; + uacpi_size i; + + if (uacpi_unlikely(held_mutexes_array_size(arr) == 0)) + return UACPI_STATUS_INVALID_ARGUMENT; + + item = *held_mutexes_array_last(arr); + + if (uacpi_unlikely(item->sync_level != mutex->sync_level && + force != FORCE_RELEASE_YES)) { + uacpi_warn( + "ignoring mutex @%p release due to sync level mismatch: %d vs %d\n", + mutex, mutex->sync_level, item->sync_level + ); + + // We still return OK because we don't want to abort because of this + return UACPI_STATUS_OK; + } + + if (mutex->depth > 1 && force == FORCE_RELEASE_NO) { + uacpi_release_aml_mutex(mutex); + return UACPI_STATUS_OK; + } + + // Fast path for well-behaved AML that releases mutexes in descending order + if (uacpi_likely(item == mutex)) { + held_mutexes_array_pop(arr); + goto do_release; + } + + /* + * The mutex being released is not the last one acquired, although we did + * verify that at least it has the same sync level. 
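(For instance, an out-of-order sequence like Acquire(MTXA, 0xFFFF) followed + * by Acquire(MTXB, 0xFFFF) and Release(MTXA) ends up on this path; + * illustrative ASL assuming both mutexes share one SyncLevel, not upstream + * wording.)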
Anyway, now we have + * to search for it and then remove it from the array while shifting + * everything backwards. + */ + i = held_mutexes_array_size(arr); + for (;;) { + item = *held_mutexes_array_at(arr, --i); + if (item == mutex) + break; + + if (uacpi_unlikely(i == 0)) + return UACPI_STATUS_INVALID_ARGUMENT; + } + + held_mutexes_array_remove_idx(arr, i); + +do_release: + // This is either a force release, or depth was already 1 to begin with + mutex->depth = 1; + uacpi_release_aml_mutex(mutex); + + uacpi_mutex_unref(mutex); + return UACPI_STATUS_OK; +} + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE( + temp_namespace_node_array, uacpi_namespace_node*, 8) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + temp_namespace_node_array, uacpi_namespace_node*, static +) + +static uacpi_status temp_namespace_node_array_push( + struct temp_namespace_node_array *arr, uacpi_namespace_node *node +) +{ + uacpi_namespace_node **slot; + + slot = temp_namespace_node_array_alloc(arr); + if (uacpi_unlikely(slot == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + *slot = node; + return UACPI_STATUS_OK; +} + +struct call_frame { + struct uacpi_control_method *method; + + uacpi_object *args[7]; + uacpi_object *locals[8]; + + struct op_context_array pending_ops; + struct code_block_array code_blocks; + struct temp_namespace_node_array temp_nodes; + struct code_block *last_while; + uacpi_u64 prev_while_expiration; + uacpi_u32 prev_while_code_offset; + + uacpi_u32 code_offset; + + struct uacpi_namespace_node *cur_scope; + + // Only used if the method is serialized + uacpi_u8 prev_sync_level; +}; + +static void *call_frame_cursor(struct call_frame *frame) +{ + return frame->method->code + frame->code_offset; +} + +static uacpi_size call_frame_code_bytes_left(struct call_frame *frame) +{ + return frame->method->size - frame->code_offset; +} + +static uacpi_bool call_frame_has_code(struct call_frame *frame) +{ + return call_frame_code_bytes_left(frame) > 0; +} + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(call_frame_array, struct call_frame, 4) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + call_frame_array, struct call_frame, static +) + +static struct call_frame *call_frame_array_one_before_last( + struct call_frame_array *arr +) +{ + uacpi_size size; + + size = call_frame_array_size(arr); + + if (size < 2) + return UACPI_NULL; + + return call_frame_array_at(arr, size - 2); +} + +// NOTE: Try to keep size under 2 pages +struct execution_context { + uacpi_object *ret; + struct call_frame_array call_stack; + struct held_mutexes_array held_mutexes; + + struct call_frame *cur_frame; + struct code_block *cur_block; + const struct uacpi_op_spec *cur_op; + struct op_context *prev_op_ctx; + struct op_context *cur_op_ctx; + + uacpi_u8 sync_level; +}; + +#define AML_READ(ptr, offset) (*(((uacpi_u8*)(ptr)) + offset)) + +static uacpi_status parse_nameseg(uacpi_u8 *cursor, + uacpi_object_name *out_name) +{ + if (uacpi_unlikely(!uacpi_is_valid_nameseg(cursor))) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + uacpi_memcpy(&out_name->id, cursor, 4); + return UACPI_STATUS_OK; +} + +/* + * ------------------------------------------------------------- + * RootChar := ‘\’ + * ParentPrefixChar := ‘^’ + * ‘\’ := 0x5C + * ‘^’ := 0x5E + * ------------------------------------------------------------ + * NameSeg := <leadnamechar namechar namechar namechar> + * NameString := <rootchar namepath> | <prefixpath namepath> + * PrefixPath := Nothing | <’^’ prefixpath> + * NamePath := NameSeg | DualNamePath | MultiNamePath | NullName + * DualNamePath := 
DualNamePrefix NameSeg NameSeg + * MultiNamePath := MultiNamePrefix SegCount NameSeg(SegCount) + */ + +static uacpi_status name_string_to_path( + struct call_frame *frame, uacpi_size offset, + uacpi_char **out_string, uacpi_size *out_size +) +{ + uacpi_size bytes_left, prefix_bytes, nameseg_bytes = 0, namesegs; + uacpi_char *base_cursor, *cursor; + uacpi_char prev_char; + + bytes_left = frame->method->size - offset; + cursor = (uacpi_char*)frame->method->code + offset; + base_cursor = cursor; + namesegs = 0; + + prefix_bytes = 0; + for (;;) { + if (uacpi_unlikely(bytes_left == 0)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + prev_char = *cursor; + + switch (prev_char) { + case '^': + case '\\': + prefix_bytes++; + cursor++; + bytes_left--; + break; + default: + break; + } + + if (prev_char != '^') + break; + } + + // At least a NullName byte is expected here + if (uacpi_unlikely(bytes_left == 0)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + namesegs = 0; + bytes_left--; + switch (*cursor++) + { + case UACPI_DUAL_NAME_PREFIX: + namesegs = 2; + break; + case UACPI_MULTI_NAME_PREFIX: + if (uacpi_unlikely(bytes_left == 0)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + namesegs = *(uacpi_u8*)cursor; + if (uacpi_unlikely(namesegs == 0)) { + uacpi_error("MultiNamePrefix but SegCount is 0\n"); + return UACPI_STATUS_AML_INVALID_NAMESTRING; + } + + cursor++; + bytes_left--; + break; + case UACPI_NULL_NAME: + break; + default: + /* + * Might be an invalid byte, but assume single nameseg for now, + * the code below will validate it for us. + */ + cursor--; + bytes_left++; + namesegs = 1; + break; + } + + if (uacpi_unlikely((namesegs * 4) > bytes_left)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + if (namesegs) { + // 4 chars per nameseg + nameseg_bytes = namesegs * 4; + + // dot separator for every nameseg + nameseg_bytes += namesegs - 1; + } + + *out_size = nameseg_bytes + prefix_bytes + 1; + + *out_string = uacpi_kernel_alloc(*out_size); + if (*out_string == UACPI_NULL) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(*out_string, base_cursor, prefix_bytes); + + base_cursor = *out_string; + base_cursor += prefix_bytes; + + while (namesegs-- > 0) { + uacpi_memcpy(base_cursor, cursor, 4); + cursor += 4; + base_cursor += 4; + + if (namesegs) + *base_cursor++ = '.'; + } + + *base_cursor = '\0'; + return UACPI_STATUS_OK; +} + +enum resolve_behavior { + RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS, + RESOLVE_FAIL_IF_DOESNT_EXIST, +}; + +static uacpi_status resolve_name_string( + struct call_frame *frame, + enum resolve_behavior behavior, + struct uacpi_namespace_node **out_node +) +{ + uacpi_status ret = UACPI_STATUS_OK; + uacpi_u8 *cursor; + uacpi_size bytes_left, namesegs = 0; + struct uacpi_namespace_node *parent, *cur_node = frame->cur_scope; + uacpi_char prev_char = 0; + uacpi_bool just_one_nameseg = UACPI_TRUE; + + bytes_left = call_frame_code_bytes_left(frame); + cursor = call_frame_cursor(frame); + + for (;;) { + if (uacpi_unlikely(bytes_left == 0)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + switch (*cursor) { + case '\\': + if (prev_char == '^') + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + cur_node = uacpi_namespace_root(); + break; + case '^': + // Tried to go behind root + if (uacpi_unlikely(cur_node == uacpi_namespace_root())) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + cur_node = cur_node->parent; + break; + default: + break; + } + + prev_char = *cursor; + + switch (prev_char) { + case '^': + case '\\': + just_one_nameseg = UACPI_FALSE; + cursor++; + 
bytes_left--; + break; + default: + break; + } + + if (prev_char != '^') + break; + } + + // At least a NullName byte is expected here + if (uacpi_unlikely(bytes_left == 0)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + bytes_left--; + switch (*cursor++) + { + case UACPI_DUAL_NAME_PREFIX: + namesegs = 2; + just_one_nameseg = UACPI_FALSE; + break; + case UACPI_MULTI_NAME_PREFIX: + if (uacpi_unlikely(bytes_left == 0)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + namesegs = *cursor; + if (uacpi_unlikely(namesegs == 0)) { + uacpi_error("MultiNamePrefix but SegCount is 0\n"); + return UACPI_STATUS_AML_INVALID_NAMESTRING; + } + + cursor++; + bytes_left--; + just_one_nameseg = UACPI_FALSE; + break; + case UACPI_NULL_NAME: + if (behavior == RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS || + just_one_nameseg) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + goto out; + default: + /* + * Might be an invalid byte, but assume single nameseg for now, + * the code below will validate it for us. + */ + cursor--; + bytes_left++; + namesegs = 1; + break; + } + + if (uacpi_unlikely((namesegs * 4) > bytes_left)) + return UACPI_STATUS_AML_INVALID_NAMESTRING; + + for (; namesegs; cursor += 4, namesegs--) { + uacpi_object_name name; + + ret = parse_nameseg(cursor, &name); + if (uacpi_unlikely_error(ret)) + return ret; + + parent = cur_node; + cur_node = uacpi_namespace_node_find_sub_node(parent, name); + + switch (behavior) { + case RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS: + if (namesegs == 1) { + if (cur_node) { + cur_node = UACPI_NULL; + ret = UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS; + goto out; + } + + // Create the node and link to parent but don't install YET + cur_node = uacpi_namespace_node_alloc(name); + if (uacpi_unlikely(cur_node == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + cur_node->parent = parent; + } + break; + case RESOLVE_FAIL_IF_DOESNT_EXIST: + if (just_one_nameseg) { + while (!cur_node && parent != uacpi_namespace_root()) { + cur_node = parent; + parent = cur_node->parent; + + cur_node = uacpi_namespace_node_find_sub_node(parent, name); + } + } + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (cur_node == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; + break; + } + } + +out: + cursor += namesegs * 4; + frame->code_offset = cursor - frame->method->code; + + if (uacpi_likely_success(ret) && behavior == RESOLVE_FAIL_IF_DOESNT_EXIST) + uacpi_shareable_ref(cur_node); + + *out_node = cur_node; + return ret; +} + +static uacpi_status do_install_node_item(struct call_frame *frame, + struct item *item) +{ + uacpi_status ret; + + ret = uacpi_namespace_node_install(item->node->parent, item->node); + if (uacpi_unlikely_error(ret)) + return ret; + + if (!frame->method->named_objects_persist) + ret = temp_namespace_node_array_push(&frame->temp_nodes, item->node); + + if (uacpi_likely_success(ret)) + item->node = UACPI_NULL; + + return ret; +} + +static uacpi_u8 peek_next_op(struct call_frame *frame, uacpi_aml_op *out_op) +{ + uacpi_aml_op op; + uacpi_size bytes_left; + uacpi_u8 length = 0; + uacpi_u8 *cursor; + struct code_block *block; + + block = code_block_array_last(&frame->code_blocks); + bytes_left = block->end - frame->code_offset; + if (bytes_left == 0) + return 0; + + cursor = call_frame_cursor(frame); + + op = AML_READ(cursor, length++); + if (op == UACPI_EXT_PREFIX) { + if (uacpi_unlikely(bytes_left < 2)) + return 0; + + op <<= 8; + op |= AML_READ(cursor, length++); + } + + *out_op = op; + return length; +} + +static uacpi_status 
get_op(struct execution_context *ctx) +{ + uacpi_aml_op op; + uacpi_u8 length; + + length = peek_next_op(ctx->cur_frame, &op); + if (uacpi_unlikely(length == 0)) + return UACPI_STATUS_AML_BAD_ENCODING; + + ctx->cur_frame->code_offset += length; + g_uacpi_rt_ctx.opcodes_executed++; + + ctx->cur_op = uacpi_get_op_spec(op); + if (uacpi_unlikely(ctx->cur_op->properties & UACPI_OP_PROPERTY_RESERVED)) { + uacpi_error( + "invalid opcode '%s' encountered in bytestream\n", + ctx->cur_op->name + ); + return UACPI_STATUS_AML_INVALID_OPCODE; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_buffer(struct execution_context *ctx) +{ + struct package_length *pkg; + uacpi_u8 *src; + uacpi_object *dst, *declared_size; + uacpi_u32 buffer_size, init_size, aml_offset; + struct op_context *op_ctx = ctx->cur_op_ctx; + + aml_offset = item_array_at(&op_ctx->items, 2)->immediate; + src = ctx->cur_frame->method->code; + src += aml_offset; + + pkg = &item_array_at(&op_ctx->items, 0)->pkg; + init_size = pkg->end - aml_offset; + + // TODO: do package bounds checking at parse time + if (uacpi_unlikely(pkg->end > ctx->cur_frame->method->size)) + return UACPI_STATUS_AML_BAD_ENCODING; + + declared_size = item_array_at(&op_ctx->items, 1)->obj; + + if (uacpi_unlikely(declared_size->integer > 0xE0000000)) { + uacpi_error( + "buffer is too large (%"UACPI_PRIu64"), assuming corrupted " + "bytestream\n", UACPI_FMT64(declared_size->integer) + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + if (uacpi_unlikely(declared_size->integer == 0)) { + uacpi_error("attempted to create an empty buffer\n"); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + buffer_size = declared_size->integer; + if (uacpi_unlikely(init_size > buffer_size)) { + uacpi_error( + "too many buffer initializers: %u (size is %u)\n", + init_size, buffer_size + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + dst = item_array_at(&op_ctx->items, 3)->obj; + dst->buffer->data = uacpi_kernel_alloc(buffer_size); + if (uacpi_unlikely(dst->buffer->data == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + dst->buffer->size = buffer_size; + + uacpi_memcpy_zerout(dst->buffer->data, src, buffer_size, init_size); + return UACPI_STATUS_OK; +} + +static uacpi_status handle_string(struct execution_context *ctx) +{ + struct call_frame *frame = ctx->cur_frame; + uacpi_object *obj; + + uacpi_char *string; + uacpi_size length, max_bytes; + + obj = item_array_last(&ctx->cur_op_ctx->items)->obj; + string = call_frame_cursor(frame); + + // TODO: sanitize string for valid UTF-8 + max_bytes = call_frame_code_bytes_left(frame); + length = uacpi_strnlen(string, max_bytes); + + if (uacpi_unlikely((length == max_bytes) || (string[length++] != 0x00))) + return UACPI_STATUS_AML_BAD_ENCODING; + + obj->buffer->text = uacpi_kernel_alloc(length); + if (uacpi_unlikely(obj->buffer->text == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(obj->buffer->text, string, length); + obj->buffer->size = length; + frame->code_offset += length; + return UACPI_STATUS_OK; +} + +static uacpi_status handle_package(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_package *package; + uacpi_u32 num_elements, num_defined_elements, i; + + /* + * Layout of items here: + * [0] -> Package length, not interesting + * [1] -> Immediate or integer object, depending on PackageOp/VarPackageOp + * [2..N-2] -> AML pc+Package element pairs + * [N-1] -> The resulting package object that we're constructing + */ + package = 
item_array_last(&op_ctx->items)->obj->package; + + // 1. Detect how many elements we have, do sanity checking + if (op_ctx->op->code == UACPI_AML_OP_VarPackageOp) { + uacpi_object *var_num_elements; + + var_num_elements = item_array_at(&op_ctx->items, 1)->obj; + if (uacpi_unlikely(var_num_elements->integer > 0xE0000000)) { + uacpi_error( + "package is too large (%"UACPI_PRIu64"), assuming " + "corrupted bytestream\n", UACPI_FMT64(var_num_elements->integer) + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + num_elements = var_num_elements->integer; + } else { + num_elements = item_array_at(&op_ctx->items, 1)->immediate; + } + + num_defined_elements = (item_array_size(&op_ctx->items) - 3) / 2; + if (uacpi_unlikely(num_defined_elements > num_elements)) { + uacpi_warn( + "too many package initializers: %u, truncating to %u\n", + num_defined_elements, num_elements + ); + + num_defined_elements = num_elements; + } + + // 2. Create every object in the package, start as uninitialized + if (uacpi_unlikely(!uacpi_package_fill(package, num_elements, + UACPI_PREALLOC_OBJECTS_YES))) + return UACPI_STATUS_OUT_OF_MEMORY; + + // 3. Go through every defined object and copy it into the package + for (i = 0; i < num_defined_elements; ++i) { + uacpi_size base_pkg_index; + uacpi_status ret; + struct item *item; + uacpi_object *obj; + + base_pkg_index = (i * 2) + 2; + item = item_array_at(&op_ctx->items, base_pkg_index + 1); + obj = item->obj; + + if (obj != UACPI_NULL && obj->type == UACPI_OBJECT_REFERENCE) { + /* + * For named objects we don't actually need the object itself, but + * simply the path to it. Often times objects referenced by the + * package are not defined until later so it's not possible to + * resolve them. For uniformity and to follow the behavior of NT, + * simply convert the name string to a path string object to be + * resolved later when actually needed. + */ + if (obj->flags == UACPI_REFERENCE_KIND_NAMED) { + uacpi_object_unref(obj); + item->obj = UACPI_NULL; + obj = UACPI_NULL; + } else { + obj = uacpi_unwrap_internal_reference(obj); + } + } + + if (obj == UACPI_NULL) { + uacpi_size length; + uacpi_char *path; + + obj = uacpi_create_object(UACPI_OBJECT_STRING); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = name_string_to_path( + ctx->cur_frame, + item_array_at(&op_ctx->items, base_pkg_index)->immediate, + &path, &length + ); + if (uacpi_unlikely_error(ret)) + return ret; + + obj->flags = UACPI_STRING_KIND_PATH; + obj->buffer->text = path; + obj->buffer->size = length; + + item->obj = obj; + item->type = ITEM_OBJECT; + } + + ret = uacpi_object_assign(package->objects[i], obj, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); + if (uacpi_unlikely_error(ret)) + return ret; + } + + return UACPI_STATUS_OK; +} + +static uacpi_size sizeof_int(void) +{ + return g_uacpi_rt_ctx.is_rev1 ? 
4 : 8; +} + +static uacpi_status get_object_storage( + uacpi_object *obj, uacpi_data_view *out_buf, uacpi_bool include_null +) +{ + switch (obj->type) { + case UACPI_OBJECT_INTEGER: + out_buf->length = sizeof_int(); + out_buf->data = &obj->integer; + break; + case UACPI_OBJECT_STRING: + out_buf->length = obj->buffer->size; + if (out_buf->length && !include_null) + out_buf->length--; + + out_buf->text = obj->buffer->text; + break; + case UACPI_OBJECT_BUFFER: + if (obj->buffer->size == 0) { + out_buf->bytes = UACPI_NULL; + out_buf->length = 0; + break; + } + + out_buf->length = obj->buffer->size; + out_buf->bytes = obj->buffer->data; + break; + case UACPI_OBJECT_REFERENCE: + return UACPI_STATUS_INVALID_ARGUMENT; + default: + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return UACPI_STATUS_OK; +} + +static uacpi_u8 *buffer_index_cursor(uacpi_buffer_index *buf_idx) +{ + uacpi_u8 *out_cursor; + + out_cursor = buf_idx->buffer->data; + out_cursor += buf_idx->idx; + + return out_cursor; +} + +static void write_buffer_index(uacpi_buffer_index *buf_idx, + uacpi_data_view *src_buf) +{ + uacpi_memcpy_zerout(buffer_index_cursor(buf_idx), src_buf->bytes, + 1, src_buf->length); +} + +/* + * The word "implicit cast" here is only because it's called that in + * the specification. In reality, we just copy one buffer to another + * because that's what NT does. + */ +static uacpi_status object_assign_with_implicit_cast( + uacpi_object *dst, uacpi_object *src, uacpi_data_view *wtr_response +) +{ + uacpi_status ret; + uacpi_data_view src_buf; + + ret = get_object_storage(src, &src_buf, UACPI_FALSE); + if (uacpi_unlikely_error(ret)) + goto out_bad_cast; + + switch (dst->type) { + case UACPI_OBJECT_INTEGER: + case UACPI_OBJECT_STRING: + case UACPI_OBJECT_BUFFER: { + uacpi_data_view dst_buf; + + ret = get_object_storage(dst, &dst_buf, UACPI_FALSE); + if (uacpi_unlikely_error(ret)) + goto out_bad_cast; + + uacpi_memcpy_zerout( + dst_buf.bytes, src_buf.bytes, dst_buf.length, src_buf.length + ); + break; + } + + case UACPI_OBJECT_BUFFER_FIELD: + uacpi_write_buffer_field( + &dst->buffer_field, src_buf.bytes, src_buf.length + ); + break; + + case UACPI_OBJECT_FIELD_UNIT: + return uacpi_write_field_unit( + dst->field_unit, src_buf.bytes, src_buf.length, + wtr_response + ); + + case UACPI_OBJECT_BUFFER_INDEX: + write_buffer_index(&dst->buffer_index, &src_buf); + break; + + default: + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + goto out_bad_cast; + } + + return ret; + +out_bad_cast: + uacpi_error( + "attempted to perform an invalid implicit cast (%s -> %s)\n", + uacpi_object_type_to_string(src->type), + uacpi_object_type_to_string(dst->type) + ); + return ret; +} + +enum argx_or_localx { + ARGX, + LOCALX, +}; + +static uacpi_status handle_arg_or_local( + struct execution_context *ctx, + uacpi_size idx, enum argx_or_localx type +) +{ + uacpi_object **src; + struct item *dst; + enum uacpi_reference_kind kind; + + if (type == ARGX) { + src = &ctx->cur_frame->args[idx]; + kind = UACPI_REFERENCE_KIND_ARG; + } else { + src = &ctx->cur_frame->locals[idx]; + kind = UACPI_REFERENCE_KIND_LOCAL; + } + + if (*src == UACPI_NULL) { + uacpi_object *default_value; + + default_value = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(default_value == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + *src = uacpi_create_internal_reference(kind, default_value); + if (uacpi_unlikely(*src == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_object_unref(default_value); + } + + dst = 
item_array_last(&ctx->cur_op_ctx->items); + dst->obj = *src; + dst->type = ITEM_OBJECT; + uacpi_object_ref(dst->obj); + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_local(struct execution_context *ctx) +{ + uacpi_size idx; + struct op_context *op_ctx = ctx->cur_op_ctx; + + idx = op_ctx->op->code - UACPI_AML_OP_Local0Op; + return handle_arg_or_local(ctx, idx, LOCALX); +} + +static uacpi_status handle_arg(struct execution_context *ctx) +{ + uacpi_size idx; + struct op_context *op_ctx = ctx->cur_op_ctx; + + idx = op_ctx->op->code - UACPI_AML_OP_Arg0Op; + return handle_arg_or_local(ctx, idx, ARGX); +} + +static uacpi_status handle_named_object(struct execution_context *ctx) +{ + struct uacpi_namespace_node *src; + struct item *dst; + + src = item_array_at(&ctx->cur_op_ctx->items, 0)->node; + dst = item_array_at(&ctx->cur_op_ctx->items, 1); + + dst->obj = src->object; + dst->type = ITEM_OBJECT; + uacpi_object_ref(dst->obj); + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_create_alias(struct execution_context *ctx) +{ + uacpi_namespace_node *src, *dst; + + src = item_array_at(&ctx->cur_op_ctx->items, 0)->node; + dst = item_array_at(&ctx->cur_op_ctx->items, 1)->node; + + dst->object = src->object; + dst->flags = UACPI_NAMESPACE_NODE_FLAG_ALIAS; + uacpi_object_ref(dst->object); + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_create_op_region(struct execution_context *ctx) +{ + uacpi_namespace_node *node; + uacpi_object *obj; + uacpi_operation_region *op_region; + uacpi_u64 region_end; + + node = item_array_at(&ctx->cur_op_ctx->items, 0)->node; + obj = item_array_at(&ctx->cur_op_ctx->items, 4)->obj; + op_region = obj->op_region; + + op_region->space = item_array_at(&ctx->cur_op_ctx->items, 1)->immediate; + op_region->offset = item_array_at(&ctx->cur_op_ctx->items, 2)->obj->integer; + op_region->length = item_array_at(&ctx->cur_op_ctx->items, 3)->obj->integer; + region_end = op_region->offset + op_region->length; + + if (uacpi_unlikely(op_region->length == 0)) { + // Don't abort here, as long as it's never accessed we don't care + uacpi_warn("unusable/empty operation region %.4s\n", node->name.text); + } else if (uacpi_unlikely(op_region->offset > region_end)) { + uacpi_error( + "invalid operation region %.4s bounds: offset=0x%"UACPI_PRIX64 + " length=0x%"UACPI_PRIX64"\n", node->name.text, + UACPI_FMT64(op_region->offset), UACPI_FMT64(op_region->length) + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + if (op_region->space == UACPI_ADDRESS_SPACE_PCC && op_region->offset > 255) { + uacpi_warn( + "invalid PCC operation region %.4s subspace %"UACPI_PRIX64"\n", + node->name.text, UACPI_FMT64(op_region->offset) + ); + } + + node->object = uacpi_create_internal_reference( + UACPI_REFERENCE_KIND_NAMED, obj + ); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_initialize_opregion_node(node); + return UACPI_STATUS_OK; +} + +static uacpi_status table_id_error( + const uacpi_char *opcode, const uacpi_char *arg, + uacpi_buffer *str +) +{ + uacpi_error("%s: invalid %s '%s'\n", opcode, arg, str->text); + return UACPI_STATUS_AML_BAD_ENCODING; +} + +static void report_table_id_find_error( + const uacpi_char *opcode, struct uacpi_table_identifiers *id, + uacpi_status ret +) +{ + uacpi_error( + "%s: unable to find table '%.4s' (OEM ID '%.6s', " + "OEM Table ID '%.8s'): %s\n", + opcode, id->signature.text, id->oemid, id->oem_table_id, + uacpi_status_to_string(ret) + ); +} + +static uacpi_status build_table_id( + const uacpi_char 
*opcode, + struct uacpi_table_identifiers *out_id, + uacpi_buffer *signature, uacpi_buffer *oem_id, + uacpi_buffer *oem_table_id +) +{ + if (uacpi_unlikely(signature->size != (sizeof(uacpi_object_name) + 1))) + return table_id_error(opcode, "SignatureString", signature); + + uacpi_memcpy(out_id->signature.text, signature->text, + sizeof(uacpi_object_name)); + + if (uacpi_unlikely(oem_id->size > (sizeof(out_id->oemid) + 1))) + return table_id_error(opcode, "OemIDString", oem_id); + + uacpi_memcpy_zerout( + out_id->oemid, oem_id->text, + sizeof(out_id->oemid), oem_id->size ? oem_id->size - 1 : 0 + ); + + if (uacpi_unlikely(oem_table_id->size > (sizeof(out_id->oem_table_id) + 1))) + return table_id_error(opcode, "OemTableIDString", oem_table_id); + + uacpi_memcpy_zerout( + out_id->oem_table_id, oem_table_id->text, + sizeof(out_id->oem_table_id), + oem_table_id->size ? oem_table_id->size - 1 : 0 + ); + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_create_data_region(struct execution_context *ctx) +{ + uacpi_status ret; + struct item_array *items = &ctx->cur_op_ctx->items; + struct uacpi_table_identifiers table_id; + uacpi_table table; + uacpi_namespace_node *node; + uacpi_object *obj; + uacpi_operation_region *op_region; + + node = item_array_at(items, 0)->node; + + ret = build_table_id( + "DataTableRegion", &table_id, + item_array_at(items, 1)->obj->buffer, + item_array_at(items, 2)->obj->buffer, + item_array_at(items, 3)->obj->buffer + ); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_table_find(&table_id, &table); + if (uacpi_unlikely_error(ret)) { + report_table_id_find_error("DataTableRegion", &table_id, ret); + return ret; + } + + obj = item_array_at(items, 4)->obj; + op_region = obj->op_region; + op_region->space = UACPI_ADDRESS_SPACE_TABLE_DATA; + op_region->offset = table.virt_addr; + op_region->length = table.hdr->length; + op_region->table_idx = table.index; + + node->object = uacpi_create_internal_reference( + UACPI_REFERENCE_KIND_NAMED, obj + ); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_initialize_opregion_node(node); + return UACPI_STATUS_OK; +} + +static uacpi_bool is_dynamic_table_load(enum uacpi_table_load_cause cause) +{ + return cause != UACPI_TABLE_LOAD_CAUSE_INIT; +} + +static void prepare_table_load( + void *ptr, enum uacpi_table_load_cause cause, uacpi_control_method *in_method +) +{ + struct acpi_dsdt *dsdt = ptr; + enum uacpi_log_level log_level = UACPI_LOG_TRACE; + const uacpi_char *log_prefix = "load of"; + + if (is_dynamic_table_load(cause)) { + log_prefix = cause == UACPI_TABLE_LOAD_CAUSE_HOST ? 
+ "host-invoked load of" : "dynamic load of"; + log_level = UACPI_LOG_INFO; + } + + uacpi_log_lvl( + log_level, "%s "UACPI_PRI_TBL_HDR"\n", + log_prefix, UACPI_FMT_TBL_HDR(&dsdt->hdr) + ); + + in_method->code = dsdt->definition_block; + in_method->size = dsdt->hdr.length - sizeof(dsdt->hdr); + in_method->named_objects_persist = UACPI_TRUE; +} + +static uacpi_status do_load_table( + uacpi_namespace_node *parent, struct acpi_sdt_hdr *tbl, + enum uacpi_table_load_cause cause +) +{ + struct uacpi_control_method method = { 0 }; + uacpi_status ret; + + prepare_table_load(tbl, cause, &method); + + ret = uacpi_execute_control_method(parent, &method, UACPI_NULL, UACPI_NULL); + if (uacpi_unlikely_error(ret)) + return ret; + + if (is_dynamic_table_load(cause)) + uacpi_events_match_post_dynamic_table_load(); + + return ret; +} + +static uacpi_status handle_load_table(struct execution_context *ctx) +{ + uacpi_status ret; + struct item_array *items = &ctx->cur_op_ctx->items; + struct item *root_node_item; + struct uacpi_table_identifiers table_id; + uacpi_table table; + uacpi_buffer *root_path, *param_path; + uacpi_control_method *method; + uacpi_namespace_node *root_node, *param_node = UACPI_NULL; + + /* + * If we already have the last true/false object loaded, this is a second + * invocation of this handler. For the second invocation we want to detect + * new AML GPE handlers that might've been loaded, as well as potentially + * remove the target. + */ + if (item_array_size(items) == 12) { + uacpi_size idx; + struct uacpi_table tmp_table = { 0 }; + + idx = item_array_at(items, 2)->immediate; + tmp_table.index = idx; + uacpi_table_unref(&tmp_table); + + /* + * If this load failed, remove the target that was provided via + * ParameterPathString so that it doesn't get stored to. 
+ */ + if (uacpi_unlikely(item_array_at(items, 11)->obj->integer == 0)) { + uacpi_object *target; + + target = item_array_at(items, 3)->obj; + if (target != UACPI_NULL) { + uacpi_object_unref(target); + item_array_at(items, 3)->obj = UACPI_NULL; + } + + return UACPI_STATUS_OK; + } + + uacpi_events_match_post_dynamic_table_load(); + return UACPI_STATUS_OK; + } + + ret = build_table_id( + "LoadTable", &table_id, + item_array_at(items, 5)->obj->buffer, + item_array_at(items, 6)->obj->buffer, + item_array_at(items, 7)->obj->buffer + ); + if (uacpi_unlikely_error(ret)) + return ret; + + root_path = item_array_at(items, 8)->obj->buffer; + param_path = item_array_at(items, 9)->obj->buffer; + root_node_item = item_array_at(items, 0); + + if (root_path->size > 1) { + ret = uacpi_namespace_node_resolve( + ctx->cur_frame->cur_scope, root_path->text, UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, UACPI_PERMANENT_ONLY_NO, + &root_node + ); + if (uacpi_unlikely_error(ret)) { + table_id_error("LoadTable", "RootPathString", root_path); + if (ret == UACPI_STATUS_NOT_FOUND) + ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE; + return ret; + } + } else { + root_node = uacpi_namespace_root(); + } + + root_node_item->node = root_node; + root_node_item->type = ITEM_NAMESPACE_NODE; + uacpi_shareable_ref(root_node); + + if (param_path->size > 1) { + struct item *param_item; + + ret = uacpi_namespace_node_resolve( + root_node, param_path->text, UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, UACPI_PERMANENT_ONLY_NO, + &param_node + ); + if (uacpi_unlikely_error(ret)) { + table_id_error("LoadTable", "ParameterPathString", param_path); + if (ret == UACPI_STATUS_NOT_FOUND) + ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE; + return ret; + } + + param_item = item_array_at(items, 3); + param_item->obj = param_node->object; + uacpi_object_ref(param_item->obj); + param_item->type = ITEM_OBJECT; + } + + ret = uacpi_table_find(&table_id, &table); + if (uacpi_unlikely_error(ret)) { + report_table_id_find_error("LoadTable", &table_id, ret); + return ret; + } + uacpi_table_mark_as_loaded(table.index); + + item_array_at(items, 2)->immediate = table.index; + method = item_array_at(items, 1)->obj->method; + prepare_table_load(table.hdr, UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP, method); + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_load(struct execution_context *ctx) +{ + uacpi_status ret; + struct item_array *items = &ctx->cur_op_ctx->items; + uacpi_table table; + uacpi_control_method *method; + uacpi_object *src; + struct acpi_sdt_hdr *src_table = UACPI_NULL; + void *table_buffer; + uacpi_size declared_size; + uacpi_bool unmap_src = UACPI_FALSE; + + /* + * If we already have the last true/false object loaded, this is a second + * invocation of this handler. For the second invocation we simply want to + * detect new AML GPE handlers that might've been loaded. + * We do this only if table load was successful though. 
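+ * + * E.g. an SSDT brought in via Load() may define new _Lxx/_Exx GPE methods + * under \_GPE; the uacpi_events_match_post_dynamic_table_load() call below + * is what wires those up, and we only reach it when the result object at + * items[4] reads as non-zero. (Editor's note.)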
+ */ + if (item_array_size(items) == 5) { + if (item_array_at(items, 4)->obj->integer != 0) + uacpi_events_match_post_dynamic_table_load(); + return UACPI_STATUS_OK; + } + + src = item_array_at(items, 2)->obj; + + switch (src->type) { + case UACPI_OBJECT_OPERATION_REGION: { + uacpi_operation_region *op_region; + + op_region = src->op_region; + if (uacpi_unlikely( + op_region->space != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY + )) { + uacpi_error("Load: operation region is not SystemMemory\n"); + goto error_out; + } + + if (uacpi_unlikely(op_region->length < sizeof(struct acpi_sdt_hdr))) { + uacpi_error( + "Load: operation region is too small: %"UACPI_PRIu64"\n", + UACPI_FMT64(op_region->length) + ); + goto error_out; + } + + src_table = uacpi_kernel_map(op_region->offset, op_region->length); + if (uacpi_unlikely(src_table == UACPI_NULL)) { + uacpi_error( + "Load: failed to map operation region " + "0x%016"UACPI_PRIX64" -> 0x%016"UACPI_PRIX64"\n", + UACPI_FMT64(op_region->offset), + UACPI_FMT64(op_region->offset + op_region->length) + ); + goto error_out; + } + + unmap_src = UACPI_TRUE; + declared_size = op_region->length; + break; + } + + case UACPI_OBJECT_BUFFER: { + uacpi_buffer *buffer; + + buffer = src->buffer; + if (buffer->size < sizeof(struct acpi_sdt_hdr)) { + uacpi_error( + "Load: buffer is too small: %zu\n", + buffer->size + ); + goto error_out; + } + + src_table = buffer->data; + declared_size = buffer->size; + break; + } + + default: + uacpi_error( + "Load: invalid argument '%s', expected " + "Buffer/Field/OperationRegion\n", + uacpi_object_type_to_string(src->type) + ); + goto error_out; + } + + if (uacpi_unlikely(src_table->length > declared_size)) { + uacpi_error( + "Load: table size %u is larger than the declared size %zu\n", + src_table->length, declared_size + ); + goto error_out; + } + + if (uacpi_unlikely(src_table->length < sizeof(struct acpi_sdt_hdr))) { + uacpi_error("Load: table size %u is too small\n", src_table->length); + goto error_out; + } + + table_buffer = uacpi_kernel_alloc(src_table->length); + if (uacpi_unlikely(table_buffer == UACPI_NULL)) + goto error_out; + + uacpi_memcpy(table_buffer, src_table, src_table->length); + + if (unmap_src) { + uacpi_kernel_unmap(src_table, declared_size); + unmap_src = UACPI_FALSE; + } + + ret = uacpi_table_install_with_origin( + table_buffer, UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL, &table + ); + if (uacpi_unlikely_error(ret)) { + // src_table may have been unmapped above, size the free from the copy + uacpi_free(table_buffer, ((struct acpi_sdt_hdr*)table_buffer)->length); + + if (ret != UACPI_STATUS_OVERRIDDEN) + goto error_out; + } + uacpi_table_mark_as_loaded(table.index); + + item_array_at(items, 0)->node = uacpi_namespace_root(); + + method = item_array_at(items, 1)->obj->method; + prepare_table_load(table.ptr, UACPI_TABLE_LOAD_CAUSE_LOAD_OP, method); + + return UACPI_STATUS_OK; + +error_out: + if (unmap_src && src_table) + uacpi_kernel_unmap(src_table, declared_size); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_execute_table(void *tbl, enum uacpi_table_load_cause cause) +{ + uacpi_status ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = do_load_table(uacpi_namespace_root(), tbl, cause); + + uacpi_namespace_write_unlock(); + return ret; +} + +static uacpi_u32 get_field_length(struct item *item) +{ + struct package_length *pkg = &item->pkg; + return pkg->end - pkg->begin; +} + +struct field_specific_data { + uacpi_namespace_node *region; + struct uacpi_field_unit *field0; + struct uacpi_field_unit *field1; + uacpi_u64 value; +}; + +static uacpi_status 
ensure_is_a_field_unit(uacpi_namespace_node *node, + uacpi_field_unit **out_field) +{ + uacpi_object *obj; + + obj = uacpi_namespace_node_get_object(node); + if (obj->type != UACPI_OBJECT_FIELD_UNIT) { + uacpi_error( + "invalid argument: '%.4s' is not a field unit (%s)\n", + node->name.text, uacpi_object_type_to_string(obj->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + *out_field = obj->field_unit; + return UACPI_STATUS_OK; +} + +static uacpi_status ensure_is_an_op_region(uacpi_namespace_node *node, + uacpi_namespace_node **out_node) +{ + uacpi_object *obj; + + obj = uacpi_namespace_node_get_object(node); + if (obj->type != UACPI_OBJECT_OPERATION_REGION) { + uacpi_error( + "invalid argument: '%.4s' is not an operation region (%s)\n", + node->name.text, uacpi_object_type_to_string(obj->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + *out_node = node; + return UACPI_STATUS_OK; +} + +static uacpi_status handle_create_field(struct execution_context *ctx) +{ + uacpi_status ret; + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_namespace_node *node; + uacpi_object *obj, *connection_obj = UACPI_NULL; + struct field_specific_data field_data = { 0 }; + uacpi_size i = 1, bit_offset = 0; + uacpi_u32 length, pin_offset = 0; + + uacpi_u8 raw_value, access_type, lock_rule, update_rule; + uacpi_u8 access_attrib = 0, access_length = 0; + + switch (op_ctx->op->code) { + case UACPI_AML_OP_FieldOp: + node = item_array_at(&op_ctx->items, i++)->node; + ret = ensure_is_an_op_region(node, &field_data.region); + if (uacpi_unlikely_error(ret)) + return ret; + break; + + case UACPI_AML_OP_BankFieldOp: + node = item_array_at(&op_ctx->items, i++)->node; + ret = ensure_is_an_op_region(node, &field_data.region); + if (uacpi_unlikely_error(ret)) + return ret; + + node = item_array_at(&op_ctx->items, i++)->node; + ret = ensure_is_a_field_unit(node, &field_data.field0); + if (uacpi_unlikely_error(ret)) + return ret; + + field_data.value = item_array_at(&op_ctx->items, i++)->obj->integer; + break; + + case UACPI_AML_OP_IndexFieldOp: + node = item_array_at(&op_ctx->items, i++)->node; + ret = ensure_is_a_field_unit(node, &field_data.field0); + if (uacpi_unlikely_error(ret)) + return ret; + + node = item_array_at(&op_ctx->items, i++)->node; + ret = ensure_is_a_field_unit(node, &field_data.field1); + if (uacpi_unlikely_error(ret)) + return ret; + break; + + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + /* + * ByteData + * bit 0-3: AccessType + * 0 AnyAcc + * 1 ByteAcc + * 2 WordAcc + * 3 DWordAcc + * 4 QWordAcc + * 5 BufferAcc + * 6 Reserved + * 7-15 Reserved + * bit 4: LockRule + * 0 NoLock + * 1 Lock + * bit 5-6: UpdateRule + * 0 Preserve + * 1 WriteAsOnes + * 2 WriteAsZeros + * bit 7: Reserved (must be 0) + */ + raw_value = item_array_at(&op_ctx->items, i++)->immediate; + access_type = (raw_value >> 0) & 0xF; + lock_rule = (raw_value >> 4) & 0x1; + update_rule = (raw_value >> 5) & 0x3; + + while (i < item_array_size(&op_ctx->items)) { + struct item *item; + item = item_array_at(&op_ctx->items, i++); + + // An actual field object + if (item->type == ITEM_NAMESPACE_NODE) { + uacpi_field_unit *field; + + length = get_field_length(item_array_at(&op_ctx->items, i++)); + node = item->node; + + obj = item_array_at(&op_ctx->items, i++)->obj; + field = obj->field_unit; + + field->update_rule = update_rule; + field->lock_rule = lock_rule; + field->attributes = access_attrib; + field->access_length = access_length; + + /* + * 0 AnyAcc + * 1 ByteAcc + * 2 WordAcc + * 3 
DWordAcc + * 4 QWordAcc + * 5 BufferAcc + * 6 Reserved + * 7-15 Reserved + */ + switch (access_type) { + case 0: + // TODO: optimize to calculate best access strategy + UACPI_FALLTHROUGH; + case 1: + case 5: + field->access_width_bytes = 1; + break; + case 2: + field->access_width_bytes = 2; + break; + case 3: + field->access_width_bytes = 4; + break; + case 4: + field->access_width_bytes = 8; + break; + default: + uacpi_error("invalid field '%.4s' access type %d\n", + node->name.text, access_type); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + field->bit_length = length; + field->pin_offset = pin_offset; + + // FIXME: overflow, OOB, etc checks + field->byte_offset = UACPI_ALIGN_DOWN( + bit_offset / 8, + field->access_width_bytes, + uacpi_u32 + ); + + field->bit_offset_within_first_byte = bit_offset; + field->bit_offset_within_first_byte = + bit_offset & ((field->access_width_bytes * 8) - 1); + + switch (op_ctx->op->code) { + case UACPI_AML_OP_FieldOp: + field->region = field_data.region; + uacpi_shareable_ref(field->region); + + field->kind = UACPI_FIELD_UNIT_KIND_NORMAL; + break; + + case UACPI_AML_OP_BankFieldOp: + field->bank_region = field_data.region; + uacpi_shareable_ref(field->bank_region); + + field->bank_selection = field_data.field0; + uacpi_shareable_ref(field->bank_selection); + + field->bank_value = field_data.value; + field->kind = UACPI_FIELD_UNIT_KIND_BANK; + break; + + case UACPI_AML_OP_IndexFieldOp: + field->index = field_data.field0; + uacpi_shareable_ref(field->index); + + field->data = field_data.field1; + uacpi_shareable_ref(field->data); + + field->kind = UACPI_FIELD_UNIT_KIND_INDEX; + break; + + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + field->connection = connection_obj; + if (field->connection) + uacpi_object_ref(field->connection); + + node->object = uacpi_create_internal_reference( + UACPI_REFERENCE_KIND_NAMED, obj + ); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = do_install_node_item(ctx->cur_frame, item); + if (uacpi_unlikely_error(ret)) + return ret; + + bit_offset += length; + pin_offset += length; + continue; + } + + // All other stuff + switch ((int)item->immediate) { + // ReservedField := 0x00 PkgLength + case 0x00: + length = get_field_length(item_array_at(&op_ctx->items, i++)); + bit_offset += length; + pin_offset += length; + break; + + // AccessField := 0x01 AccessType AccessAttrib + // ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib AccessLength + case 0x01: + case 0x03: + raw_value = item_array_at(&op_ctx->items, i++)->immediate; + + access_type = raw_value & 0xF; + access_attrib = (raw_value >> 6) & 0x3; + + raw_value = item_array_at(&op_ctx->items, i++)->immediate; + + /* + * Bits 7:6 + * 0 = AccessAttrib = Normal Access Attributes + * 1 = AccessAttrib = AttribBytes (x) + * 2 = AccessAttrib = AttribRawBytes (x) + * 3 = AccessAttrib = AttribRawProcessBytes (x) + * x is encoded as bits 0:7 of the AccessAttrib byte. 
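+ * + * Worked example (editor's addition): an AccessType byte of 0x42 has + * bits 7:6 = 1 and bits 3:0 = 2, i.e. WordAcc with AttribBytes(x), so the + * following AccessAttrib byte, say 0x04, is consumed as the length x = 4.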
+ */ + if (access_attrib) { + switch (access_attrib) { + case 1: + access_attrib = UACPI_ACCESS_ATTRIBUTE_BYTES; + break; + case 2: + access_attrib = UACPI_ACCESS_ATTRIBUTE_RAW_BYTES; + break; + case 3: + access_attrib = UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES; + break; + } + + access_length = raw_value; + } else { // Normal access attributes + access_attrib = raw_value; + } + + if (item->immediate == 3) + access_length = item_array_at(&op_ctx->items, i++)->immediate; + break; + + // ConnectField := <0x02 NameString> | <0x02 BufferData> + case 0x02: + connection_obj = item_array_at(&op_ctx->items, i++)->obj; + pin_offset = 0; + break; + + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + } + + return UACPI_STATUS_OK; +} + +static void truncate_number_if_needed(uacpi_object *obj) +{ + if (!g_uacpi_rt_ctx.is_rev1) + return; + + obj->integer &= 0xFFFFFFFF; +} + +static uacpi_u64 ones(void) +{ + return g_uacpi_rt_ctx.is_rev1 ? 0xFFFFFFFF : 0xFFFFFFFFFFFFFFFF; +} + +static uacpi_status method_get_ret_target(struct execution_context *ctx, + uacpi_object **out_operand) +{ + uacpi_size depth; + + // Check if we're targeting the previous call frame + depth = call_frame_array_size(&ctx->call_stack); + if (depth > 1) { + struct op_context *op_ctx; + struct call_frame *frame; + + frame = call_frame_array_at(&ctx->call_stack, depth - 2); + depth = op_context_array_size(&frame->pending_ops); + + // Ok, no one wants the return value at call site. Discard it. + if (!depth) { + *out_operand = UACPI_NULL; + return UACPI_STATUS_OK; + } + + op_ctx = op_context_array_at(&frame->pending_ops, depth - 1); + + /* + * Prevent the table being dynamically loaded from attempting to return + * a value to the caller. This is unlikely to be ever encountered in the + * wild, but we should still guard against the possibility. 
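+ * + * E.g. a stray top-level Return(Zero) in a table brought in via + * Load()/LoadTable() would otherwise try to write its value through the + * pending Load op's last item, so it is discarded instead. (Editor's + * illustration.)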
+ */ + if (uacpi_unlikely(op_ctx->op->code == UACPI_AML_OP_LoadOp || + op_ctx->op->code == UACPI_AML_OP_LoadTableOp)) { + *out_operand = UACPI_NULL; + return UACPI_STATUS_OK; + } + + *out_operand = item_array_last(&op_ctx->items)->obj; + return UACPI_STATUS_OK; + } + + return UACPI_STATUS_NOT_FOUND; +} + +static uacpi_status method_get_ret_object(struct execution_context *ctx, + uacpi_object **out_obj) +{ + uacpi_status ret; + + ret = method_get_ret_target(ctx, out_obj); + if (ret == UACPI_STATUS_NOT_FOUND) { + *out_obj = ctx->ret; + return UACPI_STATUS_OK; + } + if (ret != UACPI_STATUS_OK || *out_obj == UACPI_NULL) + return ret; + + *out_obj = uacpi_unwrap_internal_reference(*out_obj); + return UACPI_STATUS_OK; +} + +static struct code_block *find_last_block(struct code_block_array *blocks, + enum code_block_type type) +{ + uacpi_size i; + + i = code_block_array_size(blocks); + while (i-- > 0) { + struct code_block *block; + + block = code_block_array_at(blocks, i); + if (block->type == type) + return block; + } + + return UACPI_NULL; +} + +static void update_scope(struct call_frame *frame) +{ + struct code_block *block; + + block = find_last_block(&frame->code_blocks, CODE_BLOCK_SCOPE); + if (block == UACPI_NULL) { + frame->cur_scope = uacpi_namespace_root(); + return; + } + + frame->cur_scope = block->node; +} + +static uacpi_status begin_block_execution(struct execution_context *ctx) +{ + struct call_frame *cur_frame = ctx->cur_frame; + struct op_context *op_ctx = ctx->cur_op_ctx; + struct package_length *pkg; + struct code_block *block; + + block = code_block_array_alloc(&cur_frame->code_blocks); + if (uacpi_unlikely(block == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + pkg = &item_array_at(&op_ctx->items, 0)->pkg; + + // Disarm the tracked package so that we don't skip the Scope + op_ctx->tracked_pkg_idx = 0; + + switch (op_ctx->op->code) { + case UACPI_AML_OP_IfOp: + block->type = CODE_BLOCK_IF; + break; + case UACPI_AML_OP_ElseOp: + block->type = CODE_BLOCK_ELSE; + break; + case UACPI_AML_OP_WhileOp: + block->type = CODE_BLOCK_WHILE; + + if (pkg->begin == cur_frame->prev_while_code_offset) { + uacpi_u64 cur_ticks; + + // Carry over the deadline computed when this loop first began, + // it must be set before we compare against it below + block->expiration_point = cur_frame->prev_while_expiration; + cur_ticks = uacpi_kernel_get_nanoseconds_since_boot(); + + if (uacpi_unlikely(cur_ticks > block->expiration_point)) { + uacpi_error("loop timed out after running for %u seconds\n", + g_uacpi_rt_ctx.loop_timeout_seconds); + code_block_array_pop(&cur_frame->code_blocks); + return UACPI_STATUS_AML_LOOP_TIMEOUT; + } + } else { + /* + * Calculate the expiration point for this loop. + * If a loop is executed past this point, it will get aborted. 
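+ * + * E.g. with loop_timeout_seconds configured to 30 (illustrative value), + * a While(One) body that never clears its condition is aborted roughly + * 30 seconds after its first iteration.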
+ */ + block->expiration_point = uacpi_kernel_get_nanoseconds_since_boot(); + block->expiration_point += + g_uacpi_rt_ctx.loop_timeout_seconds * UACPI_NANOSECONDS_PER_SEC; + } + break; + case UACPI_AML_OP_ScopeOp: + case UACPI_AML_OP_DeviceOp: + case UACPI_AML_OP_ProcessorOp: + case UACPI_AML_OP_PowerResOp: + case UACPI_AML_OP_ThermalZoneOp: + block->type = CODE_BLOCK_SCOPE; + block->node = item_array_at(&op_ctx->items, 1)->node; + break; + default: + code_block_array_pop(&cur_frame->code_blocks); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + // -1 because we want to re-evaluate at the start of the op next time + block->begin = pkg->begin - 1; + block->end = pkg->end; + ctx->cur_block = block; + + cur_frame->last_while = find_last_block(&cur_frame->code_blocks, + CODE_BLOCK_WHILE); + update_scope(cur_frame); + return UACPI_STATUS_OK; +} + +static void frame_reset_post_end_block(struct execution_context *ctx, + enum code_block_type type) +{ + struct call_frame *frame = ctx->cur_frame; + + if (type == CODE_BLOCK_WHILE) { + struct code_block *block = ctx->cur_block; + + // + 1 here to skip the WhileOp and get to the PkgLength + frame->prev_while_code_offset = block->begin + 1; + frame->prev_while_expiration = block->expiration_point; + } + + code_block_array_pop(&frame->code_blocks); + ctx->cur_block = code_block_array_last(&frame->code_blocks); + + if (type == CODE_BLOCK_WHILE) { + frame->last_while = find_last_block(&frame->code_blocks, type); + } else if (type == CODE_BLOCK_SCOPE) { + update_scope(frame); + } +} + +static void debug_store_no_recurse(const uacpi_char *prefix, uacpi_object *src) +{ + switch (src->type) { + case UACPI_OBJECT_UNINITIALIZED: + uacpi_trace("%s Uninitialized\n", prefix); + break; + case UACPI_OBJECT_STRING: + uacpi_trace("%s String => \"%s\"\n", prefix, src->buffer->text); + break; + case UACPI_OBJECT_INTEGER: + if (g_uacpi_rt_ctx.is_rev1) { + uacpi_trace( + "%s Integer => 0x%08X\n", prefix, (uacpi_u32)src->integer + ); + } else { + uacpi_trace( + "%s Integer => 0x%016"UACPI_PRIX64"\n", prefix, + UACPI_FMT64(src->integer) + ); + } + break; + case UACPI_OBJECT_REFERENCE: + uacpi_trace("%s Reference @%p => %p\n", prefix, src, src->inner_object); + break; + case UACPI_OBJECT_PACKAGE: + uacpi_trace( + "%s Package @%p (%p) (%zu elements)\n", + prefix, src, src->package, src->package->count + ); + break; + case UACPI_OBJECT_BUFFER: + uacpi_trace( + "%s Buffer @%p (%p) (%zu bytes)\n", + prefix, src, src->buffer, src->buffer->size + ); + break; + case UACPI_OBJECT_OPERATION_REGION: + uacpi_trace( + "%s OperationRegion (ASID %d) 0x%016"UACPI_PRIX64 + " -> 0x%016"UACPI_PRIX64"\n", prefix, + src->op_region->space, UACPI_FMT64(src->op_region->offset), + UACPI_FMT64(src->op_region->offset + src->op_region->length) + ); + break; + case UACPI_OBJECT_POWER_RESOURCE: + uacpi_trace( + "%s Power Resource %d %d\n", + prefix, src->power_resource.system_level, + src->power_resource.resource_order + ); + break; + case UACPI_OBJECT_PROCESSOR: + uacpi_trace( + "%s Processor[%d] 0x%08X (%d)\n", + prefix, src->processor->id, src->processor->block_address, + src->processor->block_length + ); + break; + case UACPI_OBJECT_BUFFER_INDEX: + uacpi_trace( + "%s Buffer Index %p[%zu] => 0x%02X\n", + prefix, src->buffer_index.buffer->data, src->buffer_index.idx, + *buffer_index_cursor(&src->buffer_index) + ); + break; + case UACPI_OBJECT_MUTEX: + uacpi_trace( + "%s Mutex @%p (%p => %p) sync level %d\n", + prefix, src, src->mutex, src->mutex->handle, + src->mutex->sync_level + ); + break; + case 
UACPI_OBJECT_METHOD: + uacpi_trace("%s Method @%p (%p)\n", prefix, src, src->method); + break; + default: + uacpi_trace( + "%s %s @%p\n", + prefix, uacpi_object_type_to_string(src->type), src + ); + } +} + +static uacpi_status debug_store(uacpi_object *src) +{ + /* + * Don't bother running the body if current log level is not set to trace. + * All DebugOp logging is done as TRACE exclusively. + */ + if (!uacpi_should_log(UACPI_LOG_TRACE)) + return UACPI_STATUS_OK; + + src = uacpi_unwrap_internal_reference(src); + + debug_store_no_recurse("[AML DEBUG]", src); + + if (src->type == UACPI_OBJECT_PACKAGE) { + uacpi_package *pkg = src->package; + uacpi_size i; + + for (i = 0; i < pkg->count; ++i) { + uacpi_object *obj = pkg->objects[i]; + if (obj->type == UACPI_OBJECT_REFERENCE && + obj->flags == UACPI_REFERENCE_KIND_PKG_INDEX) + obj = obj->inner_object; + + debug_store_no_recurse("Element:", obj); + } + } + + return UACPI_STATUS_OK; +} + +/* + * NOTE: this function returns the parent object + */ +static uacpi_object *reference_unwind(uacpi_object *obj) +{ + uacpi_object *parent = obj; + + while (obj) { + if (obj->type != UACPI_OBJECT_REFERENCE) + return parent; + + parent = obj; + obj = parent->inner_object; + } + + // This should be unreachable + return UACPI_NULL; +} + +static uacpi_iteration_decision opregion_try_detach_from_parent( + void *user, uacpi_namespace_node *node, uacpi_u32 node_depth +) +{ + uacpi_object *target_object = user; + UACPI_UNUSED(node_depth); + + if (node->object == target_object) { + uacpi_opregion_uninstall_handler(node); + return UACPI_ITERATION_DECISION_BREAK; + } + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +static void object_replace_child(uacpi_object *parent, uacpi_object *new_child) +{ + if (parent->flags == UACPI_REFERENCE_KIND_NAMED && + uacpi_object_is(parent->inner_object, UACPI_OBJECT_OPERATION_REGION)) { + + /* + * We're doing a CopyObject or similar to a namespace node that is an + * operation region. Try to find the parent node and manually detach + * the handler. + */ + opregion_try_detach_from_parent(parent, uacpi_namespace_root(), 0); + uacpi_namespace_do_for_each_child( + uacpi_namespace_root(), opregion_try_detach_from_parent, UACPI_NULL, + UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_NO, UACPI_PERMANENT_ONLY_NO, parent + ); + } + + uacpi_object_detach_child(parent); + uacpi_object_attach_child(parent, new_child); +} + +/* + * Breakdown of what happens here: + * + * CopyObject(..., Obj) where Obj is: + * 1. LocalX -> Overwrite LocalX. + * 2. NAME -> Overwrite NAME. + * 3. ArgX -> Overwrite ArgX unless ArgX is a reference, in that case + * overwrite the referenced object. + * 4. RefOf -> Not allowed here. + * 5. Index -> Overwrite Object stored at the index. 
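+ * + * Illustrative ASL (editor's addition): CopyObject(0x10, Local0) rebinds + * Local0 to a fresh integer (case 1), while CopyObject(0x10, Index(PKG0, 1)) + * overwrites the object stored in that package slot (case 5).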
+ */ + static uacpi_status copy_object_to_reference(uacpi_object *dst, + uacpi_object *src) +{ + uacpi_status ret; + uacpi_object *src_obj, *new_obj; + + switch (dst->flags) { + case UACPI_REFERENCE_KIND_ARG: { + uacpi_object *referenced_obj; + + referenced_obj = uacpi_unwrap_internal_reference(dst); + if (referenced_obj->type == UACPI_OBJECT_REFERENCE) { + dst = reference_unwind(referenced_obj); + break; + } + + UACPI_FALLTHROUGH; + } + case UACPI_REFERENCE_KIND_LOCAL: + case UACPI_REFERENCE_KIND_PKG_INDEX: + case UACPI_REFERENCE_KIND_NAMED: + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + src_obj = uacpi_unwrap_internal_reference(src); + + new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(new_obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = uacpi_object_assign(new_obj, src_obj, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); + if (uacpi_unlikely_error(ret)) + return ret; + + object_replace_child(dst, new_obj); + uacpi_object_unref(new_obj); + + return UACPI_STATUS_OK; +} + +/* + * if Store(..., Obj) where Obj is: + * 1. LocalX/Index -> OVERWRITE unless the object is a reference, in that + * case store to the referenced object _with_ implicit + * cast. + * 2. ArgX -> OVERWRITE unless the object is a reference, in that + * case OVERWRITE the referenced object. + * 3. NAME -> Store with implicit cast. + * 4. RefOf -> Not allowed here. + */ +static uacpi_status store_to_reference( + uacpi_object *dst, uacpi_object *src, uacpi_data_view *wtr_response +) +{ + uacpi_object *src_obj; + uacpi_bool overwrite = UACPI_FALSE; + + switch (dst->flags) { + case UACPI_REFERENCE_KIND_LOCAL: + case UACPI_REFERENCE_KIND_ARG: + case UACPI_REFERENCE_KIND_PKG_INDEX: { + uacpi_object *referenced_obj; + + if (dst->flags == UACPI_REFERENCE_KIND_PKG_INDEX) + referenced_obj = dst->inner_object; + else + referenced_obj = uacpi_unwrap_internal_reference(dst); + + if (referenced_obj->type == UACPI_OBJECT_REFERENCE) { + overwrite = dst->flags == UACPI_REFERENCE_KIND_ARG; + dst = reference_unwind(referenced_obj); + break; + } + + overwrite = UACPI_TRUE; + break; + } + case UACPI_REFERENCE_KIND_NAMED: + dst = reference_unwind(dst); + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + src_obj = uacpi_unwrap_internal_reference(src); + overwrite |= dst->inner_object->type == UACPI_OBJECT_UNINITIALIZED; + + if (overwrite) { + uacpi_status ret; + uacpi_object *new_obj; + + new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(new_obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = uacpi_object_assign(new_obj, src_obj, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(new_obj); + return ret; + } + + object_replace_child(dst, new_obj); + uacpi_object_unref(new_obj); + return UACPI_STATUS_OK; + } + + return object_assign_with_implicit_cast( + dst->inner_object, src_obj, wtr_response + ); +} + +static uacpi_status handle_ref_or_deref_of(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *dst, *src; + + src = item_array_at(&op_ctx->items, 0)->obj; + + if (op_ctx->op->code == UACPI_AML_OP_CondRefOfOp) + dst = item_array_at(&op_ctx->items, 2)->obj; + else + dst = item_array_at(&op_ctx->items, 1)->obj; + + if (op_ctx->op->code == UACPI_AML_OP_DerefOfOp) { + uacpi_bool was_a_reference = UACPI_FALSE; + + if (src->type == UACPI_OBJECT_REFERENCE) { + was_a_reference = UACPI_TRUE; + + /* + * Explicit dereferencing [DerefOf] behavior: + * Simply 
grabs the bottom-most object that is not a reference. + * This mimics the behavior of NT Acpi.sys: any DerfOf fetches + * the bottom-most reference. Note that this is different from + * ACPICA where DerefOf dereferences one level. + */ + src = reference_unwind(src)->inner_object; + } + + if (src->type == UACPI_OBJECT_BUFFER_INDEX) { + uacpi_buffer_index *buf_idx = &src->buffer_index; + + dst->type = UACPI_OBJECT_INTEGER; + uacpi_memcpy_zerout( + &dst->integer, buffer_index_cursor(buf_idx), + sizeof(dst->integer), 1 + ); + return UACPI_STATUS_OK; + } + + if (!was_a_reference) { + uacpi_error( + "invalid DerefOf argument: %s, expected a reference\n", + uacpi_object_type_to_string(src->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return uacpi_object_assign(dst, src, + UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY); + } + + dst->type = UACPI_OBJECT_REFERENCE; + dst->inner_object = src; + uacpi_object_ref(src); + return UACPI_STATUS_OK; +} + +static uacpi_status do_binary_math( + uacpi_object *arg0, uacpi_object *arg1, + uacpi_object *tgt0, uacpi_object *tgt1, + uacpi_aml_op op +) +{ + uacpi_u64 lhs, rhs, res; + uacpi_bool should_negate = UACPI_FALSE; + + lhs = arg0->integer; + rhs = arg1->integer; + + switch (op) + { + case UACPI_AML_OP_AddOp: + res = lhs + rhs; + break; + case UACPI_AML_OP_SubtractOp: + res = lhs - rhs; + break; + case UACPI_AML_OP_MultiplyOp: + res = lhs * rhs; + break; + case UACPI_AML_OP_ShiftLeftOp: + case UACPI_AML_OP_ShiftRightOp: + if (rhs <= (g_uacpi_rt_ctx.is_rev1 ? 31 : 63)) { + if (op == UACPI_AML_OP_ShiftLeftOp) + res = lhs << rhs; + else + res = lhs >> rhs; + } else { + res = 0; + } + break; + case UACPI_AML_OP_NandOp: + should_negate = UACPI_TRUE; + UACPI_FALLTHROUGH; + case UACPI_AML_OP_AndOp: + res = rhs & lhs; + break; + case UACPI_AML_OP_NorOp: + should_negate = UACPI_TRUE; + UACPI_FALLTHROUGH; + case UACPI_AML_OP_OrOp: + res = rhs | lhs; + break; + case UACPI_AML_OP_XorOp: + res = rhs ^ lhs; + break; + case UACPI_AML_OP_DivideOp: + if (uacpi_unlikely(rhs == 0)) { + uacpi_error("attempted to divide by zero\n"); + return UACPI_STATUS_AML_BAD_ENCODING; + } + tgt1->integer = lhs / rhs; + res = lhs % rhs; + break; + case UACPI_AML_OP_ModOp: + if (uacpi_unlikely(rhs == 0)) { + uacpi_error("attempted to calculate modulo of zero\n"); + return UACPI_STATUS_AML_BAD_ENCODING; + } + res = lhs % rhs; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (should_negate) + res = ~res; + + tgt0->integer = res; + return UACPI_STATUS_OK; +} + +static uacpi_status handle_binary_math(struct execution_context *ctx) +{ + uacpi_object *arg0, *arg1, *tgt0, *tgt1; + struct item_array *items = &ctx->cur_op_ctx->items; + uacpi_aml_op op = ctx->cur_op_ctx->op->code; + + arg0 = item_array_at(items, 0)->obj; + arg1 = item_array_at(items, 1)->obj; + + if (op == UACPI_AML_OP_DivideOp) { + tgt0 = item_array_at(items, 4)->obj; + tgt1 = item_array_at(items, 5)->obj; + } else { + tgt0 = item_array_at(items, 3)->obj; + tgt1 = UACPI_NULL; + } + + return do_binary_math(arg0, arg1, tgt0, tgt1, op); +} + +static uacpi_status handle_unary_math(struct execution_context *ctx) +{ + uacpi_object *arg, *tgt; + struct item_array *items = &ctx->cur_op_ctx->items; + uacpi_aml_op op = ctx->cur_op_ctx->op->code; + + arg = item_array_at(items, 0)->obj; + tgt = item_array_at(items, 2)->obj; + + switch (op) { + case UACPI_AML_OP_NotOp: + tgt->integer = ~arg->integer; + truncate_number_if_needed(tgt); + break; + case UACPI_AML_OP_FindSetRightBitOp: + tgt->integer = 
uacpi_bit_scan_forward(arg->integer); + break; + case UACPI_AML_OP_FindSetLeftBitOp: + tgt->integer = uacpi_bit_scan_backward(arg->integer); + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status ensure_valid_idx(uacpi_object *obj, uacpi_size idx, + uacpi_size src_size) +{ + if (uacpi_likely(idx < src_size)) + return UACPI_STATUS_OK; + + uacpi_error( + "invalid index %zu, %s@%p has %zu elements\n", + idx, uacpi_object_type_to_string(obj->type), obj, src_size + ); + return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX; +} + +static uacpi_status handle_index(struct execution_context *ctx) +{ + uacpi_status ret; + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *src; + struct item *dst; + uacpi_size idx; + + src = item_array_at(&op_ctx->items, 0)->obj; + idx = item_array_at(&op_ctx->items, 1)->obj->integer; + dst = item_array_at(&op_ctx->items, 3); + + switch (src->type) { + case UACPI_OBJECT_BUFFER: + case UACPI_OBJECT_STRING: { + uacpi_buffer_index *buf_idx; + uacpi_data_view buf; + get_object_storage(src, &buf, UACPI_FALSE); + + ret = ensure_valid_idx(src, idx, buf.length); + if (uacpi_unlikely_error(ret)) + return ret; + + dst->type = ITEM_OBJECT; + dst->obj = uacpi_create_object(UACPI_OBJECT_BUFFER_INDEX); + if (uacpi_unlikely(dst->obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + buf_idx = &dst->obj->buffer_index; + buf_idx->idx = idx; + buf_idx->buffer = src->buffer; + uacpi_shareable_ref(buf_idx->buffer); + + break; + } + case UACPI_OBJECT_PACKAGE: { + uacpi_package *pkg = src->package; + uacpi_object *obj; + + ret = ensure_valid_idx(src, idx, pkg->count); + if (uacpi_unlikely_error(ret)) + return ret; + + /* + * Lazily transform the package element into an internal reference + * to itself of type PKG_INDEX. This is needed to support stuff like + * CopyObject(..., Index(pkg, X)) where the new object must be + * propagated to anyone else with a currently alive index object. + * + * Sidenote: Yes, IndexOp is not a SimpleName, so technically it is + * illegal to CopyObject to it. However, yet again we fall + * victim to the NT ACPI driver implementation, which allows + * it just fine. 
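+ *
+ * Illustrative ASL relying on that behavior (hypothetical names):
+ *     Name (PKG, Package { 1, 2, 3 })
+ *     Local0 = Index (PKG, 0)
+ *     CopyObject ("new", Index (PKG, 0))
+ *     // DerefOf (Local0) now yields "new", since both index
+ *     // objects share the PKG_INDEX reference installed below.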
+ */ + obj = pkg->objects[idx]; + if (obj->type != UACPI_OBJECT_REFERENCE || + obj->flags != UACPI_REFERENCE_KIND_PKG_INDEX) { + + obj = uacpi_create_internal_reference( + UACPI_REFERENCE_KIND_PKG_INDEX, obj + ); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + pkg->objects[idx] = obj; + uacpi_object_unref(obj->inner_object); + } + + dst->obj = obj; + dst->type = ITEM_OBJECT; + uacpi_object_ref(dst->obj); + break; + } + default: + uacpi_error( + "invalid argument for Index: %s, " + "expected String/Buffer/Package\n", + uacpi_object_type_to_string(src->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return UACPI_STATUS_OK; +} + +static uacpi_u64 object_to_integer(const uacpi_object *obj, + uacpi_size max_buffer_bytes) +{ + uacpi_u64 dst; + + switch (obj->type) { + case UACPI_OBJECT_INTEGER: + dst = obj->integer; + break; + case UACPI_OBJECT_BUFFER: { + uacpi_size bytes; + bytes = UACPI_MIN(max_buffer_bytes, obj->buffer->size); + uacpi_memcpy_zerout(&dst, obj->buffer->data, sizeof(dst), bytes); + break; + } + case UACPI_OBJECT_STRING: + uacpi_string_to_integer( + obj->buffer->text, obj->buffer->size, UACPI_BASE_AUTO, &dst + ); + break; + default: + dst = 0; + break; + } + + return dst; +} + +static uacpi_status integer_to_string( + uacpi_u64 integer, uacpi_buffer *str, uacpi_bool is_hex +) +{ + int repr_len; + uacpi_char int_buf[21]; + uacpi_size final_size; + + repr_len = uacpi_snprintf( + int_buf, sizeof(int_buf), + is_hex ? "%"UACPI_PRIX64 : "%"UACPI_PRIu64, + UACPI_FMT64(integer) + ); + if (uacpi_unlikely(repr_len < 0)) + return UACPI_STATUS_INVALID_ARGUMENT; + + // 0x prefix + repr + \0 + final_size = (is_hex ? 2 : 0) + repr_len + 1; + + str->data = uacpi_kernel_alloc(final_size); + if (uacpi_unlikely(str->data == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + if (is_hex) { + str->text[0] = '0'; + str->text[1] = 'x'; + } + uacpi_memcpy(str->text + (is_hex ? 2 : 0), int_buf, repr_len + 1); + str->size = final_size; + + return UACPI_STATUS_OK; +} + +static uacpi_status buffer_to_string( + uacpi_buffer *buf, uacpi_buffer *str, uacpi_bool is_hex +) +{ + int repr_len; + uacpi_char int_buf[5]; + uacpi_size i, final_size; + uacpi_char *cursor; + + if (is_hex) { + final_size = 4 * buf->size; + } else { + final_size = 0; + + for (i = 0; i < buf->size; ++i) { + uacpi_u8 value = ((uacpi_u8*)buf->data)[i]; + + if (value < 10) + final_size += 1; + else if (value < 100) + final_size += 2; + else + final_size += 3; + } + } + + // Comma for every value but one + final_size += buf->size - 1; + + // Null terminator + final_size += 1; + + str->data = uacpi_kernel_alloc(final_size); + if (uacpi_unlikely(str->data == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + cursor = str->data; + + for (i = 0; i < buf->size; ++i) { + repr_len = uacpi_snprintf( + int_buf, sizeof(int_buf), + is_hex ? 
"0x%02X" : "%d", + ((uacpi_u8*)buf->data)[i] + ); + if (uacpi_unlikely(repr_len < 0)) { + uacpi_free(str->data, final_size); + str->data = UACPI_NULL; + return UACPI_STATUS_INVALID_ARGUMENT; + } + + uacpi_memcpy(cursor, int_buf, repr_len + 1); + cursor += repr_len; + + if (i != buf->size - 1) + *cursor++ = ','; + } + + str->size = final_size; + return UACPI_STATUS_OK; +} + +static uacpi_status do_make_empty_object(uacpi_buffer *buf, + uacpi_bool is_string) +{ + buf->text = uacpi_kernel_alloc_zeroed(sizeof(uacpi_char)); + if (uacpi_unlikely(buf->text == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + if (is_string) + buf->size = sizeof(uacpi_char); + + return UACPI_STATUS_OK; +} + +static uacpi_status make_null_string(uacpi_buffer *buf) +{ + return do_make_empty_object(buf, UACPI_TRUE); +} + +static uacpi_status make_null_buffer(uacpi_buffer *buf) +{ + /* + * Allocate at least 1 byte just to be safe, + * even for empty buffers. We still set the + * size to 0 though. + */ + return do_make_empty_object(buf, UACPI_FALSE); +} + +static uacpi_status handle_to(struct execution_context *ctx) +{ + uacpi_status ret = UACPI_STATUS_OK; + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *src, *dst; + + src = item_array_at(&op_ctx->items, 0)->obj; + dst = item_array_at(&op_ctx->items, 2)->obj; + + switch (op_ctx->op->code) { + case UACPI_AML_OP_ToIntegerOp: + // NT always takes the first 8 bytes, even for revision 1 + dst->integer = object_to_integer(src, 8); + break; + + case UACPI_AML_OP_ToHexStringOp: + case UACPI_AML_OP_ToDecimalStringOp: { + uacpi_bool is_hex = op_ctx->op->code == UACPI_AML_OP_ToHexStringOp; + + if (src->type == UACPI_OBJECT_INTEGER) { + ret = integer_to_string(src->integer, dst->buffer, is_hex); + break; + } else if (src->type == UACPI_OBJECT_BUFFER) { + if (uacpi_unlikely(src->buffer->size == 0)) + return make_null_string(dst->buffer); + + ret = buffer_to_string(src->buffer, dst->buffer, is_hex); + break; + } + UACPI_FALLTHROUGH; + } + case UACPI_AML_OP_ToBufferOp: { + uacpi_data_view buf; + uacpi_u8 *dst_buf; + + ret = get_object_storage(src, &buf, UACPI_TRUE); + if (uacpi_unlikely_error(ret)) + return ret; + + if (uacpi_unlikely(buf.length == 0)) + return make_null_buffer(dst->buffer); + + dst_buf = uacpi_kernel_alloc(buf.length); + if (uacpi_unlikely(dst_buf == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(dst_buf, buf.bytes, buf.length); + dst->buffer->data = dst_buf; + dst->buffer->size = buf.length; + break; + } + + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return ret; +} + +static uacpi_status handle_to_string(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_buffer *src_buf, *dst_buf; + uacpi_size req_len, len; + + src_buf = item_array_at(&op_ctx->items, 0)->obj->buffer; + req_len = item_array_at(&op_ctx->items, 1)->obj->integer; + dst_buf = item_array_at(&op_ctx->items, 3)->obj->buffer; + + len = UACPI_MIN(req_len, src_buf->size); + if (uacpi_unlikely(len == 0)) + return make_null_string(dst_buf); + + len = uacpi_strnlen(src_buf->text, len); + + dst_buf->text = uacpi_kernel_alloc(len + 1); + if (uacpi_unlikely(dst_buf->text == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(dst_buf->text, src_buf->data, len); + dst_buf->text[len] = '\0'; + dst_buf->size = len + 1; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_mid(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *src, *dst; + uacpi_data_view src_buf; + 
uacpi_buffer *dst_buf; + uacpi_size idx, len; + uacpi_bool is_string; + + src = item_array_at(&op_ctx->items, 0)->obj; + if (uacpi_unlikely(src->type != UACPI_OBJECT_STRING && + src->type != UACPI_OBJECT_BUFFER)) { + uacpi_error( + "invalid argument for Mid: %s, expected String/Buffer\n", + uacpi_object_type_to_string(src->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + idx = item_array_at(&op_ctx->items, 1)->obj->integer; + len = item_array_at(&op_ctx->items, 2)->obj->integer; + dst = item_array_at(&op_ctx->items, 4)->obj; + dst_buf = dst->buffer; + + is_string = src->type == UACPI_OBJECT_STRING; + get_object_storage(src, &src_buf, UACPI_FALSE); + + if (uacpi_unlikely(src_buf.length == 0 || idx >= src_buf.length || + len == 0)) { + if (src->type == UACPI_OBJECT_STRING) { + dst->type = UACPI_OBJECT_STRING; + return make_null_string(dst_buf); + } + + return make_null_buffer(dst_buf); + } + + // Guaranteed to be at least 1 here + len = UACPI_MIN(len, src_buf.length - idx); + + dst_buf->data = uacpi_kernel_alloc(len + is_string); + if (uacpi_unlikely(dst_buf->data == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(dst_buf->data, (uacpi_u8*)src_buf.bytes + idx, len); + dst_buf->size = len; + + if (is_string) { + dst_buf->text[dst_buf->size++] = '\0'; + dst->type = UACPI_OBJECT_STRING; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_concatenate(struct execution_context *ctx) +{ + uacpi_status ret = UACPI_STATUS_OK; + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *arg0, *arg1, *dst; + uacpi_u8 *dst_buf; + uacpi_size buf_size = 0; + + arg0 = item_array_at(&op_ctx->items, 0)->obj; + arg1 = item_array_at(&op_ctx->items, 1)->obj; + dst = item_array_at(&op_ctx->items, 3)->obj; + + switch (arg0->type) { + case UACPI_OBJECT_INTEGER: { + uacpi_u64 arg1_as_int; + uacpi_size int_size; + + int_size = sizeof_int(); + buf_size = int_size * 2; + + dst_buf = uacpi_kernel_alloc(buf_size); + if (uacpi_unlikely(dst_buf == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + arg1_as_int = object_to_integer(arg1, 8); + + uacpi_memcpy(dst_buf, &arg0->integer, int_size); + uacpi_memcpy(dst_buf+ int_size, &arg1_as_int, int_size); + break; + } + case UACPI_OBJECT_BUFFER: { + uacpi_buffer *arg0_buf = arg0->buffer; + uacpi_data_view arg1_buf = { 0 }; + + get_object_storage(arg1, &arg1_buf, UACPI_TRUE); + buf_size = arg0_buf->size + arg1_buf.length; + + dst_buf = uacpi_kernel_alloc(buf_size); + if (uacpi_unlikely(dst_buf == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(dst_buf, arg0_buf->data, arg0_buf->size); + uacpi_memcpy(dst_buf + arg0_buf->size, arg1_buf.bytes, arg1_buf.length); + break; + } + case UACPI_OBJECT_STRING: { + uacpi_char int_buf[17]; + void *arg1_ptr; + uacpi_size arg0_size, arg1_size; + uacpi_buffer *arg0_buf = arg0->buffer; + + switch (arg1->type) { + case UACPI_OBJECT_INTEGER: { + int size; + size = uacpi_snprintf(int_buf, sizeof(int_buf), "%"UACPI_PRIx64, + UACPI_FMT64(arg1->integer)); + if (size < 0) + return UACPI_STATUS_INVALID_ARGUMENT; + + arg1_ptr = int_buf; + arg1_size = size + 1; + break; + } + case UACPI_OBJECT_STRING: + arg1_ptr = arg1->buffer->data; + arg1_size = arg1->buffer->size; + break; + case UACPI_OBJECT_BUFFER: { + uacpi_buffer tmp_buf; + + ret = buffer_to_string(arg1->buffer, &tmp_buf, UACPI_TRUE); + if (uacpi_unlikely_error(ret)) + return ret; + + arg1_ptr = tmp_buf.data; + arg1_size = tmp_buf.size; + break; + } + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + arg0_size = 
arg0_buf->size ? arg0_buf->size - 1 : arg0_buf->size; + buf_size = arg0_size + arg1_size; + + dst_buf = uacpi_kernel_alloc(buf_size); + if (uacpi_unlikely(dst_buf == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto cleanup; + } + + uacpi_memcpy(dst_buf, arg0_buf->data, arg0_size); + uacpi_memcpy(dst_buf + arg0_size, arg1_ptr, arg1_size); + dst->type = UACPI_OBJECT_STRING; + + cleanup: + if (arg1->type == UACPI_OBJECT_BUFFER) + uacpi_free(arg1_ptr, arg1_size); + break; + } + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (uacpi_likely_success(ret)) { + dst->buffer->data = dst_buf; + dst->buffer->size = buf_size; + } + return ret; +} + +static uacpi_status handle_concatenate_res(struct execution_context *ctx) +{ + uacpi_status ret; + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_data_view buffer; + uacpi_object *arg0, *arg1, *dst; + uacpi_u8 *dst_buf; + uacpi_size dst_size, arg0_size, arg1_size; + + arg0 = item_array_at(&op_ctx->items, 0)->obj; + arg1 = item_array_at(&op_ctx->items, 1)->obj; + dst = item_array_at(&op_ctx->items, 3)->obj; + + uacpi_buffer_to_view(arg0->buffer, &buffer); + ret = uacpi_find_aml_resource_end_tag(buffer, &arg0_size); + if (uacpi_unlikely_error(ret)) + return ret; + + uacpi_buffer_to_view(arg1->buffer, &buffer); + ret = uacpi_find_aml_resource_end_tag(buffer, &arg1_size); + if (uacpi_unlikely_error(ret)) + return ret; + + dst_size = arg0_size + arg1_size + sizeof(struct acpi_resource_end_tag); + + dst_buf = uacpi_kernel_alloc(dst_size); + if (uacpi_unlikely(dst_buf == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + dst->buffer->data = dst_buf; + dst->buffer->size = dst_size; + + uacpi_memcpy(dst_buf, arg0->buffer->data, arg0_size); + uacpi_memcpy(dst_buf + arg0_size, arg1->buffer->data, arg1_size); + + /* + * Small item (0), End Tag (0x0F), length 1 + * Leave the checksum as 0 + */ + dst_buf[dst_size - 2] = + (ACPI_RESOURCE_END_TAG << ACPI_SMALL_ITEM_NAME_IDX) | + (sizeof(struct acpi_resource_end_tag) - 1); + dst_buf[dst_size - 1] = 0; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_sizeof(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *src, *dst; + + src = item_array_at(&op_ctx->items, 0)->obj; + dst = item_array_at(&op_ctx->items, 1)->obj; + + if (uacpi_likely(src->type == UACPI_OBJECT_REFERENCE)) + src = reference_unwind(src)->inner_object; + + switch (src->type) { + case UACPI_OBJECT_STRING: + case UACPI_OBJECT_BUFFER: { + uacpi_data_view buf; + get_object_storage(src, &buf, UACPI_FALSE); + + dst->integer = buf.length; + break; + } + + case UACPI_OBJECT_PACKAGE: + dst->integer = src->package->count; + break; + + default: + uacpi_error( + "invalid argument for Sizeof: %s, " + "expected String/Buffer/Package\n", + uacpi_object_type_to_string(src->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_object_type(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *src, *dst; + + src = item_array_at(&op_ctx->items, 0)->obj; + dst = item_array_at(&op_ctx->items, 1)->obj; + + if (uacpi_likely(src->type == UACPI_OBJECT_REFERENCE)) + src = reference_unwind(src)->inner_object; + + dst->integer = src->type; + if (dst->integer == UACPI_OBJECT_BUFFER_INDEX) + dst->integer = UACPI_OBJECT_BUFFER_FIELD; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_timer(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object 
*dst; + + dst = item_array_at(&op_ctx->items, 0)->obj; + dst->integer = uacpi_kernel_get_nanoseconds_since_boot() / 100; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_stall_or_sleep(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_u64 time; + + time = item_array_at(&op_ctx->items, 0)->obj->integer; + + if (op_ctx->op->code == UACPI_AML_OP_SleepOp) { + /* + * ACPICA doesn't allow sleeps longer than 2 seconds, + * so we shouldn't either. + */ + if (time > 2000) + time = 2000; + + uacpi_namespace_write_unlock(); + uacpi_kernel_sleep(time); + uacpi_namespace_write_lock(); + } else { + // Spec says this must evaluate to a ByteData + if (time > 0xFF) + time = 0xFF; + uacpi_kernel_stall(time); + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_bcd(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_u64 src, dst = 0; + uacpi_size i; + uacpi_object *dst_obj; + + src = item_array_at(&op_ctx->items, 0)->obj->integer; + dst_obj = item_array_at(&op_ctx->items, 2)->obj; + i = 64; + + /* + * NOTE: ACPICA just errors out for invalid BCD, but NT allows it just fine. + * FromBCD matches NT behavior 1:1 even for invalid BCD, but ToBCD + * produces different results when the input is too large. + */ + if (op_ctx->op->code == UACPI_AML_OP_FromBCDOp) { + do { + i -= 4; + dst *= 10; + dst += (src >> i) & 0xF; + } while (i); + } else { + while (src != 0) { + dst >>= 4; + i -= 4; + dst |= (src % 10) << 60; + src /= 10; + } + + dst >>= (i % 64); + } + + dst_obj->integer = dst; + return UACPI_STATUS_OK; +} + +static uacpi_status handle_unload(struct execution_context *ctx) +{ + UACPI_UNUSED(ctx); + + /* + * Technically this doesn't exist in the wild, from the dumps that I have + * the only user of the Unload opcode is the Surface Pro 3, which triggers + * an unload of some I2C-related table as a response to some event. + * + * This op has been long deprecated by the specification exactly because + * it hasn't really been used by anyone and the fact that it introduces + * an enormous layer of complexity, which no driver is really prepared to + * deal with (aka namespace nodes disappearing under its feet). + * + * Just pretend we have actually unloaded whatever the AML asked for, if it + * ever tries to re-load this table that will just skip opcodes that create + * already existing objects, which should be good enough and mostly + * transparent to the AML. + */ + uacpi_warn("refusing to unload a table from AML\n"); + return UACPI_STATUS_OK; +} + +static uacpi_status handle_logical_not(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *src, *dst; + + src = item_array_at(&op_ctx->items, 0)->obj; + dst = item_array_at(&op_ctx->items, 1)->obj; + + dst->type = UACPI_OBJECT_INTEGER; + dst->integer = src->integer ? 
0 : ones(); + + return UACPI_STATUS_OK; +} + +static uacpi_bool handle_logical_equality(uacpi_object *lhs, uacpi_object *rhs) +{ + uacpi_bool res = UACPI_FALSE; + + if (lhs->type == UACPI_OBJECT_STRING || lhs->type == UACPI_OBJECT_BUFFER) { + res = lhs->buffer->size == rhs->buffer->size; + + if (res && lhs->buffer->size) { + res = uacpi_memcmp( + lhs->buffer->data, + rhs->buffer->data, + lhs->buffer->size + ) == 0; + } + } else if (lhs->type == UACPI_OBJECT_INTEGER) { + res = lhs->integer == rhs->integer; + } + + return res; +} + +static uacpi_bool handle_logical_less_or_greater( + uacpi_aml_op op, uacpi_object *lhs, uacpi_object *rhs +) +{ + if (lhs->type == UACPI_OBJECT_STRING || lhs->type == UACPI_OBJECT_BUFFER) { + int res; + uacpi_buffer *lhs_buf, *rhs_buf; + + lhs_buf = lhs->buffer; + rhs_buf = rhs->buffer; + + res = uacpi_memcmp(lhs_buf->data, rhs_buf->data, + UACPI_MIN(lhs_buf->size, rhs_buf->size)); + if (res == 0) { + if (lhs_buf->size < rhs_buf->size) + res = -1; + else if (lhs_buf->size > rhs_buf->size) + res = 1; + } + + if (op == UACPI_AML_OP_LLessOp) + return res < 0; + + return res > 0; + } + + if (op == UACPI_AML_OP_LLessOp) + return lhs->integer < rhs->integer; + + return lhs->integer > rhs->integer; +} + +static uacpi_status handle_binary_logic(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_aml_op op = op_ctx->op->code; + uacpi_object *lhs, *rhs, *dst; + uacpi_bool res; + + lhs = item_array_at(&op_ctx->items, 0)->obj; + rhs = item_array_at(&op_ctx->items, 1)->obj; + dst = item_array_at(&op_ctx->items, 2)->obj; + + switch (op) { + case UACPI_AML_OP_LEqualOp: + case UACPI_AML_OP_LLessOp: + case UACPI_AML_OP_LGreaterOp: + // TODO: typecheck at parse time + if (lhs->type != rhs->type) { + uacpi_error( + "don't know how to do a logical comparison of '%s' and '%s'\n", + uacpi_object_type_to_string(lhs->type), + uacpi_object_type_to_string(rhs->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + if (op == UACPI_AML_OP_LEqualOp) + res = handle_logical_equality(lhs, rhs); + else + res = handle_logical_less_or_greater(op, lhs, rhs); + break; + default: { + uacpi_u64 lhs_int, rhs_int; + + // NT only looks at the first 4 bytes of a buffer + lhs_int = object_to_integer(lhs, 4); + rhs_int = object_to_integer(rhs, 4); + + if (op == UACPI_AML_OP_LandOp) + res = lhs_int && rhs_int; + else + res = lhs_int || rhs_int; + break; + } + } + + dst->integer = res ? 
ones() : 0; + return UACPI_STATUS_OK; +} + +enum match_op { + MTR = 0, + MEQ = 1, + MLE = 2, + MLT = 3, + MGE = 4, + MGT = 5, +}; + +static uacpi_bool match_one(enum match_op op, uacpi_u64 lhs, uacpi_u64 rhs) +{ + switch (op) { + case MTR: + return UACPI_TRUE; + case MEQ: + return lhs == rhs; + case MLE: + return lhs <= rhs; + case MLT: + return lhs < rhs; + case MGE: + return lhs >= rhs; + case MGT: + return lhs > rhs; + default: + return UACPI_FALSE; + } +} + +static uacpi_status handle_match(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_package *pkg; + uacpi_u64 operand0, operand1, start_idx, i; + enum match_op mop0, mop1; + uacpi_object *dst; + + pkg = item_array_at(&op_ctx->items, 0)->obj->package; + mop0 = item_array_at(&op_ctx->items, 1)->immediate; + operand0 = item_array_at(&op_ctx->items, 2)->obj->integer; + mop1 = item_array_at(&op_ctx->items, 3)->immediate; + operand1 = item_array_at(&op_ctx->items, 4)->obj->integer; + start_idx = item_array_at(&op_ctx->items, 5)->obj->integer; + dst = item_array_at(&op_ctx->items, 6)->obj; + + for (i = start_idx; i < pkg->count; ++i) { + uacpi_object *obj = pkg->objects[i]; + + if (obj->type != UACPI_OBJECT_INTEGER) + continue; + + if (match_one(mop0, obj->integer, operand0) && + match_one(mop1, obj->integer, operand1)) + break; + } + + if (i < pkg->count) + dst->integer = i; + else + dst->integer = ones(); + + return UACPI_STATUS_OK; +} + +/* + * PkgLength := + * PkgLeadByte | + * <pkgleadbyte bytedata> | + * <pkgleadbyte bytedata bytedata> | <pkgleadbyte bytedata bytedata bytedata> + * PkgLeadByte := + * <bit 7-6: bytedata count that follows (0-3)> + * <bit 5-4: only used if pkglength < 63> + * <bit 3-0: least significant package length nybble> + */ +static uacpi_status parse_package_length(struct call_frame *frame, + struct package_length *out_pkg) +{ + uacpi_u32 left, size; + uacpi_u8 *data, marker_length; + + out_pkg->begin = frame->code_offset; + marker_length = 1; + + left = call_frame_code_bytes_left(frame); + if (uacpi_unlikely(left < 1)) + return UACPI_STATUS_AML_BAD_ENCODING; + + data = call_frame_cursor(frame); + marker_length += *data >> 6; + + if (uacpi_unlikely(left < marker_length)) + return UACPI_STATUS_AML_BAD_ENCODING; + + switch (marker_length) { + case 1: + size = *data & 0x3F; + break; + case 2: + case 3: + case 4: { + uacpi_u32 temp_byte = 0; + + size = *data & 0xF; + uacpi_memcpy(&temp_byte, data + 1, marker_length - 1); + + // marker_length - 1 is at most 3, so this shift is safe + size |= temp_byte << 4; + break; + } + } + + frame->code_offset += marker_length; + + out_pkg->end = out_pkg->begin + size; + if (uacpi_unlikely(out_pkg->end < out_pkg->begin)) { + uacpi_error( + "PkgLength overflow: start=%u, size=%u\n", out_pkg->begin, size + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + return UACPI_STATUS_OK; +} + +/* + * ByteData + * // bit 0-2: ArgCount (0-7) + * // bit 3: SerializeFlag + * // 0 NotSerialized + * // 1 Serialized + * // bit 4-7: SyncLevel (0x00-0x0f) + */ +static void init_method_flags(uacpi_control_method *method, uacpi_u8 flags_byte) +{ + method->args = flags_byte & 0x7; + method->is_serialized = (flags_byte >> 3) & 1; + method->sync_level = flags_byte >> 4; +} + +static uacpi_status handle_create_method(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + struct uacpi_control_method *this_method, *method; + struct package_length *pkg; + struct uacpi_namespace_node *node; + struct uacpi_object *dst; + uacpi_u32 
method_begin_offset, method_size; + + this_method = ctx->cur_frame->method; + pkg = &item_array_at(&op_ctx->items, 0)->pkg; + node = item_array_at(&op_ctx->items, 1)->node; + method_begin_offset = item_array_at(&op_ctx->items, 3)->immediate; + + if (uacpi_unlikely(pkg->end < pkg->begin || + pkg->end < method_begin_offset || + pkg->end > this_method->size)) { + uacpi_error( + "invalid method %.4s bounds [%u..%u] (parent size is %u)\n", + node->name.text, method_begin_offset, pkg->end, this_method->size + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + dst = item_array_at(&op_ctx->items, 4)->obj; + + method = dst->method; + method_size = pkg->end - method_begin_offset; + + if (method_size) { + method->code = uacpi_kernel_alloc(method_size); + if (uacpi_unlikely(method->code == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy( + method->code, + ctx->cur_frame->method->code + method_begin_offset, + method_size + ); + method->size = method_size; + method->owns_code = 1; + } + + init_method_flags(method, item_array_at(&op_ctx->items, 2)->immediate); + + node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED, + dst); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_create_mutex_or_event(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_namespace_node *node; + uacpi_object *dst; + + node = item_array_at(&op_ctx->items, 0)->node; + + if (op_ctx->op->code == UACPI_AML_OP_MutexOp) { + dst = item_array_at(&op_ctx->items, 2)->obj; + + // bits 0-3: SyncLevel (0x00-0x0f), bits 4-7: Reserved (must be 0) + dst->mutex->sync_level = item_array_at(&op_ctx->items, 1)->immediate; + dst->mutex->sync_level &= 0xF; + } else { + dst = item_array_at(&op_ctx->items, 1)->obj; + } + + node->object = uacpi_create_internal_reference( + UACPI_REFERENCE_KIND_NAMED, + dst + ); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_event_ctl(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *obj; + + obj = uacpi_unwrap_internal_reference( + item_array_at(&op_ctx->items, 0)->obj + ); + if (uacpi_unlikely(obj->type != UACPI_OBJECT_EVENT)) { + uacpi_error( + "%s: invalid argument '%s', expected an Event object\n", + op_ctx->op->name, uacpi_object_type_to_string(obj->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + switch (op_ctx->op->code) + { + case UACPI_AML_OP_SignalOp: + uacpi_kernel_signal_event(obj->event->handle); + break; + case UACPI_AML_OP_ResetOp: + uacpi_kernel_reset_event(obj->event->handle); + break; + case UACPI_AML_OP_WaitOp: { + uacpi_u64 timeout; + uacpi_bool ret; + + timeout = item_array_at(&op_ctx->items, 1)->obj->integer; + if (timeout > 0xFFFF) + timeout = 0xFFFF; + + uacpi_namespace_write_unlock(); + ret = uacpi_kernel_wait_for_event(obj->event->handle, timeout); + uacpi_namespace_write_lock(); + + /* + * The return value here is inverted, we return 0 for success and Ones + * for timeout and everything else. 
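+ *
+ * i.e. a (hypothetical) ASL caller tests for success against Zero:
+ *     If (Wait (EVT, 100) == Zero) {
+ *         // the event was signaled before the timeout expired
+ *     }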
+ */ + if (ret) + item_array_at(&op_ctx->items, 2)->obj->integer = 0; + break; + } + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_mutex_ctl(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_object *obj; + + obj = uacpi_unwrap_internal_reference( + item_array_at(&op_ctx->items, 0)->obj + ); + if (uacpi_unlikely(obj->type != UACPI_OBJECT_MUTEX)) { + uacpi_error( + "%s: invalid argument '%s', expected a Mutex object\n", + op_ctx->op->name, uacpi_object_type_to_string(obj->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + switch (op_ctx->op->code) + { + case UACPI_AML_OP_AcquireOp: { + uacpi_u64 timeout; + uacpi_u64 *return_value; + uacpi_status ret; + + return_value = &item_array_at(&op_ctx->items, 2)->obj->integer; + + if (uacpi_unlikely(ctx->sync_level > obj->mutex->sync_level)) { + uacpi_warn( + "ignoring attempt to acquire mutex @%p with a lower sync level " + "(%d < %d)\n", obj->mutex, obj->mutex->sync_level, + ctx->sync_level + ); + break; + } + + timeout = item_array_at(&op_ctx->items, 1)->immediate; + if (timeout > 0xFFFF) + timeout = 0xFFFF; + + if (uacpi_this_thread_owns_aml_mutex(obj->mutex)) { + ret = uacpi_acquire_aml_mutex(obj->mutex, timeout); + if (uacpi_likely_success(ret)) + *return_value = 0; + break; + } + + ret = uacpi_acquire_aml_mutex(obj->mutex, timeout); + if (uacpi_unlikely_error(ret)) + break; + + ret = held_mutexes_array_push(&ctx->held_mutexes, obj->mutex); + if (uacpi_unlikely_error(ret)) { + uacpi_release_aml_mutex(obj->mutex); + return ret; + } + + ctx->sync_level = obj->mutex->sync_level; + *return_value = 0; + break; + } + + case UACPI_AML_OP_ReleaseOp: { + uacpi_status ret; + + if (!uacpi_this_thread_owns_aml_mutex(obj->mutex)) { + uacpi_warn( + "attempted to release not-previously-acquired mutex object " + "@%p (%p)\n", obj->mutex, obj->mutex->handle + ); + break; + } + + ret = held_mutexes_array_remove_and_release( + &ctx->held_mutexes, obj->mutex, + FORCE_RELEASE_NO + ); + if (uacpi_likely_success(ret)) { + uacpi_mutex **last_mutex; + + last_mutex = held_mutexes_array_last(&ctx->held_mutexes); + if (last_mutex == UACPI_NULL) { + ctx->sync_level = 0; + break; + } + + ctx->sync_level = (*last_mutex)->sync_level; + } + break; + } + + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_notify(struct execution_context *ctx) +{ + uacpi_status ret; + struct op_context *op_ctx = ctx->cur_op_ctx; + struct uacpi_namespace_node *node; + uacpi_u64 value; + + node = item_array_at(&op_ctx->items, 0)->node; + value = item_array_at(&op_ctx->items, 1)->obj->integer; + + ret = uacpi_notify_all(node, value); + if (uacpi_likely_success(ret)) + return ret; + + if (ret == UACPI_STATUS_NO_HANDLER) { + const uacpi_char *path; + + path = uacpi_namespace_node_generate_absolute_path(node); + uacpi_warn( + "ignoring firmware Notify(%s, 0x%"UACPI_PRIX64") request, " + "no listeners\n", path, UACPI_FMT64(value) + ); + uacpi_free_dynamic_string(path); + + return UACPI_STATUS_OK; + } + + if (ret == UACPI_STATUS_INVALID_ARGUMENT) { + uacpi_error("Notify() called on an invalid object %.4s\n", + node->name.text); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return ret; +} + +static uacpi_status handle_firmware_request(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_firmware_request req = { 0 }; + + switch (op_ctx->op->code) { + case 
UACPI_AML_OP_BreakPointOp: + req.type = UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT; + req.breakpoint.ctx = ctx; + break; + case UACPI_AML_OP_FatalOp: + req.type = UACPI_FIRMWARE_REQUEST_TYPE_FATAL; + req.fatal.type = item_array_at(&op_ctx->items, 0)->immediate; + req.fatal.code = item_array_at(&op_ctx->items, 1)->immediate; + req.fatal.arg = item_array_at(&op_ctx->items, 2)->obj->integer; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + uacpi_namespace_write_unlock(); + uacpi_kernel_handle_firmware_request(&req); + uacpi_namespace_write_lock(); + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_create_named(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + struct uacpi_namespace_node *node; + uacpi_object *src; + + node = item_array_at(&op_ctx->items, 0)->node; + src = item_array_at(&op_ctx->items, 1)->obj; + + node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED, + src); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +static uacpi_object_type buffer_field_get_read_type( + struct uacpi_buffer_field *field +) +{ + if (field->bit_length > (g_uacpi_rt_ctx.is_rev1 ? 32u : 64u) || + field->force_buffer) + return UACPI_OBJECT_BUFFER; + + return UACPI_OBJECT_INTEGER; +} + +static uacpi_status field_get_read_type( + uacpi_object *obj, uacpi_object_type *out_type +) +{ + if (obj->type == UACPI_OBJECT_BUFFER_FIELD) { + *out_type = buffer_field_get_read_type(&obj->buffer_field); + return UACPI_STATUS_OK; + } + + return uacpi_field_unit_get_read_type(obj->field_unit, out_type); +} + +static uacpi_status field_byte_size( + uacpi_object *obj, uacpi_size *out_size +) +{ + uacpi_size bit_length; + + if (obj->type == UACPI_OBJECT_BUFFER_FIELD) { + bit_length = obj->buffer_field.bit_length; + } else { + uacpi_status ret; + + ret = uacpi_field_unit_get_bit_length(obj->field_unit, &bit_length); + if (uacpi_unlikely_error(ret)) + return ret; + } + + *out_size = uacpi_round_up_bits_to_bytes(bit_length); + return UACPI_STATUS_OK; +} + +static uacpi_status handle_field_read(struct execution_context *ctx) +{ + uacpi_status ret; + struct op_context *op_ctx = ctx->cur_op_ctx; + struct uacpi_namespace_node *node; + uacpi_object *src_obj, *dst_obj; + uacpi_size dst_size; + void *dst = UACPI_NULL; + uacpi_data_view wtr_response = { 0 }; + + node = item_array_at(&op_ctx->items, 0)->node; + src_obj = uacpi_namespace_node_get_object(node); + dst_obj = item_array_at(&op_ctx->items, 1)->obj; + + if (op_ctx->op->code == UACPI_AML_OP_InternalOpReadFieldAsBuffer) { + uacpi_buffer *buf; + + ret = field_byte_size(src_obj, &dst_size); + if (uacpi_unlikely_error(ret)) + return ret; + + if (dst_size != 0) { + buf = dst_obj->buffer; + + dst = uacpi_kernel_alloc_zeroed(dst_size); + if (dst == UACPI_NULL) + return UACPI_STATUS_OUT_OF_MEMORY; + + buf->data = dst; + buf->size = dst_size; + } + } else { + dst = &dst_obj->integer; + dst_size = sizeof(uacpi_u64); + } + + if (src_obj->type == UACPI_OBJECT_BUFFER_FIELD) { + uacpi_read_buffer_field(&src_obj->buffer_field, dst); + return UACPI_STATUS_OK; + } + + ret = uacpi_read_field_unit( + src_obj->field_unit, dst, dst_size, &wtr_response + ); + if (uacpi_unlikely_error(ret)) + return ret; + + if (wtr_response.data != UACPI_NULL) { + uacpi_buffer *buf; + + buf = dst_obj->buffer; + buf->data = wtr_response.data; + buf->size = wtr_response.length; + } + + return ret; +} + +static uacpi_status handle_create_buffer_field(struct execution_context 
*ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + struct uacpi_namespace_node *node; + uacpi_buffer *src_buf; + uacpi_object *field_obj; + uacpi_buffer_field *field; + + /* + * Layout of items here: + * [0] -> Type checked source buffer object + * [1] -> Byte/bit index integer object + * [2] ( if CreateField) -> bit length integer object + * [3] (2 if not CreateField) -> the new namespace node + * [4] (3 if not CreateField) -> the buffer field object we're creating here + */ + src_buf = item_array_at(&op_ctx->items, 0)->obj->buffer; + + if (op_ctx->op->code == UACPI_AML_OP_CreateFieldOp) { + uacpi_object *idx_obj, *len_obj; + + idx_obj = item_array_at(&op_ctx->items, 1)->obj; + len_obj = item_array_at(&op_ctx->items, 2)->obj; + node = item_array_at(&op_ctx->items, 3)->node; + field_obj = item_array_at(&op_ctx->items, 4)->obj; + field = &field_obj->buffer_field; + + field->bit_index = idx_obj->integer; + + if (uacpi_unlikely(!len_obj->integer || + len_obj->integer > 0xFFFFFFFF)) { + uacpi_error("invalid bit field length (%u)\n", field->bit_length); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + field->bit_length = len_obj->integer; + field->force_buffer = UACPI_TRUE; + } else { + uacpi_object *idx_obj; + + idx_obj = item_array_at(&op_ctx->items, 1)->obj; + node = item_array_at(&op_ctx->items, 2)->node; + field_obj = item_array_at(&op_ctx->items, 3)->obj; + field = &field_obj->buffer_field; + + field->bit_index = idx_obj->integer; + switch (op_ctx->op->code) { + case UACPI_AML_OP_CreateBitFieldOp: + field->bit_length = 1; + break; + case UACPI_AML_OP_CreateByteFieldOp: + field->bit_length = 8; + break; + case UACPI_AML_OP_CreateWordFieldOp: + field->bit_length = 16; + break; + case UACPI_AML_OP_CreateDWordFieldOp: + field->bit_length = 32; + break; + case UACPI_AML_OP_CreateQWordFieldOp: + field->bit_length = 64; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (op_ctx->op->code != UACPI_AML_OP_CreateBitFieldOp) + field->bit_index *= 8; + } + + if (uacpi_unlikely((field->bit_index + field->bit_length) > + src_buf->size * 8)) { + uacpi_error( + "invalid buffer field: bits [%zu..%zu], buffer size is %zu bytes\n", + field->bit_index, field->bit_index + field->bit_length, + src_buf->size + ); + return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX; + } + + field->backing = src_buf; + uacpi_shareable_ref(field->backing); + node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED, + field_obj); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_control_flow(struct execution_context *ctx) +{ + struct call_frame *frame = ctx->cur_frame; + struct op_context *op_ctx = ctx->cur_op_ctx; + + if (uacpi_unlikely(frame->last_while == UACPI_NULL)) { + uacpi_error( + "attempting to %s outside of a While block\n", + op_ctx->op->code == UACPI_AML_OP_BreakOp ? 
"Break" : "Continue" + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + for (;;) { + if (ctx->cur_block != frame->last_while) { + frame_reset_post_end_block(ctx, ctx->cur_block->type); + continue; + } + + if (op_ctx->op->code == UACPI_AML_OP_BreakOp) + frame->code_offset = ctx->cur_block->end; + else + frame->code_offset = ctx->cur_block->begin; + frame_reset_post_end_block(ctx, ctx->cur_block->type); + break; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status create_named_scope(struct op_context *op_ctx) +{ + uacpi_namespace_node *node; + uacpi_object *obj; + + node = item_array_at(&op_ctx->items, 1)->node; + obj = item_array_last(&op_ctx->items)->obj; + + switch (op_ctx->op->code) { + case UACPI_AML_OP_ProcessorOp: { + uacpi_processor *proc = obj->processor; + proc->id = item_array_at(&op_ctx->items, 2)->immediate; + proc->block_address = item_array_at(&op_ctx->items, 3)->immediate; + proc->block_length = item_array_at(&op_ctx->items, 4)->immediate; + break; + } + + case UACPI_AML_OP_PowerResOp: { + uacpi_power_resource *power_res = &obj->power_resource; + power_res->system_level = item_array_at(&op_ctx->items, 2)->immediate; + power_res->resource_order = item_array_at(&op_ctx->items, 3)->immediate; + break; + } + + default: + break; + } + + node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED, + obj); + if (uacpi_unlikely(node->object == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_code_block(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + + switch (op_ctx->op->code) { + case UACPI_AML_OP_ProcessorOp: + case UACPI_AML_OP_PowerResOp: + case UACPI_AML_OP_ThermalZoneOp: + case UACPI_AML_OP_DeviceOp: { + uacpi_status ret; + + ret = create_named_scope(op_ctx); + if (uacpi_unlikely_error(ret)) + return ret; + + UACPI_FALLTHROUGH; + } + case UACPI_AML_OP_ScopeOp: + case UACPI_AML_OP_IfOp: + case UACPI_AML_OP_ElseOp: + case UACPI_AML_OP_WhileOp: { + break; + } + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return begin_block_execution(ctx); +} + +static uacpi_status handle_return(struct execution_context *ctx) +{ + uacpi_status ret; + uacpi_object *dst = UACPI_NULL; + + ctx->cur_frame->code_offset = ctx->cur_frame->method->size; + ret = method_get_ret_object(ctx, &dst); + + if (uacpi_unlikely_error(ret)) + return ret; + if (dst == UACPI_NULL) + return UACPI_STATUS_OK; + + /* + * Should be possible to move here if method returns a literal + * like Return(Buffer { ... }), otherwise we have to copy just to + * be safe. 
+ */ + return uacpi_object_assign( + dst, + item_array_at(&ctx->cur_op_ctx->items, 0)->obj, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY + ); +} + +static void refresh_ctx_pointers(struct execution_context *ctx) +{ + struct call_frame *frame = ctx->cur_frame; + + if (frame == UACPI_NULL) { + ctx->cur_op_ctx = UACPI_NULL; + ctx->prev_op_ctx = UACPI_NULL; + ctx->cur_block = UACPI_NULL; + return; + } + + ctx->cur_op_ctx = op_context_array_last(&frame->pending_ops); + ctx->prev_op_ctx = op_context_array_one_before_last(&frame->pending_ops); + ctx->cur_block = code_block_array_last(&frame->code_blocks); +} + +static uacpi_bool ctx_has_non_preempted_op(struct execution_context *ctx) +{ + return ctx->cur_op_ctx && !ctx->cur_op_ctx->preempted; +} + +enum op_trace_action_type { + OP_TRACE_ACTION_BEGIN, + OP_TRACE_ACTION_RESUME, + OP_TRACE_ACTION_END, +}; + +static const uacpi_char *const op_trace_action_types[3] = { + [OP_TRACE_ACTION_BEGIN] = "BEGIN", + [OP_TRACE_ACTION_RESUME] = "RESUME", + [OP_TRACE_ACTION_END] = "END", +}; + +static inline void trace_op( + const struct uacpi_op_spec *op, enum op_trace_action_type action +) +{ + uacpi_debug( + "%s OP '%s' (0x%04X)\n", + op_trace_action_types[action], op->name, op->code + ); +} + +static inline void trace_pop(uacpi_u8 pop) +{ + uacpi_debug(" pOP: %s (0x%02X)\n", uacpi_parse_op_to_string(pop), pop); +} + +static uacpi_status frame_push_args(struct call_frame *frame, + struct op_context *op_ctx) +{ + uacpi_size i; + + /* + * MethodCall items: + * items[0] -> method namespace node + * items[1] -> immediate that was used for parsing the arguments + * items[2...nargs-1] -> method arguments + * items[-1] -> return value object + * + * Here we only care about the arguments though. + */ + for (i = 2; i < item_array_size(&op_ctx->items) - 1; i++) { + uacpi_object *src, *dst; + + src = item_array_at(&op_ctx->items, i)->obj; + + dst = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_ARG, src); + if (uacpi_unlikely(dst == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + frame->args[i - 2] = dst; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status frame_setup_base_scope(struct call_frame *frame, + uacpi_namespace_node *scope, + uacpi_control_method *method) +{ + struct code_block *block; + + block = code_block_array_alloc(&frame->code_blocks); + if (uacpi_unlikely(block == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + block->type = CODE_BLOCK_SCOPE; + block->node = scope; + block->begin = 0; + block->end = method->size; + frame->method = method; + frame->cur_scope = scope; + return UACPI_STATUS_OK; +} + +static uacpi_status push_new_frame(struct execution_context *ctx, + struct call_frame **out_frame) +{ + struct call_frame_array *call_stack = &ctx->call_stack; + struct call_frame *prev_frame; + + *out_frame = call_frame_array_calloc(call_stack); + if (uacpi_unlikely(*out_frame == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + /* + * Allocating a new frame might have reallocated the dynamic buffer so our + * execution_context members might now be pointing to freed memory. + * Refresh them here. 
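+ *
+ * In particular, ctx->cur_frame may now dangle, which is why it is
+ * re-fetched from the (possibly moved) array right below.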
+ */ + prev_frame = call_frame_array_one_before_last(call_stack); + ctx->cur_frame = prev_frame; + refresh_ctx_pointers(ctx); + + return UACPI_STATUS_OK; +} + +static uacpi_bool maybe_end_block(struct execution_context *ctx) +{ + struct code_block *block = ctx->cur_block; + struct call_frame *cur_frame = ctx->cur_frame; + + if (!block) + return UACPI_FALSE; + if (cur_frame->code_offset != block->end) + return UACPI_FALSE; + + if (block->type == CODE_BLOCK_WHILE) + cur_frame->code_offset = block->begin; + + frame_reset_post_end_block(ctx, block->type); + return UACPI_TRUE; +} + +static uacpi_status store_to_target( + uacpi_object *dst, uacpi_object *src, uacpi_data_view *wtr_response +) +{ + uacpi_status ret; + + switch (dst->type) { + case UACPI_OBJECT_DEBUG: + ret = debug_store(src); + break; + case UACPI_OBJECT_REFERENCE: + ret = store_to_reference(dst, src, wtr_response); + break; + + case UACPI_OBJECT_BUFFER_INDEX: + src = uacpi_unwrap_internal_reference(src); + ret = object_assign_with_implicit_cast(dst, src, wtr_response); + break; + + case UACPI_OBJECT_INTEGER: + // NULL target + if (dst->integer == 0) { + ret = UACPI_STATUS_OK; + break; + } + UACPI_FALLTHROUGH; + default: + uacpi_error("attempted to store to an invalid target: %s\n", + uacpi_object_type_to_string(dst->type)); + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return ret; +} + +static uacpi_status handle_copy_object_or_store(struct execution_context *ctx) +{ + uacpi_object *src, *dst; + struct op_context *op_ctx = ctx->cur_op_ctx; + + src = item_array_at(&op_ctx->items, 0)->obj; + dst = item_array_at(&op_ctx->items, 1)->obj; + + if (op_ctx->op->code == UACPI_AML_OP_StoreOp) { + uacpi_status ret; + uacpi_data_view wtr_response = { 0 }; + + ret = store_to_target(dst, src, &wtr_response); + if (uacpi_unlikely_error(ret)) + return ret; + + /* + * This was a write-then-read field access since we got a response + * buffer back from this store. Now we have to return this buffer + * as a prvalue from the StoreOp so that it can be used by AML to + * retrieve the response. + */ + if (wtr_response.data != UACPI_NULL) { + uacpi_object *wtr_response_obj; + + wtr_response_obj = uacpi_create_object(UACPI_OBJECT_BUFFER); + if (uacpi_unlikely(wtr_response_obj == UACPI_NULL)) { + uacpi_free(wtr_response.data, wtr_response.length); + return UACPI_STATUS_OUT_OF_MEMORY; + } + + wtr_response_obj->buffer->data = wtr_response.data; + wtr_response_obj->buffer->size = wtr_response.length; + + uacpi_object_unref(src); + item_array_at(&op_ctx->items, 0)->obj = wtr_response_obj; + } + + return ret; + } + + if (dst->type != UACPI_OBJECT_REFERENCE) + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + + return copy_object_to_reference(dst, src); +} + +static uacpi_status handle_inc_dec(struct execution_context *ctx) +{ + uacpi_object *src, *dst; + struct op_context *op_ctx = ctx->cur_op_ctx; + uacpi_bool field_allowed = UACPI_FALSE; + uacpi_object_type true_src_type; + uacpi_status ret; + + src = item_array_at(&op_ctx->items, 0)->obj; + dst = item_array_at(&op_ctx->items, 1)->obj; + + if (src->type == UACPI_OBJECT_REFERENCE) { + /* + * Increment/Decrement are the only two operators that modify the value + * in-place, thus we need very specific dereference rules here. + * + * Reading buffer fields & field units is only allowed if we were passed + * a namestring directly as opposed to some nested reference chain + * containing a field at the bottom. 
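+ *
+ * Illustrative ASL (hypothetical names):
+ *     Increment (FLD)      // OK: namestring resolves straight to a field
+ *     Local0 = RefOf (FLD)
+ *     Increment (Local0)   // rejected: the field sits at the bottom
+ *                          // of a reference chain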
+ */ + if (src->flags == UACPI_REFERENCE_KIND_NAMED) + field_allowed = src->inner_object->type != UACPI_OBJECT_REFERENCE; + + src = reference_unwind(src)->inner_object; + } // else buffer index + + true_src_type = src->type; + + switch (true_src_type) { + case UACPI_OBJECT_INTEGER: + dst->integer = src->integer; + break; + case UACPI_OBJECT_FIELD_UNIT: + case UACPI_OBJECT_BUFFER_FIELD: + if (uacpi_unlikely(!field_allowed)) + goto out_bad_type; + + ret = field_get_read_type(src, &true_src_type); + if (uacpi_unlikely_error(ret)) + goto out_bad_type; + if (true_src_type != UACPI_OBJECT_INTEGER) + goto out_bad_type; + + if (src->type == UACPI_OBJECT_FIELD_UNIT) { + ret = uacpi_read_field_unit( + src->field_unit, &dst->integer, sizeof_int(), + UACPI_NULL + ); + if (uacpi_unlikely_error(ret)) + return ret; + } else { + uacpi_read_buffer_field(&src->buffer_field, &dst->integer); + } + break; + case UACPI_OBJECT_BUFFER_INDEX: + dst->integer = *buffer_index_cursor(&src->buffer_index); + break; + default: + goto out_bad_type; + } + + if (op_ctx->op->code == UACPI_AML_OP_IncrementOp) + dst->integer++; + else + dst->integer--; + + return UACPI_STATUS_OK; + +out_bad_type: + uacpi_error("Increment/Decrement: invalid object type '%s'\n", + uacpi_object_type_to_string(true_src_type)); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; +} + +static uacpi_status enter_method( + struct execution_context *ctx, struct call_frame *new_frame, + uacpi_control_method *method +) +{ + uacpi_status ret = UACPI_STATUS_OK; + + uacpi_shareable_ref(method); + + if (!method->is_serialized) + return ret; + + if (uacpi_unlikely(ctx->sync_level > method->sync_level)) { + uacpi_error( + "cannot invoke method @%p, sync level %d is too low " + "(current is %d)\n", + method, method->sync_level, ctx->sync_level + ); + return UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH; + } + + if (method->mutex == UACPI_NULL) { + method->mutex = uacpi_create_mutex(); + if (uacpi_unlikely(method->mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + method->mutex->sync_level = method->sync_level; + } + + if (!uacpi_this_thread_owns_aml_mutex(method->mutex)) { + ret = uacpi_acquire_aml_mutex(method->mutex, 0xFFFF); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = held_mutexes_array_push(&ctx->held_mutexes, method->mutex); + if (uacpi_unlikely_error(ret)) { + uacpi_release_aml_mutex(method->mutex); + return ret; + } + } + + new_frame->prev_sync_level = ctx->sync_level; + ctx->sync_level = method->sync_level; + return UACPI_STATUS_OK; +} + +static uacpi_status push_op(struct execution_context *ctx) +{ + struct call_frame *frame = ctx->cur_frame; + struct op_context *op_ctx; + + op_ctx = op_context_array_calloc(&frame->pending_ops); + if (op_ctx == UACPI_NULL) + return UACPI_STATUS_OUT_OF_MEMORY; + + op_ctx->op = ctx->cur_op; + refresh_ctx_pointers(ctx); + return UACPI_STATUS_OK; +} + +static uacpi_bool pop_item(struct op_context *op_ctx) +{ + struct item *item; + + if (item_array_size(&op_ctx->items) == 0) + return UACPI_FALSE; + + item = item_array_last(&op_ctx->items); + + if (item->type == ITEM_OBJECT) + uacpi_object_unref(item->obj); + + if (item->type == ITEM_NAMESPACE_NODE) + uacpi_namespace_node_unref(item->node); + + item_array_pop(&op_ctx->items); + return UACPI_TRUE; +} + +static void pop_op(struct execution_context *ctx) +{ + struct call_frame *frame = ctx->cur_frame; + struct op_context *cur_op_ctx = ctx->cur_op_ctx; + + while (pop_item(cur_op_ctx)); + + item_array_clear(&cur_op_ctx->items); + 
op_context_array_pop(&frame->pending_ops); + refresh_ctx_pointers(ctx); +} + +static void call_frame_clear(struct call_frame *frame) +{ + uacpi_size i; + op_context_array_clear(&frame->pending_ops); + code_block_array_clear(&frame->code_blocks); + + while (temp_namespace_node_array_size(&frame->temp_nodes) != 0) { + uacpi_namespace_node *node; + + node = *temp_namespace_node_array_last(&frame->temp_nodes); + uacpi_namespace_node_uninstall(node); + temp_namespace_node_array_pop(&frame->temp_nodes); + } + temp_namespace_node_array_clear(&frame->temp_nodes); + + for (i = 0; i < 7; ++i) + uacpi_object_unref(frame->args[i]); + for (i = 0; i < 8; ++i) + uacpi_object_unref(frame->locals[i]); + + uacpi_method_unref(frame->method); +} + +static uacpi_u8 parse_op_generates_item[0x100] = { + [UACPI_PARSE_OP_SIMPLE_NAME] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_SUPERNAME] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_TERM_ARG] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_OPERAND] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_STRING] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_COMPUTATIONAL_DATA] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_TARGET] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_PKGLEN] = ITEM_PACKAGE_LENGTH, + [UACPI_PARSE_OP_TRACKED_PKGLEN] = ITEM_PACKAGE_LENGTH, + [UACPI_PARSE_OP_CREATE_NAMESTRING] = ITEM_NAMESPACE_NODE, + [UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE, + [UACPI_PARSE_OP_EXISTING_NAMESTRING] = ITEM_NAMESPACE_NODE, + [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL] = ITEM_NAMESPACE_NODE, + [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE, + [UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT] = ITEM_OBJECT, + [UACPI_PARSE_OP_LOAD_INLINE_IMM] = ITEM_IMMEDIATE, + [UACPI_PARSE_OP_LOAD_ZERO_IMM] = ITEM_IMMEDIATE, + [UACPI_PARSE_OP_LOAD_IMM] = ITEM_IMMEDIATE, + [UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT] = ITEM_OBJECT, + [UACPI_PARSE_OP_LOAD_FALSE_OBJECT] = ITEM_OBJECT, + [UACPI_PARSE_OP_LOAD_TRUE_OBJECT] = ITEM_OBJECT, + [UACPI_PARSE_OP_OBJECT_ALLOC] = ITEM_OBJECT, + [UACPI_PARSE_OP_OBJECT_ALLOC_TYPED] = ITEM_OBJECT, + [UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC] = ITEM_EMPTY_OBJECT, + [UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY] = ITEM_OBJECT, + [UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY] = ITEM_OBJECT, + [UACPI_PARSE_OP_RECORD_AML_PC] = ITEM_IMMEDIATE, +}; + +static const uacpi_u8 *op_decode_cursor(const struct op_context *ctx) +{ + const struct uacpi_op_spec *spec = ctx->op; + + if (spec->properties & UACPI_OP_PROPERTY_OUT_OF_LINE) + return &spec->indirect_decode_ops[ctx->pc]; + + return &spec->decode_ops[ctx->pc]; +} + +static uacpi_u8 op_decode_byte(struct op_context *ctx) +{ + uacpi_u8 byte; + + byte = *op_decode_cursor(ctx); + ctx->pc++; + + return byte; +} + +static uacpi_aml_op op_decode_aml_op(struct op_context *op_ctx) +{ + uacpi_aml_op op = 0; + + op |= op_decode_byte(op_ctx); + op |= op_decode_byte(op_ctx) << 8; + + return op; +} + +// MSVC doesn't support __VA_OPT__ so we do this weirdness +#define EXEC_OP_DO_LVL(lvl, reason, ...) \ + uacpi_##lvl("Op 0x%04X ('%s'): "reason"\n", \ + op_ctx->op->code, op_ctx->op->name __VA_ARGS__) + +#define EXEC_OP_DO_ERR(reason, ...) EXEC_OP_DO_LVL(error, reason, __VA_ARGS__) +#define EXEC_OP_DO_WARN(reason, ...) 
EXEC_OP_DO_LVL(warn, reason, __VA_ARGS__) + +#define EXEC_OP_ERR_2(reason, arg0, arg1) EXEC_OP_DO_ERR(reason, ,arg0, arg1) +#define EXEC_OP_ERR_1(reason, arg0) EXEC_OP_DO_ERR(reason, ,arg0) +#define EXEC_OP_ERR(reason) EXEC_OP_DO_ERR(reason) + +#define EXEC_OP_WARN(reason) EXEC_OP_DO_WARN(reason) + +#define SPEC_SIMPLE_NAME "SimpleName := NameString | ArgObj | LocalObj" +#define SPEC_SUPER_NAME \ + "SuperName := SimpleName | DebugObj | ReferenceTypeOpcode" +#define SPEC_TERM_ARG \ + "TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj" +#define SPEC_OPERAND "Operand := TermArg => Integer" +#define SPEC_STRING "String := TermArg => String" +#define SPEC_TARGET "Target := SuperName | NullName" + +#define SPEC_COMPUTATIONAL_DATA \ + "ComputationalData := ByteConst | WordConst | DWordConst | QWordConst " \ + "| String | ConstObj | RevisionOp | DefBuffer" + +static uacpi_bool op_wants_supername(enum uacpi_parse_op op) +{ + switch (op) { + case UACPI_PARSE_OP_SIMPLE_NAME: + case UACPI_PARSE_OP_SUPERNAME: + case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED: + case UACPI_PARSE_OP_TARGET: + return UACPI_TRUE; + default: + return UACPI_FALSE; + } +} + +static uacpi_bool op_wants_term_arg_or_operand(enum uacpi_parse_op op) +{ + switch (op) { + case UACPI_PARSE_OP_TERM_ARG: + case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL: + case UACPI_PARSE_OP_OPERAND: + case UACPI_PARSE_OP_STRING: + case UACPI_PARSE_OP_COMPUTATIONAL_DATA: + return UACPI_TRUE; + default: + return UACPI_FALSE; + } +} + +static uacpi_bool op_allows_unresolved(enum uacpi_parse_op op) +{ + switch (op) { + case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED: + case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL: + return UACPI_TRUE; + default: + return UACPI_FALSE; + } +} + +static uacpi_bool op_allows_unresolved_if_load(enum uacpi_parse_op op) +{ + switch (op) { + case UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD: + case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD: + return UACPI_TRUE; + default: + return UACPI_FALSE; + } +} + +static uacpi_status op_typecheck(const struct op_context *op_ctx, + const struct op_context *cur_op_ctx) +{ + const uacpi_char *expected_type_str; + uacpi_u8 ok_mask = 0; + uacpi_u8 props = cur_op_ctx->op->properties; + + switch (*op_decode_cursor(op_ctx)) { + // SimpleName := NameString | ArgObj | LocalObj + case UACPI_PARSE_OP_SIMPLE_NAME: + expected_type_str = SPEC_SIMPLE_NAME; + ok_mask |= UACPI_OP_PROPERTY_SIMPLE_NAME; + break; + + // Target := SuperName | NullName + case UACPI_PARSE_OP_TARGET: + expected_type_str = SPEC_TARGET; + ok_mask |= UACPI_OP_PROPERTY_TARGET | UACPI_OP_PROPERTY_SUPERNAME; + break; + + // SuperName := SimpleName | DebugObj | ReferenceTypeOpcode + case UACPI_PARSE_OP_SUPERNAME: + case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED: + expected_type_str = SPEC_SUPER_NAME; + ok_mask |= UACPI_OP_PROPERTY_SUPERNAME; + break; + + // TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj + case UACPI_PARSE_OP_TERM_ARG: + case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED: + case UACPI_PARSE_OP_OPERAND: + case UACPI_PARSE_OP_STRING: + case UACPI_PARSE_OP_COMPUTATIONAL_DATA: + expected_type_str = SPEC_TERM_ARG; + ok_mask |= UACPI_OP_PROPERTY_TERM_ARG; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (!(props & ok_mask)) { + EXEC_OP_ERR_2("invalid argument: '%s', expected a %s", + cur_op_ctx->op->name, 
expected_type_str); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status typecheck_obj( + const struct op_context *op_ctx, + const uacpi_object *obj, + enum uacpi_object_type expected_type, + const uacpi_char *spec_desc +) +{ + if (uacpi_likely(obj->type == expected_type)) + return UACPI_STATUS_OK; + + EXEC_OP_ERR_2("invalid argument type: %s, expected a %s", + uacpi_object_type_to_string(obj->type), spec_desc); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; +} + +static uacpi_status typecheck_operand( + const struct op_context *op_ctx, + const uacpi_object *obj +) +{ + return typecheck_obj(op_ctx, obj, UACPI_OBJECT_INTEGER, SPEC_OPERAND); +} + +static uacpi_status typecheck_string( + const struct op_context *op_ctx, + const uacpi_object *obj +) +{ + return typecheck_obj(op_ctx, obj, UACPI_OBJECT_STRING, SPEC_STRING); +} + +static uacpi_status typecheck_computational_data( + const struct op_context *op_ctx, + const uacpi_object *obj +) +{ + switch (obj->type) { + case UACPI_OBJECT_STRING: + case UACPI_OBJECT_BUFFER: + case UACPI_OBJECT_INTEGER: + return UACPI_STATUS_OK; + default: + EXEC_OP_ERR_2( + "invalid argument type: %s, expected a %s", + uacpi_object_type_to_string(obj->type), + SPEC_COMPUTATIONAL_DATA + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } +} + +static void emit_op_skip_warn(const struct op_context *op_ctx) +{ + EXEC_OP_WARN("skipping due to previous errors"); +} + +static void trace_named_object_lookup_or_creation_failure( + struct call_frame *frame, uacpi_size offset, enum uacpi_parse_op op, + uacpi_status ret, enum uacpi_log_level level +) +{ + static const uacpi_char *oom_prefix = "<...>"; + static const uacpi_char *empty_string = ""; + static const uacpi_char *unknown_path = "<unknown-path>"; + static const uacpi_char *invalid_path = "<invalid-path>"; + + uacpi_status conv_ret; + const uacpi_char *action; + const uacpi_char *requested_path_to_print; + const uacpi_char *middle_part = UACPI_NULL; + const uacpi_char *prefix_path = UACPI_NULL; + uacpi_char *requested_path = UACPI_NULL; + uacpi_size length; + uacpi_bool is_create; + + is_create = op == UACPI_PARSE_OP_CREATE_NAMESTRING || + op == UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD; + + if (is_create) + action = "create"; + else + action = "lookup"; + + conv_ret = name_string_to_path( + frame, offset, &requested_path, &length + ); + if (uacpi_unlikely_error(conv_ret)) { + if (conv_ret == UACPI_STATUS_OUT_OF_MEMORY) + requested_path_to_print = unknown_path; + else + requested_path_to_print = invalid_path; + } else { + requested_path_to_print = requested_path; + } + + if (requested_path && requested_path[0] != '\\') { + prefix_path = uacpi_namespace_node_generate_absolute_path( + frame->cur_scope + ); + if (uacpi_unlikely(prefix_path == UACPI_NULL)) + prefix_path = oom_prefix; + + if (prefix_path[1] != '\0') + middle_part = "."; + } else { + prefix_path = empty_string; + } + + if (middle_part == UACPI_NULL) + middle_part = empty_string; + + if (length == 5 && !is_create) { + uacpi_log_lvl( + level, + "unable to %s named object '%s' within (or above) " + "scope '%s': %s\n", action, requested_path_to_print, + prefix_path, uacpi_status_to_string(ret) + ); + } else { + uacpi_log_lvl( + level, + "unable to %s named object '%s%s%s': %s\n", + action, prefix_path, middle_part, + requested_path_to_print, uacpi_status_to_string(ret) + ); + } + + uacpi_free(requested_path, length); + if (prefix_path != oom_prefix && prefix_path != empty_string) + 
uacpi_free_dynamic_string(prefix_path); +} + +static uacpi_status uninstalled_op_handler(struct execution_context *ctx) +{ + struct op_context *op_ctx = ctx->cur_op_ctx; + + EXEC_OP_ERR("no dedicated handler installed"); + return UACPI_STATUS_UNIMPLEMENTED; +} + +enum op_handler { + OP_HANDLER_UNINSTALLED = 0, + OP_HANDLER_LOCAL, + OP_HANDLER_ARG, + OP_HANDLER_STRING, + OP_HANDLER_BINARY_MATH, + OP_HANDLER_CONTROL_FLOW, + OP_HANDLER_CODE_BLOCK, + OP_HANDLER_RETURN, + OP_HANDLER_CREATE_METHOD, + OP_HANDLER_COPY_OBJECT_OR_STORE, + OP_HANDLER_INC_DEC, + OP_HANDLER_REF_OR_DEREF_OF, + OP_HANDLER_LOGICAL_NOT, + OP_HANDLER_BINARY_LOGIC, + OP_HANDLER_NAMED_OBJECT, + OP_HANDLER_BUFFER, + OP_HANDLER_PACKAGE, + OP_HANDLER_CREATE_NAMED, + OP_HANDLER_CREATE_BUFFER_FIELD, + OP_HANDLER_READ_FIELD, + OP_HANDLER_ALIAS, + OP_HANDLER_CONCATENATE, + OP_HANDLER_CONCATENATE_RES, + OP_HANDLER_SIZEOF, + OP_HANDLER_UNARY_MATH, + OP_HANDLER_INDEX, + OP_HANDLER_OBJECT_TYPE, + OP_HANDLER_CREATE_OP_REGION, + OP_HANDLER_CREATE_DATA_REGION, + OP_HANDLER_CREATE_FIELD, + OP_HANDLER_TO, + OP_HANDLER_TO_STRING, + OP_HANDLER_TIMER, + OP_HANDLER_MID, + OP_HANDLER_MATCH, + OP_HANDLER_CREATE_MUTEX_OR_EVENT, + OP_HANDLER_BCD, + OP_HANDLER_UNLOAD, + OP_HANDLER_LOAD_TABLE, + OP_HANDLER_LOAD, + OP_HANDLER_STALL_OR_SLEEP, + OP_HANDLER_EVENT_CTL, + OP_HANDLER_MUTEX_CTL, + OP_HANDLER_NOTIFY, + OP_HANDLER_FIRMWARE_REQUEST, +}; + +static uacpi_status (*op_handlers[])(struct execution_context *ctx) = { + /* + * All OPs that don't have a handler dispatch to here if + * UACPI_PARSE_OP_INVOKE_HANDLER is reached. + */ + [OP_HANDLER_UNINSTALLED] = uninstalled_op_handler, + [OP_HANDLER_LOCAL] = handle_local, + [OP_HANDLER_ARG] = handle_arg, + [OP_HANDLER_NAMED_OBJECT] = handle_named_object, + [OP_HANDLER_STRING] = handle_string, + [OP_HANDLER_BINARY_MATH] = handle_binary_math, + [OP_HANDLER_CONTROL_FLOW] = handle_control_flow, + [OP_HANDLER_CODE_BLOCK] = handle_code_block, + [OP_HANDLER_RETURN] = handle_return, + [OP_HANDLER_CREATE_METHOD] = handle_create_method, + [OP_HANDLER_CREATE_MUTEX_OR_EVENT] = handle_create_mutex_or_event, + [OP_HANDLER_COPY_OBJECT_OR_STORE] = handle_copy_object_or_store, + [OP_HANDLER_INC_DEC] = handle_inc_dec, + [OP_HANDLER_REF_OR_DEREF_OF] = handle_ref_or_deref_of, + [OP_HANDLER_LOGICAL_NOT] = handle_logical_not, + [OP_HANDLER_BINARY_LOGIC] = handle_binary_logic, + [OP_HANDLER_BUFFER] = handle_buffer, + [OP_HANDLER_PACKAGE] = handle_package, + [OP_HANDLER_CREATE_NAMED] = handle_create_named, + [OP_HANDLER_CREATE_BUFFER_FIELD] = handle_create_buffer_field, + [OP_HANDLER_READ_FIELD] = handle_field_read, + [OP_HANDLER_TO] = handle_to, + [OP_HANDLER_ALIAS] = handle_create_alias, + [OP_HANDLER_CONCATENATE] = handle_concatenate, + [OP_HANDLER_CONCATENATE_RES] = handle_concatenate_res, + [OP_HANDLER_SIZEOF] = handle_sizeof, + [OP_HANDLER_UNARY_MATH] = handle_unary_math, + [OP_HANDLER_INDEX] = handle_index, + [OP_HANDLER_OBJECT_TYPE] = handle_object_type, + [OP_HANDLER_CREATE_OP_REGION] = handle_create_op_region, + [OP_HANDLER_CREATE_DATA_REGION] = handle_create_data_region, + [OP_HANDLER_CREATE_FIELD] = handle_create_field, + [OP_HANDLER_TIMER] = handle_timer, + [OP_HANDLER_TO_STRING] = handle_to_string, + [OP_HANDLER_MID] = handle_mid, + [OP_HANDLER_MATCH] = handle_match, + [OP_HANDLER_BCD] = handle_bcd, + [OP_HANDLER_UNLOAD] = handle_unload, + [OP_HANDLER_LOAD_TABLE] = handle_load_table, + [OP_HANDLER_LOAD] = handle_load, + [OP_HANDLER_STALL_OR_SLEEP] = handle_stall_or_sleep, + [OP_HANDLER_EVENT_CTL] = handle_event_ctl, 
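+    /* Acquire/Release, Notify and Fatal/BreakPoint handlers follow */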
+ [OP_HANDLER_MUTEX_CTL] = handle_mutex_ctl, + [OP_HANDLER_NOTIFY] = handle_notify, + [OP_HANDLER_FIRMWARE_REQUEST] = handle_firmware_request, +}; + +static uacpi_u8 handler_idx_of_op[0x100] = { + [UACPI_AML_OP_Local0Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local1Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local2Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local3Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local4Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local5Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local6Op] = OP_HANDLER_LOCAL, + [UACPI_AML_OP_Local7Op] = OP_HANDLER_LOCAL, + + [UACPI_AML_OP_Arg0Op] = OP_HANDLER_ARG, + [UACPI_AML_OP_Arg1Op] = OP_HANDLER_ARG, + [UACPI_AML_OP_Arg2Op] = OP_HANDLER_ARG, + [UACPI_AML_OP_Arg3Op] = OP_HANDLER_ARG, + [UACPI_AML_OP_Arg4Op] = OP_HANDLER_ARG, + [UACPI_AML_OP_Arg5Op] = OP_HANDLER_ARG, + [UACPI_AML_OP_Arg6Op] = OP_HANDLER_ARG, + + [UACPI_AML_OP_StringPrefix] = OP_HANDLER_STRING, + + [UACPI_AML_OP_AddOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_SubtractOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_MultiplyOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_DivideOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_ShiftLeftOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_ShiftRightOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_AndOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_NandOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_OrOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_NorOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_XorOp] = OP_HANDLER_BINARY_MATH, + [UACPI_AML_OP_ModOp] = OP_HANDLER_BINARY_MATH, + + [UACPI_AML_OP_IfOp] = OP_HANDLER_CODE_BLOCK, + [UACPI_AML_OP_ElseOp] = OP_HANDLER_CODE_BLOCK, + [UACPI_AML_OP_WhileOp] = OP_HANDLER_CODE_BLOCK, + [UACPI_AML_OP_ScopeOp] = OP_HANDLER_CODE_BLOCK, + + [UACPI_AML_OP_ContinueOp] = OP_HANDLER_CONTROL_FLOW, + [UACPI_AML_OP_BreakOp] = OP_HANDLER_CONTROL_FLOW, + + [UACPI_AML_OP_ReturnOp] = OP_HANDLER_RETURN, + + [UACPI_AML_OP_MethodOp] = OP_HANDLER_CREATE_METHOD, + + [UACPI_AML_OP_StoreOp] = OP_HANDLER_COPY_OBJECT_OR_STORE, + [UACPI_AML_OP_CopyObjectOp] = OP_HANDLER_COPY_OBJECT_OR_STORE, + + [UACPI_AML_OP_IncrementOp] = OP_HANDLER_INC_DEC, + [UACPI_AML_OP_DecrementOp] = OP_HANDLER_INC_DEC, + + [UACPI_AML_OP_RefOfOp] = OP_HANDLER_REF_OR_DEREF_OF, + [UACPI_AML_OP_DerefOfOp] = OP_HANDLER_REF_OR_DEREF_OF, + + [UACPI_AML_OP_LnotOp] = OP_HANDLER_LOGICAL_NOT, + + [UACPI_AML_OP_LEqualOp] = OP_HANDLER_BINARY_LOGIC, + [UACPI_AML_OP_LandOp] = OP_HANDLER_BINARY_LOGIC, + [UACPI_AML_OP_LorOp] = OP_HANDLER_BINARY_LOGIC, + [UACPI_AML_OP_LGreaterOp] = OP_HANDLER_BINARY_LOGIC, + [UACPI_AML_OP_LLessOp] = OP_HANDLER_BINARY_LOGIC, + + [UACPI_AML_OP_InternalOpNamedObject] = OP_HANDLER_NAMED_OBJECT, + + [UACPI_AML_OP_BufferOp] = OP_HANDLER_BUFFER, + + [UACPI_AML_OP_PackageOp] = OP_HANDLER_PACKAGE, + [UACPI_AML_OP_VarPackageOp] = OP_HANDLER_PACKAGE, + + [UACPI_AML_OP_NameOp] = OP_HANDLER_CREATE_NAMED, + + [UACPI_AML_OP_CreateBitFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD, + [UACPI_AML_OP_CreateByteFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD, + [UACPI_AML_OP_CreateWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD, + [UACPI_AML_OP_CreateDWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD, + [UACPI_AML_OP_CreateQWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD, + + [UACPI_AML_OP_InternalOpReadFieldAsBuffer] = OP_HANDLER_READ_FIELD, + [UACPI_AML_OP_InternalOpReadFieldAsInteger] = OP_HANDLER_READ_FIELD, + + [UACPI_AML_OP_ToIntegerOp] = OP_HANDLER_TO, + [UACPI_AML_OP_ToBufferOp] = OP_HANDLER_TO, + [UACPI_AML_OP_ToDecimalStringOp] = OP_HANDLER_TO, + [UACPI_AML_OP_ToHexStringOp] = 
OP_HANDLER_TO, + [UACPI_AML_OP_ToStringOp] = OP_HANDLER_TO_STRING, + + [UACPI_AML_OP_AliasOp] = OP_HANDLER_ALIAS, + + [UACPI_AML_OP_ConcatOp] = OP_HANDLER_CONCATENATE, + [UACPI_AML_OP_ConcatResOp] = OP_HANDLER_CONCATENATE_RES, + + [UACPI_AML_OP_SizeOfOp] = OP_HANDLER_SIZEOF, + + [UACPI_AML_OP_NotOp] = OP_HANDLER_UNARY_MATH, + [UACPI_AML_OP_FindSetLeftBitOp] = OP_HANDLER_UNARY_MATH, + [UACPI_AML_OP_FindSetRightBitOp] = OP_HANDLER_UNARY_MATH, + + [UACPI_AML_OP_IndexOp] = OP_HANDLER_INDEX, + + [UACPI_AML_OP_ObjectTypeOp] = OP_HANDLER_OBJECT_TYPE, + + [UACPI_AML_OP_MidOp] = OP_HANDLER_MID, + + [UACPI_AML_OP_MatchOp] = OP_HANDLER_MATCH, + + [UACPI_AML_OP_NotifyOp] = OP_HANDLER_NOTIFY, + + [UACPI_AML_OP_BreakPointOp] = OP_HANDLER_FIRMWARE_REQUEST, +}; + +#define EXT_OP_IDX(op) (op & 0xFF) + +static uacpi_u8 handler_idx_of_ext_op[0x100] = { + [EXT_OP_IDX(UACPI_AML_OP_CreateFieldOp)] = OP_HANDLER_CREATE_BUFFER_FIELD, + [EXT_OP_IDX(UACPI_AML_OP_CondRefOfOp)] = OP_HANDLER_REF_OR_DEREF_OF, + [EXT_OP_IDX(UACPI_AML_OP_OpRegionOp)] = OP_HANDLER_CREATE_OP_REGION, + [EXT_OP_IDX(UACPI_AML_OP_DeviceOp)] = OP_HANDLER_CODE_BLOCK, + [EXT_OP_IDX(UACPI_AML_OP_ProcessorOp)] = OP_HANDLER_CODE_BLOCK, + [EXT_OP_IDX(UACPI_AML_OP_PowerResOp)] = OP_HANDLER_CODE_BLOCK, + [EXT_OP_IDX(UACPI_AML_OP_ThermalZoneOp)] = OP_HANDLER_CODE_BLOCK, + [EXT_OP_IDX(UACPI_AML_OP_TimerOp)] = OP_HANDLER_TIMER, + [EXT_OP_IDX(UACPI_AML_OP_MutexOp)] = OP_HANDLER_CREATE_MUTEX_OR_EVENT, + [EXT_OP_IDX(UACPI_AML_OP_EventOp)] = OP_HANDLER_CREATE_MUTEX_OR_EVENT, + + [EXT_OP_IDX(UACPI_AML_OP_FieldOp)] = OP_HANDLER_CREATE_FIELD, + [EXT_OP_IDX(UACPI_AML_OP_IndexFieldOp)] = OP_HANDLER_CREATE_FIELD, + [EXT_OP_IDX(UACPI_AML_OP_BankFieldOp)] = OP_HANDLER_CREATE_FIELD, + + [EXT_OP_IDX(UACPI_AML_OP_FromBCDOp)] = OP_HANDLER_BCD, + [EXT_OP_IDX(UACPI_AML_OP_ToBCDOp)] = OP_HANDLER_BCD, + + [EXT_OP_IDX(UACPI_AML_OP_DataRegionOp)] = OP_HANDLER_CREATE_DATA_REGION, + + [EXT_OP_IDX(UACPI_AML_OP_LoadTableOp)] = OP_HANDLER_LOAD_TABLE, + [EXT_OP_IDX(UACPI_AML_OP_LoadOp)] = OP_HANDLER_LOAD, + [EXT_OP_IDX(UACPI_AML_OP_UnloadOp)] = OP_HANDLER_UNLOAD, + + [EXT_OP_IDX(UACPI_AML_OP_StallOp)] = OP_HANDLER_STALL_OR_SLEEP, + [EXT_OP_IDX(UACPI_AML_OP_SleepOp)] = OP_HANDLER_STALL_OR_SLEEP, + + [EXT_OP_IDX(UACPI_AML_OP_SignalOp)] = OP_HANDLER_EVENT_CTL, + [EXT_OP_IDX(UACPI_AML_OP_ResetOp)] = OP_HANDLER_EVENT_CTL, + [EXT_OP_IDX(UACPI_AML_OP_WaitOp)] = OP_HANDLER_EVENT_CTL, + + [EXT_OP_IDX(UACPI_AML_OP_AcquireOp)] = OP_HANDLER_MUTEX_CTL, + [EXT_OP_IDX(UACPI_AML_OP_ReleaseOp)] = OP_HANDLER_MUTEX_CTL, + + [EXT_OP_IDX(UACPI_AML_OP_FatalOp)] = OP_HANDLER_FIRMWARE_REQUEST, +}; + +enum method_call_type { + METHOD_CALL_NATIVE, + METHOD_CALL_AML, + METHOD_CALL_TABLE_LOAD, +}; + +static uacpi_status prepare_method_call( + struct execution_context *ctx, uacpi_namespace_node *node, + uacpi_control_method *method, enum method_call_type type, + const uacpi_object_array *args +) +{ + uacpi_status ret; + struct call_frame *frame; + + if (uacpi_unlikely(call_frame_array_size(&ctx->call_stack) >= + g_uacpi_rt_ctx.max_call_stack_depth)) + return UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT; + + ret = push_new_frame(ctx, &frame); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = enter_method(ctx, frame, method); + if (uacpi_unlikely_error(ret)) + goto method_dispatch_error; + + if (type == METHOD_CALL_NATIVE) { + uacpi_u8 arg_count; + + arg_count = args ? 
args->count : 0; + if (uacpi_unlikely(arg_count != method->args)) { + uacpi_error( + "invalid number of arguments %zu to call %.4s, expected %d\n", + args ? args->count : 0, node->name.text, method->args + ); + + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto method_dispatch_error; + } + + if (args != UACPI_NULL) { + uacpi_u8 i; + + for (i = 0; i < method->args; ++i) { + frame->args[i] = args->objects[i]; + uacpi_object_ref(args->objects[i]); + } + } + } else if (type == METHOD_CALL_AML) { + ret = frame_push_args(frame, ctx->cur_op_ctx); + if (uacpi_unlikely_error(ret)) + goto method_dispatch_error; + } + + ret = frame_setup_base_scope(frame, node, method); + if (uacpi_unlikely_error(ret)) + goto method_dispatch_error; + + ctx->cur_frame = frame; + ctx->cur_op_ctx = UACPI_NULL; + ctx->prev_op_ctx = UACPI_NULL; + ctx->cur_block = code_block_array_last(&ctx->cur_frame->code_blocks); + + if (method->native_call) { + uacpi_object *retval; + + ret = method_get_ret_object(ctx, &retval); + if (uacpi_unlikely_error(ret)) + goto method_dispatch_error; + + return method->handler(ctx, retval); + } + + return UACPI_STATUS_OK; + +method_dispatch_error: + call_frame_clear(frame); + call_frame_array_pop(&ctx->call_stack); + return ret; +} + +static void apply_tracked_pkg( + struct call_frame *frame, struct op_context *op_ctx +) +{ + struct item *item; + + if (op_ctx->tracked_pkg_idx == 0) + return; + + item = item_array_at(&op_ctx->items, op_ctx->tracked_pkg_idx - 1); + frame->code_offset = item->pkg.end; +} + +static uacpi_status exec_op(struct execution_context *ctx) +{ + uacpi_status ret = UACPI_STATUS_OK; + struct call_frame *frame = ctx->cur_frame; + struct op_context *op_ctx; + struct item *item = UACPI_NULL; + enum uacpi_parse_op prev_op = 0, op; + + /* + * Allocate a new op context if previous is preempted (looking for a + * dynamic argument), or doesn't exist at all. + */ + if (!ctx_has_non_preempted_op(ctx)) { + ret = push_op(ctx); + if (uacpi_unlikely_error(ret)) + return ret; + } else { + trace_op(ctx->cur_op_ctx->op, OP_TRACE_ACTION_RESUME); + } + + if (ctx->prev_op_ctx) + prev_op = *op_decode_cursor(ctx->prev_op_ctx); + + for (;;) { + if (uacpi_unlikely_error(ret)) + return ret; + + op_ctx = ctx->cur_op_ctx; + frame = ctx->cur_frame; + + if (op_ctx->pc == 0 && ctx->prev_op_ctx) { + /* + * Type check the current arg type against what is expected by the + * preempted op. This check is able to catch most type violations + * with the only exception being Operand as we only know whether + * that evaluates to an integer after the fact. 
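+             * (Operand arguments get their final check in the
+             * OBJECT_TRANSFER_TO_PREV handler below, via typecheck_operand(),
+             * once the produced object is actually known.)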
+ */ + ret = op_typecheck(ctx->prev_op_ctx, ctx->cur_op_ctx); + if (uacpi_unlikely_error(ret)) + return ret; + } + + op = op_decode_byte(op_ctx); + trace_pop(op); + + if (parse_op_generates_item[op] != ITEM_NONE) { + item = item_array_alloc(&op_ctx->items); + if (uacpi_unlikely(item == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + item->type = parse_op_generates_item[op]; + if (item->type == ITEM_OBJECT) { + enum uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED; + + if (op == UACPI_PARSE_OP_OBJECT_ALLOC_TYPED) + type = op_decode_byte(op_ctx); + + item->obj = uacpi_create_object(type); + if (uacpi_unlikely(item->obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + } else { + uacpi_memzero(&item->immediate, sizeof(item->immediate)); + } + } else if (item == UACPI_NULL) { + item = item_array_last(&op_ctx->items); + } + + switch (op) { + case UACPI_PARSE_OP_END: + case UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL: { + trace_op(ctx->cur_op_ctx->op, OP_TRACE_ACTION_END); + + if (op == UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL) { + uacpi_u8 idx; + + idx = op_decode_byte(op_ctx); + if (item_array_at(&op_ctx->items, idx)->handle != UACPI_NULL) + break; + + emit_op_skip_warn(op_ctx); + } + + apply_tracked_pkg(frame, op_ctx); + + pop_op(ctx); + if (ctx->cur_op_ctx) { + ctx->cur_op_ctx->preempted = UACPI_FALSE; + ctx->cur_op_ctx->pc++; + } + + return UACPI_STATUS_OK; + } + + case UACPI_PARSE_OP_EMIT_SKIP_WARN: + emit_op_skip_warn(op_ctx); + break; + + case UACPI_PARSE_OP_SIMPLE_NAME: + case UACPI_PARSE_OP_SUPERNAME: + case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED: + case UACPI_PARSE_OP_TERM_ARG: + case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED: + case UACPI_PARSE_OP_OPERAND: + case UACPI_PARSE_OP_STRING: + case UACPI_PARSE_OP_COMPUTATIONAL_DATA: + case UACPI_PARSE_OP_TARGET: + /* + * Preempt this op parsing for now as we wait for the dynamic arg + * to be parsed. 
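+             * The argument's own op context will transfer its result back to
+             * us, and its END handler clears the preempted flag and advances
+             * our pc so that this op resumes where it left off.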
+ */ + op_ctx->preempted = UACPI_TRUE; + op_ctx->pc--; + return UACPI_STATUS_OK; + + case UACPI_PARSE_OP_TRACKED_PKGLEN: + op_ctx->tracked_pkg_idx = item_array_size(&op_ctx->items); + UACPI_FALLTHROUGH; + case UACPI_PARSE_OP_PKGLEN: + ret = parse_package_length(frame, &item->pkg); + break; + + case UACPI_PARSE_OP_LOAD_INLINE_IMM: + case UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT: { + void *dst; + uacpi_u8 src_width; + + if (op == UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT) { + item->obj->type = UACPI_OBJECT_INTEGER; + dst = &item->obj->integer; + src_width = 8; + } else { + dst = &item->immediate; + src_width = op_decode_byte(op_ctx); + } + + uacpi_memcpy_zerout( + dst, op_decode_cursor(op_ctx), + sizeof(uacpi_u64), src_width + ); + op_ctx->pc += src_width; + break; + } + + case UACPI_PARSE_OP_LOAD_ZERO_IMM: + break; + + case UACPI_PARSE_OP_LOAD_IMM: + case UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT: { + uacpi_u8 width; + void *dst; + + width = op_decode_byte(op_ctx); + if (uacpi_unlikely(call_frame_code_bytes_left(frame) < width)) + return UACPI_STATUS_AML_BAD_ENCODING; + + if (op == UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT) { + item->obj->type = UACPI_OBJECT_INTEGER; + item->obj->integer = 0; + dst = &item->obj->integer; + } else { + dst = item->immediate_bytes; + } + + uacpi_memcpy(dst, call_frame_cursor(frame), width); + frame->code_offset += width; + break; + } + + case UACPI_PARSE_OP_LOAD_FALSE_OBJECT: + case UACPI_PARSE_OP_LOAD_TRUE_OBJECT: { + uacpi_object *obj = item->obj; + obj->type = UACPI_OBJECT_INTEGER; + obj->integer = op == UACPI_PARSE_OP_LOAD_FALSE_OBJECT ? 0 : ones(); + break; + } + + case UACPI_PARSE_OP_RECORD_AML_PC: + item->immediate = frame->code_offset; + break; + + case UACPI_PARSE_OP_TRUNCATE_NUMBER: + truncate_number_if_needed(item->obj); + break; + + case UACPI_PARSE_OP_TYPECHECK: { + enum uacpi_object_type expected_type; + + expected_type = op_decode_byte(op_ctx); + + if (uacpi_unlikely(item->obj->type != expected_type)) { + EXEC_OP_ERR_2("bad object type: expected %s, got %s!", + uacpi_object_type_to_string(expected_type), + uacpi_object_type_to_string(item->obj->type)); + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + break; + } + + case UACPI_PARSE_OP_BAD_OPCODE: + case UACPI_PARSE_OP_UNREACHABLE: + EXEC_OP_ERR("invalid/unexpected opcode"); + ret = UACPI_STATUS_AML_INVALID_OPCODE; + break; + + case UACPI_PARSE_OP_AML_PC_DECREMENT: + frame->code_offset--; + break; + + case UACPI_PARSE_OP_IMM_DECREMENT: + item_array_at(&op_ctx->items, op_decode_byte(op_ctx))->immediate--; + break; + + case UACPI_PARSE_OP_ITEM_POP: + pop_item(op_ctx); + item = item_array_last(&op_ctx->items); + break; + + case UACPI_PARSE_OP_IF_HAS_DATA: { + uacpi_size pkg_idx = op_ctx->tracked_pkg_idx - 1; + struct package_length *pkg; + uacpi_u8 bytes_skip; + + bytes_skip = op_decode_byte(op_ctx); + pkg = &item_array_at(&op_ctx->items, pkg_idx)->pkg; + + if (frame->code_offset >= pkg->end) + op_ctx->pc += bytes_skip; + + break; + } + + case UACPI_PARSE_OP_IF_NOT_NULL: + case UACPI_PARSE_OP_IF_NULL: + case UACPI_PARSE_OP_IF_LAST_NULL: + case UACPI_PARSE_OP_IF_LAST_NOT_NULL: { + uacpi_u8 idx, bytes_skip; + uacpi_bool is_null, skip_if_null; + + if (op == UACPI_PARSE_OP_IF_LAST_NULL || + op == UACPI_PARSE_OP_IF_LAST_NOT_NULL) { + is_null = item->handle == UACPI_NULL; + } else { + idx = op_decode_byte(op_ctx); + is_null = item_array_at(&op_ctx->items, idx)->handle == UACPI_NULL; + } + + bytes_skip = op_decode_byte(op_ctx); + skip_if_null = op == UACPI_PARSE_OP_IF_NOT_NULL || + op == 
UACPI_PARSE_OP_IF_LAST_NOT_NULL; + + if (is_null == skip_if_null) + op_ctx->pc += bytes_skip; + + break; + } + + case UACPI_PARSE_OP_IF_LAST_EQUALS: { + uacpi_u8 value, bytes_skip; + + value = op_decode_byte(op_ctx); + bytes_skip = op_decode_byte(op_ctx); + + if (item->immediate != value) + op_ctx->pc += bytes_skip; + + break; + } + + case UACPI_PARSE_OP_IF_LAST_FALSE: + case UACPI_PARSE_OP_IF_LAST_TRUE: { + uacpi_u8 bytes_skip; + uacpi_bool is_false, skip_if_false; + + bytes_skip = op_decode_byte(op_ctx); + is_false = item->obj->integer == 0; + skip_if_false = op == UACPI_PARSE_OP_IF_LAST_TRUE; + + if (is_false == skip_if_false) + op_ctx->pc += bytes_skip; + + break; + } + + case UACPI_PARSE_OP_JMP: { + op_ctx->pc = op_decode_byte(op_ctx); + break; + } + + case UACPI_PARSE_OP_CREATE_NAMESTRING: + case UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD: + case UACPI_PARSE_OP_EXISTING_NAMESTRING: + case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL: + case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD: { + uacpi_size offset = frame->code_offset; + enum resolve_behavior behavior; + + if (op == UACPI_PARSE_OP_CREATE_NAMESTRING || + op == UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD) + behavior = RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS; + else + behavior = RESOLVE_FAIL_IF_DOESNT_EXIST; + + ret = resolve_name_string(frame, behavior, &item->node); + + if (ret == UACPI_STATUS_NOT_FOUND) { + uacpi_bool is_ok; + + if (prev_op) { + is_ok = op_allows_unresolved(prev_op); + is_ok &= op_allows_unresolved(op); + } else { + // This is the only standalone op where we allow unresolved + is_ok = op_ctx->op->code == UACPI_AML_OP_ExternalOp; + } + + if (is_ok) + ret = UACPI_STATUS_OK; + } + + if (uacpi_unlikely_error(ret)) { + enum uacpi_log_level lvl = UACPI_LOG_ERROR; + uacpi_status trace_ret = ret; + uacpi_bool abort_whileif = UACPI_FALSE; + + if (frame->method->named_objects_persist && + (ret == UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS || + ret == UACPI_STATUS_NOT_FOUND)) { + struct op_context *first_ctx; + + first_ctx = op_context_array_at(&frame->pending_ops, 0); + abort_whileif = first_ctx->op->code == UACPI_AML_OP_WhileOp || + first_ctx->op->code == UACPI_AML_OP_IfOp; + + if (op_allows_unresolved_if_load(op) || abort_whileif) { + lvl = UACPI_LOG_WARN; + ret = UACPI_STATUS_OK; + } + } + + trace_named_object_lookup_or_creation_failure( + frame, offset, op, trace_ret, lvl + ); + + if (abort_whileif) { + while (op_context_array_size(&frame->pending_ops) != 1) + pop_op(ctx); + + op_ctx = op_context_array_at(&frame->pending_ops, 0); + op_ctx->pc++; + op_ctx->preempted = UACPI_FALSE; + break; + } + + if (ret == UACPI_STATUS_NOT_FOUND) + ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE; + } + + if (behavior == RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS && + !frame->method->named_objects_persist) + item->node->flags |= UACPI_NAMESPACE_NODE_FLAG_TEMPORARY; + + break; + } + + case UACPI_PARSE_OP_INVOKE_HANDLER: { + uacpi_aml_op code = op_ctx->op->code; + uacpi_u8 idx; + + if (code <= 0xFF) + idx = handler_idx_of_op[code]; + else + idx = handler_idx_of_ext_op[EXT_OP_IDX(code)]; + + ret = op_handlers[idx](ctx); + break; + } + + case UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE: + item = item_array_at(&op_ctx->items, op_decode_byte(op_ctx)); + ret = do_install_node_item(frame, item); + break; + + case UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV: + case UACPI_PARSE_OP_OBJECT_COPY_TO_PREV: { + uacpi_object *src; + struct item *dst; + + if (!ctx->prev_op_ctx) + break; + + switch (prev_op) { + case 
UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL: + case UACPI_PARSE_OP_COMPUTATIONAL_DATA: + case UACPI_PARSE_OP_OPERAND: + case UACPI_PARSE_OP_STRING: + src = uacpi_unwrap_internal_reference(item->obj); + + if (prev_op == UACPI_PARSE_OP_OPERAND) + ret = typecheck_operand(ctx->prev_op_ctx, src); + else if (prev_op == UACPI_PARSE_OP_STRING) + ret = typecheck_string(ctx->prev_op_ctx, src); + else if (prev_op == UACPI_PARSE_OP_COMPUTATIONAL_DATA) + ret = typecheck_computational_data(ctx->prev_op_ctx, src); + + break; + case UACPI_PARSE_OP_SUPERNAME: + case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED: + src = item->obj; + break; + + case UACPI_PARSE_OP_SIMPLE_NAME: + case UACPI_PARSE_OP_TERM_ARG: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED: + case UACPI_PARSE_OP_TARGET: + src = item->obj; + break; + + default: + EXEC_OP_ERR_1("don't know how to copy/transfer object to %d", + prev_op); + ret = UACPI_STATUS_INVALID_ARGUMENT; + break; + } + + if (uacpi_likely_success(ret)) { + dst = item_array_last(&ctx->prev_op_ctx->items); + dst->type = ITEM_OBJECT; + + if (op == UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV) { + dst->obj = src; + uacpi_object_ref(dst->obj); + } else { + dst->obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(dst->obj == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + break; + } + + ret = uacpi_object_assign(dst->obj, src, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); + } + } + break; + } + + case UACPI_PARSE_OP_STORE_TO_TARGET: + case UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT: { + uacpi_object *dst, *src; + + dst = item_array_at(&op_ctx->items, op_decode_byte(op_ctx))->obj; + + if (op == UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT) { + src = item_array_at(&op_ctx->items, + op_decode_byte(op_ctx))->obj; + } else { + src = item->obj; + } + + ret = store_to_target(dst, src, UACPI_NULL); + break; + } + + // Nothing to do here, object is allocated automatically + case UACPI_PARSE_OP_OBJECT_ALLOC: + case UACPI_PARSE_OP_OBJECT_ALLOC_TYPED: + case UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC: + break; + + case UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY: + case UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY: { + uacpi_object *temp = item->obj; + enum uacpi_assign_behavior behavior; + + item_array_pop(&op_ctx->items); + item = item_array_last(&op_ctx->items); + + if (op == UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY) + behavior = UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY; + else + behavior = UACPI_ASSIGN_BEHAVIOR_DEEP_COPY; + + ret = uacpi_object_assign(temp, item->obj, behavior); + if (uacpi_unlikely_error(ret)) + break; + + uacpi_object_unref(item->obj); + item->obj = temp; + break; + } + + case UACPI_PARSE_OP_DISPATCH_METHOD_CALL: { + struct uacpi_namespace_node *node; + struct uacpi_control_method *method; + + node = item_array_at(&op_ctx->items, 0)->node; + method = uacpi_namespace_node_get_object(node)->method; + + ret = prepare_method_call( + ctx, node, method, METHOD_CALL_AML, UACPI_NULL + ); + return ret; + } + + case UACPI_PARSE_OP_DISPATCH_TABLE_LOAD: { + struct uacpi_namespace_node *node; + struct uacpi_control_method *method; + + node = item_array_at(&op_ctx->items, 0)->node; + method = item_array_at(&op_ctx->items, 1)->obj->method; + + ret = prepare_method_call( + ctx, node, method, METHOD_CALL_TABLE_LOAD, UACPI_NULL + ); + return ret; + } + + case UACPI_PARSE_OP_CONVERT_NAMESTRING: { + uacpi_aml_op new_op = UACPI_AML_OP_InternalOpNamedObject; + uacpi_object *obj; + + if (item->node == UACPI_NULL) { + if 
(!op_allows_unresolved(prev_op)) + ret = UACPI_STATUS_NOT_FOUND; + break; + } + + obj = uacpi_namespace_node_get_object(item->node); + + switch (obj->type) { + case UACPI_OBJECT_METHOD: { + uacpi_bool should_invoke; + + switch (prev_op) { + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT: + case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED: + should_invoke = UACPI_FALSE; + break; + default: + should_invoke = !op_wants_supername(prev_op); + } + + if (!should_invoke) + break; + + new_op = UACPI_AML_OP_InternalOpMethodCall0Args; + new_op += obj->method->args; + break; + } + + case UACPI_OBJECT_BUFFER_FIELD: + case UACPI_OBJECT_FIELD_UNIT: { + uacpi_object_type type; + + if (!op_wants_term_arg_or_operand(prev_op)) + break; + + ret = field_get_read_type(obj, &type); + if (uacpi_unlikely_error(ret)) { + const uacpi_char *field_path; + + field_path = uacpi_namespace_node_generate_absolute_path( + item->node + ); + + uacpi_error( + "unable to perform a read from field %s: " + "parent opregion gone\n", field_path + ); + uacpi_free_absolute_path(field_path); + } + + switch (type) { + case UACPI_OBJECT_BUFFER: + new_op = UACPI_AML_OP_InternalOpReadFieldAsBuffer; + break; + case UACPI_OBJECT_INTEGER: + new_op = UACPI_AML_OP_InternalOpReadFieldAsInteger; + break; + default: + ret = UACPI_STATUS_INVALID_ARGUMENT; + continue; + } + break; + } + default: + break; + } + + op_ctx->pc = 0; + op_ctx->op = uacpi_get_op_spec(new_op); + break; + } + + case UACPI_PARSE_OP_SWITCH_TO_NEXT_IF_EQUALS: { + uacpi_aml_op op, target_op; + uacpi_u32 cur_offset; + uacpi_u8 op_length; + + cur_offset = frame->code_offset; + apply_tracked_pkg(frame, op_ctx); + op_length = peek_next_op(frame, &op); + + target_op = op_decode_aml_op(op_ctx); + if (op_length == 0 || op != target_op) { + // Revert tracked package + frame->code_offset = cur_offset; + break; + } + + frame->code_offset += op_length; + op_ctx->switched_from = op_ctx->op->code; + op_ctx->op = uacpi_get_op_spec(target_op); + op_ctx->pc = 0; + break; + } + + case UACPI_PARSE_OP_IF_SWITCHED_FROM: { + uacpi_aml_op target_op; + uacpi_u8 skip_bytes; + + target_op = op_decode_aml_op(op_ctx); + skip_bytes = op_decode_byte(op_ctx); + + if (op_ctx->switched_from != target_op) + op_ctx->pc += skip_bytes; + break; + } + + default: + EXEC_OP_ERR_1("unhandled parser op '%d'", op); + ret = UACPI_STATUS_UNIMPLEMENTED; + break; + } + } +} + +static void ctx_reload_post_ret(struct execution_context *ctx) +{ + uacpi_control_method *method = ctx->cur_frame->method; + + if (method->is_serialized) { + held_mutexes_array_remove_and_release( + &ctx->held_mutexes, method->mutex, FORCE_RELEASE_YES + ); + ctx->sync_level = ctx->cur_frame->prev_sync_level; + } + + call_frame_clear(ctx->cur_frame); + call_frame_array_pop(&ctx->call_stack); + + ctx->cur_frame = call_frame_array_last(&ctx->call_stack); + refresh_ctx_pointers(ctx); +} + +static void trace_method_abort(struct code_block *block, uacpi_size depth) +{ + static const uacpi_char *unknown_path = "<unknown>"; + uacpi_char oom_absolute_path[9] = "<?>."; + + const uacpi_char *absolute_path; + + if (block != UACPI_NULL && block->type == CODE_BLOCK_SCOPE) { + absolute_path = uacpi_namespace_node_generate_absolute_path(block->node); + if (uacpi_unlikely(absolute_path == UACPI_NULL)) + uacpi_memcpy(oom_absolute_path + 4, block->node->name.text, 4); + } else { + absolute_path = unknown_path; + } + + uacpi_error(" #%zu in %s()\n", depth, absolute_path); + + if (absolute_path != oom_absolute_path && absolute_path != unknown_path) + 
uacpi_free_dynamic_string(absolute_path); +} + +static void stack_unwind(struct execution_context *ctx) +{ + uacpi_size depth; + uacpi_bool should_stop; + + /* + * Non-empty call stack here means the execution was aborted at some point, + * probably due to a bytecode error. + */ + depth = call_frame_array_size(&ctx->call_stack); + + if (depth != 0) { + uacpi_size idx = 0; + uacpi_bool table_level_code; + + do { + table_level_code = ctx->cur_frame->method->named_objects_persist; + + if (table_level_code && idx != 0) + /* + * This isn't the first frame that we are aborting. + * If this is table-level code, we have just unwound a call + * chain that had triggered an abort. Stop here, no need to + * abort table load because of it. + */ + break; + + while (op_context_array_size(&ctx->cur_frame->pending_ops) != 0) + pop_op(ctx); + + trace_method_abort( + code_block_array_at(&ctx->cur_frame->code_blocks, 0), idx + ); + + should_stop = idx++ == 0 && table_level_code; + ctx_reload_post_ret(ctx); + } while (--depth && !should_stop); + } +} + +static void execution_context_release(struct execution_context *ctx) +{ + if (ctx->ret) + uacpi_object_unref(ctx->ret); + + while (held_mutexes_array_size(&ctx->held_mutexes) != 0) { + held_mutexes_array_remove_and_release( + &ctx->held_mutexes, + *held_mutexes_array_last(&ctx->held_mutexes), + FORCE_RELEASE_YES + ); + } + + call_frame_array_clear(&ctx->call_stack); + held_mutexes_array_clear(&ctx->held_mutexes); + uacpi_free(ctx, sizeof(*ctx)); +} + +uacpi_status uacpi_execute_control_method( + uacpi_namespace_node *scope, uacpi_control_method *method, + const uacpi_object_array *args, uacpi_object **out_obj +) +{ + uacpi_status ret = UACPI_STATUS_OK; + struct execution_context *ctx; + + ctx = uacpi_kernel_alloc_zeroed(sizeof(*ctx)); + if (uacpi_unlikely(ctx == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + if (out_obj != UACPI_NULL) { + ctx->ret = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(ctx->ret == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + } + + ret = prepare_method_call(ctx, scope, method, METHOD_CALL_NATIVE, args); + if (uacpi_unlikely_error(ret)) + goto out; + + for (;;) { + if (!ctx_has_non_preempted_op(ctx)) { + if (ctx->cur_frame == UACPI_NULL) + break; + + if (maybe_end_block(ctx)) + continue; + + if (!call_frame_has_code(ctx->cur_frame)) { + ctx_reload_post_ret(ctx); + continue; + } + + ret = get_op(ctx); + if (uacpi_unlikely_error(ret)) + goto handle_method_abort; + + trace_op(ctx->cur_op, OP_TRACE_ACTION_BEGIN); + } + + ret = exec_op(ctx); + if (uacpi_unlikely_error(ret)) + goto handle_method_abort; + + continue; + + handle_method_abort: + uacpi_error("aborting %s due to previous error: %s\n", + ctx->cur_frame->method->named_objects_persist ? + "table load" : "method invocation", + uacpi_status_to_string(ret)); + stack_unwind(ctx); + + /* + * Having a frame here implies that we just aborted a dynamic table + * load. Signal to the caller that it failed by setting the return + * value to false. 
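+         * (We do this by zeroing out the integer of the last item in the
+         * current op context, if one is present.)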
+ */ + if (ctx->cur_frame) { + struct item *it; + + it = item_array_last(&ctx->cur_op_ctx->items); + if (it != UACPI_NULL && it->obj != UACPI_NULL) + it->obj->integer = 0; + } + } + +out: + if (ctx->ret != UACPI_NULL) { + uacpi_object *ret_obj = UACPI_NULL; + + if (ctx->ret->type != UACPI_OBJECT_UNINITIALIZED) { + ret_obj = ctx->ret; + uacpi_object_ref(ret_obj); + } + + *out_obj = ret_obj; + } + + execution_context_release(ctx); + return ret; +} + +uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval) +{ + struct execution_context *ctx = handle; + uacpi_bool is_supported; + uacpi_status ret; + uacpi_object *arg; + + arg = uacpi_unwrap_internal_reference(ctx->cur_frame->args[0]); + if (arg->type != UACPI_OBJECT_STRING) { + uacpi_error("_OSI: invalid argument type %s, expected a String\n", + uacpi_object_type_to_string(arg->type)); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + if (retval == UACPI_NULL) + return UACPI_STATUS_OK; + + retval->type = UACPI_OBJECT_INTEGER; + + ret = uacpi_handle_osi(arg->buffer->text, &is_supported); + if (uacpi_unlikely_error(ret)) + return ret; + + retval->integer = is_supported ? ones() : 0; + + uacpi_trace("_OSI(%s) => reporting as %ssupported\n", + arg->buffer->text, is_supported ? "" : "un"); + return UACPI_STATUS_OK; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/io.c b/sys/dev/acpi/uacpi/io.c new file mode 100644 index 0000000..7d10005 --- /dev/null +++ b/sys/dev/acpi/uacpi/io.c @@ -0,0 +1,1116 @@ +#include <uacpi/internal/io.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/opregion.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/internal/namespace.h> + +#ifndef UACPI_BAREBONES_MODE + +uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length) +{ + return UACPI_ALIGN_UP(bit_length, 8, uacpi_size) / 8; +} + +static void cut_misaligned_tail( + uacpi_u8 *data, uacpi_size offset, uacpi_u32 bit_length +) +{ + uacpi_u8 remainder = bit_length & 7; + + if (remainder == 0) + return; + + data[offset] &= ((1ull << remainder) - 1); +} + +struct bit_span +{ + union { + uacpi_u8 *data; + const uacpi_u8 *const_data; + }; + uacpi_u64 index; + uacpi_u64 length; +}; + +static uacpi_size bit_span_offset(struct bit_span *span, uacpi_size bits) +{ + uacpi_size delta = UACPI_MIN(span->length, bits); + + span->index += delta; + span->length -= delta; + + return delta; +} + +static void bit_copy(struct bit_span *dst, struct bit_span *src) +{ + uacpi_u8 src_shift, dst_shift, bits = 0; + uacpi_u16 dst_mask; + uacpi_u8 *dst_ptr, *src_ptr; + uacpi_u64 dst_count, src_count; + + dst_ptr = dst->data + (dst->index / 8); + src_ptr = src->data + (src->index / 8); + + dst_count = dst->length; + dst_shift = dst->index & 7; + + src_count = src->length; + src_shift = src->index & 7; + + while (dst_count) + { + bits = 0; + + if (src_count) { + bits = *src_ptr >> src_shift; + + if (src_shift && src_count > (uacpi_u32)(8 - src_shift)) + bits |= *(src_ptr + 1) << (8 - src_shift); + + if (src_count < 8) { + bits &= (1 << src_count) - 1; + src_count = 0; + } else { + src_count -= 8; + src_ptr++; + } + } + + dst_mask = (dst_count < 8 ? (1 << dst_count) - 1 : 0xFF) << dst_shift; + *dst_ptr = (*dst_ptr & ~dst_mask) | ((bits << dst_shift) & dst_mask); + + if (dst_shift && dst_count > (uacpi_u32)(8 - dst_shift)) { + dst_mask >>= 8; + *(dst_ptr + 1) &= ~dst_mask; + *(dst_ptr + 1) |= (bits >> (8 - dst_shift)) & dst_mask; + } + + dst_count = dst_count > 8 ? 
dst_count - 8 : 0; + ++dst_ptr; + } +} + +static void do_misaligned_buffer_read( + const uacpi_buffer_field *field, uacpi_u8 *dst +) +{ + struct bit_span src_span = { 0 }; + struct bit_span dst_span = { 0 }; + + src_span.index = field->bit_index; + src_span.length = field->bit_length; + src_span.const_data = field->backing->data; + + dst_span.data = dst; + dst_span.length = uacpi_round_up_bits_to_bytes(field->bit_length) * 8; + bit_copy(&dst_span, &src_span); +} + +void uacpi_read_buffer_field( + const uacpi_buffer_field *field, void *dst +) +{ + if (!(field->bit_index & 7)) { + uacpi_u8 *src = field->backing->data; + uacpi_size count; + + count = uacpi_round_up_bits_to_bytes(field->bit_length); + uacpi_memcpy(dst, src + (field->bit_index / 8), count); + cut_misaligned_tail(dst, count - 1, field->bit_length); + return; + } + + do_misaligned_buffer_read(field, dst); +} + +static void do_write_misaligned_buffer_field( + uacpi_buffer_field *field, + const void *src, uacpi_size size +) +{ + struct bit_span src_span = { 0 }; + struct bit_span dst_span = { 0 }; + + src_span.length = size * 8; + src_span.const_data = src; + + dst_span.index = field->bit_index; + dst_span.length = field->bit_length; + dst_span.data = field->backing->data; + + bit_copy(&dst_span, &src_span); +} + +void uacpi_write_buffer_field( + uacpi_buffer_field *field, + const void *src, uacpi_size size +) +{ + if (!(field->bit_index & 7)) { + uacpi_u8 *dst, last_byte, tail_shift; + uacpi_size count; + + dst = field->backing->data; + dst += field->bit_index / 8; + count = uacpi_round_up_bits_to_bytes(field->bit_length); + + last_byte = dst[count - 1]; + tail_shift = field->bit_length & 7; + + uacpi_memcpy_zerout(dst, src, count, size); + if (tail_shift) { + uacpi_u8 last_shift = 8 - tail_shift; + dst[count - 1] = dst[count - 1] << last_shift; + dst[count - 1] >>= last_shift; + dst[count - 1] |= (last_byte >> tail_shift) << tail_shift; + } + + return; + } + + do_write_misaligned_buffer_field(field, src, size); +} + +static uacpi_status access_field_unit( + uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op, + union uacpi_opregion_io_data data +) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (field->lock_rule) { + ret = uacpi_acquire_aml_mutex( + g_uacpi_rt_ctx.global_lock_mutex, 0xFFFF + ); + if (uacpi_unlikely_error(ret)) + return ret; + } + + switch (field->kind) { + case UACPI_FIELD_UNIT_KIND_BANK: + ret = uacpi_write_field_unit( + field->bank_selection, &field->bank_value, sizeof(field->bank_value), + UACPI_NULL + ); + break; + case UACPI_FIELD_UNIT_KIND_NORMAL: + break; + case UACPI_FIELD_UNIT_KIND_INDEX: + ret = uacpi_write_field_unit( + field->index, &offset, sizeof(offset), + UACPI_NULL + ); + if (uacpi_unlikely_error(ret)) + goto out; + + switch (op) { + case UACPI_REGION_OP_READ: + ret = uacpi_read_field_unit( + field->data, data.integer, field->access_width_bytes, + UACPI_NULL + ); + break; + case UACPI_REGION_OP_WRITE: + ret = uacpi_write_field_unit( + field->data, data.integer, field->access_width_bytes, + UACPI_NULL + ); + break; + default: + ret = UACPI_STATUS_INVALID_ARGUMENT; + break; + } + + goto out; + + default: + uacpi_error("invalid field unit kind %d\n", field->kind); + ret = UACPI_STATUS_INVALID_ARGUMENT; + } + + if (uacpi_unlikely_error(ret)) + goto out; + + ret = uacpi_dispatch_opregion_io(field, offset, op, data); + +out: + if (field->lock_rule) + uacpi_release_aml_mutex(g_uacpi_rt_ctx.global_lock_mutex); + return ret; +} + +#define SERIAL_HEADER_SIZE 2 +#define IPMI_DATA_SIZE 64 + +static 
uacpi_status wtr_buffer_size( + uacpi_field_unit *field, uacpi_address_space space, + uacpi_size *out_size +) +{ + switch (space) { + case UACPI_ADDRESS_SPACE_IPMI: + *out_size = SERIAL_HEADER_SIZE + IPMI_DATA_SIZE; + break; + case UACPI_ADDRESS_SPACE_PRM: + *out_size = 26; + break; + case UACPI_ADDRESS_SPACE_FFIXEDHW: + *out_size = 256; + break; + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + case UACPI_ADDRESS_SPACE_SMBUS: { + uacpi_size size_for_protocol = SERIAL_HEADER_SIZE; + + switch (field->attributes) { + case UACPI_ACCESS_ATTRIBUTE_QUICK: + break; // + 0 + case UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE: + case UACPI_ACCESS_ATTRIBUTE_BYTE: + size_for_protocol += 1; + break; + + case UACPI_ACCESS_ATTRIBUTE_WORD: + case UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL: + size_for_protocol += 2; + break; + + case UACPI_ACCESS_ATTRIBUTE_BYTES: + size_for_protocol += field->access_length; + break; + + case UACPI_ACCESS_ATTRIBUTE_BLOCK: + case UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL: + case UACPI_ACCESS_ATTRIBUTE_RAW_BYTES: + case UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES: + size_for_protocol += 255; + break; + + default: + uacpi_error( + "unsupported field@%p access attribute %d\n", + field, field->attributes + ); + return UACPI_STATUS_UNIMPLEMENTED; + } + + *out_size = size_for_protocol; + break; + } + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status handle_special_field( + uacpi_field_unit *field, uacpi_data_view buf, + uacpi_region_op op, uacpi_data_view *wtr_response, + uacpi_bool *did_handle +) +{ + uacpi_status ret = UACPI_STATUS_OK; + uacpi_object *obj; + uacpi_operation_region *region; + uacpi_u64 in_out; + uacpi_data_view wtr_buffer; + union uacpi_opregion_io_data data; + + *did_handle = UACPI_FALSE; + + if (field->kind == UACPI_FIELD_UNIT_KIND_INDEX) + return ret; + + obj = uacpi_namespace_node_get_object_typed( + field->region, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + uacpi_trace_region_error( + field->region, "attempted access to deleted", ret + ); + goto out_handled; + } + region = obj->op_region; + + switch (region->space) { + case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO: + if (op == UACPI_REGION_OP_WRITE) { + uacpi_memcpy_zerout( + &in_out, buf.const_data, sizeof(in_out), buf.length + ); + } + + data.integer = &in_out; + ret = access_field_unit(field, 0, op, data); + if (uacpi_unlikely_error(ret)) + goto out_handled; + + if (op == UACPI_REGION_OP_READ) + uacpi_memcpy_zerout(buf.data, &in_out, buf.length, sizeof(in_out)); + goto out_handled; + case UACPI_ADDRESS_SPACE_IPMI: + case UACPI_ADDRESS_SPACE_PRM: + if (uacpi_unlikely(op == UACPI_REGION_OP_READ)) { + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + uacpi_trace_region_error( + field->region, "attempted to read from a write-only", ret + ); + goto out_handled; + } + UACPI_FALLTHROUGH; + case UACPI_ADDRESS_SPACE_FFIXEDHW: + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + case UACPI_ADDRESS_SPACE_SMBUS: + goto do_wtr; + default: + return ret; + } + +do_wtr: + ret = wtr_buffer_size(field, region->space, &wtr_buffer.length); + if (uacpi_unlikely_error(ret)) + goto out_handled; + + wtr_buffer.data = uacpi_kernel_alloc(wtr_buffer.length); + if (uacpi_unlikely(wtr_buffer.data == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out_handled; + } + + uacpi_memcpy_zerout( + wtr_buffer.data, buf.const_data, wtr_buffer.length, buf.length + ); + data.buffer = wtr_buffer; + ret = access_field_unit( + 
field, field->byte_offset,
+        op, data
+    );
+    if (uacpi_unlikely_error(ret)) {
+        uacpi_free(wtr_buffer.data, wtr_buffer.length);
+        goto out_handled;
+    }
+
+    if (wtr_response != UACPI_NULL)
+        *wtr_response = wtr_buffer;
+
+out_handled:
+    *did_handle = UACPI_TRUE;
+    return ret;
+}
+
+static uacpi_status do_read_misaligned_field_unit(
+    uacpi_field_unit *field, uacpi_u8 *dst, uacpi_size size
+)
+{
+    uacpi_status ret;
+    uacpi_size reads_to_do;
+    uacpi_u64 out;
+    uacpi_u32 byte_offset = field->byte_offset;
+    uacpi_u32 bits_left = field->bit_length;
+    uacpi_u8 width_access_bits = field->access_width_bytes * 8;
+
+    struct bit_span src_span = { 0 };
+    struct bit_span dst_span = { 0 };
+
+    src_span.data = (uacpi_u8*)&out;
+    src_span.index = field->bit_offset_within_first_byte;
+
+    dst_span.data = dst;
+    dst_span.index = 0;
+    dst_span.length = size * 8;
+
+    reads_to_do = UACPI_ALIGN_UP(
+        field->bit_offset_within_first_byte + field->bit_length,
+        width_access_bits,
+        uacpi_u32
+    );
+    reads_to_do /= width_access_bits;
+
+    while (reads_to_do-- > 0) {
+        union uacpi_opregion_io_data data;
+
+        src_span.length = UACPI_MIN(
+            bits_left, width_access_bits - src_span.index
+        );
+
+        data.integer = &out;
+        ret = access_field_unit(
+            field, byte_offset, UACPI_REGION_OP_READ,
+            data
+        );
+        if (uacpi_unlikely_error(ret))
+            return ret;
+
+        bit_copy(&dst_span, &src_span);
+        bits_left -= src_span.length;
+        src_span.index = 0;
+
+        bit_span_offset(&dst_span, src_span.length);
+        byte_offset += field->access_width_bytes;
+    }
+
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_read_field_unit(
+    uacpi_field_unit *field, void *dst, uacpi_size size,
+    uacpi_data_view *wtr_response
+)
+{
+    uacpi_status ret;
+    uacpi_u32 field_byte_length;
+    uacpi_bool did_handle;
+    uacpi_data_view data_view = { 0 };
+
+    data_view.data = dst;
+    data_view.length = size;
+
+    ret = handle_special_field(
+        field, data_view, UACPI_REGION_OP_READ,
+        wtr_response, &did_handle
+    );
+    if (did_handle)
+        return ret;
+
+    field_byte_length = uacpi_round_up_bits_to_bytes(field->bit_length);
+
+    /*
+     * Very simple fast case:
+     * - Bit offset within first byte is 0
+     * AND
+     * - Field size is <= access width
+     */
+    if (field->bit_offset_within_first_byte == 0 &&
+        field_byte_length <= field->access_width_bytes)
+    {
+        uacpi_u64 out;
+        union uacpi_opregion_io_data data;
+
+        data.integer = &out;
+        ret = access_field_unit(
+            field, field->byte_offset, UACPI_REGION_OP_READ,
+            data
+        );
+        if (uacpi_unlikely_error(ret))
+            return ret;
+
+        uacpi_memcpy_zerout(dst, &out, size, field_byte_length);
+        if (size >= field_byte_length)
+            cut_misaligned_tail(dst, field_byte_length - 1, field->bit_length);
+
+        return UACPI_STATUS_OK;
+    }
+
+    // Slow case
+    return do_read_misaligned_field_unit(field, dst, size);
+}
+
+static uacpi_status write_generic_field_unit(
+    uacpi_field_unit *field, const void *src, uacpi_size size
+)
+{
+    uacpi_status ret;
+    uacpi_u32 bits_left, byte_offset = field->byte_offset;
+    uacpi_u8 width_access_bits = field->access_width_bytes * 8;
+    uacpi_u64 in;
+    struct bit_span src_span = { 0 };
+    struct bit_span dst_span = { 0 };
+
+    src_span.const_data = src;
+    src_span.index = 0;
+    src_span.length = size * 8;
+
+    dst_span.data = (uacpi_u8 *)&in;
+    dst_span.index = field->bit_offset_within_first_byte;
+
+    bits_left = field->bit_length;
+
+    while (bits_left) {
+        union uacpi_opregion_io_data data;
+
+        in = 0;
+        dst_span.length = UACPI_MIN(
+            width_access_bits - dst_span.index, bits_left
+        );
+
+        if (dst_span.index != 0 || dst_span.length < width_access_bits) {
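+            /*
+             * Partial access unit: decide what the bits we are not
+             * writing should contain based on the field's update rule.
+             */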
+            switch (field->update_rule) {
+            case UACPI_UPDATE_RULE_PRESERVE:
+                data.integer = &in;
+                ret = access_field_unit(
+                    field, byte_offset, UACPI_REGION_OP_READ,
+                    data
+                );
+                if (uacpi_unlikely_error(ret))
+                    return ret;
+                break;
+            case UACPI_UPDATE_RULE_WRITE_AS_ONES:
+                in = ~in;
+                break;
+            case UACPI_UPDATE_RULE_WRITE_AS_ZEROES:
+                break;
+            default:
+                uacpi_error("invalid field@%p update rule %d\n",
+                            field, field->update_rule);
+                return UACPI_STATUS_INVALID_ARGUMENT;
+            }
+        }
+
+        bit_copy(&dst_span, &src_span);
+        bit_span_offset(&src_span, dst_span.length);
+
+        data.integer = &in;
+
+        ret = access_field_unit(
+            field, byte_offset, UACPI_REGION_OP_WRITE,
+            data
+        );
+        if (uacpi_unlikely_error(ret))
+            return ret;
+
+        bits_left -= dst_span.length;
+        dst_span.index = 0;
+        byte_offset += field->access_width_bytes;
+    }
+
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_write_field_unit(
+    uacpi_field_unit *field, const void *src, uacpi_size size,
+    uacpi_data_view *wtr_response
+)
+{
+    uacpi_status ret;
+    uacpi_bool did_handle;
+    uacpi_data_view data_view = { 0 };
+
+    data_view.const_data = src;
+    data_view.length = size;
+
+    ret = handle_special_field(
+        field, data_view, UACPI_REGION_OP_WRITE,
+        wtr_response, &did_handle
+    );
+    if (did_handle)
+        return ret;
+
+    return write_generic_field_unit(field, src, size);
+}
+
+uacpi_status uacpi_field_unit_get_read_type(
+    struct uacpi_field_unit *field, uacpi_object_type *out_type
+)
+{
+    uacpi_object *obj;
+
+    if (field->kind == UACPI_FIELD_UNIT_KIND_INDEX)
+        goto out_basic_field;
+
+    obj = uacpi_namespace_node_get_object_typed(
+        field->region, UACPI_OBJECT_OPERATION_REGION_BIT
+    );
+    if (uacpi_unlikely(obj == UACPI_NULL))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    if (uacpi_is_buffer_access_address_space(obj->op_region->space)) {
+        *out_type = UACPI_OBJECT_BUFFER;
+        return UACPI_STATUS_OK;
+    }
+
+out_basic_field:
+    if (field->bit_length > (g_uacpi_rt_ctx.is_rev1 ? 32u : 64u))
+        *out_type = UACPI_OBJECT_BUFFER;
+    else
+        *out_type = UACPI_OBJECT_INTEGER;
+
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_field_unit_get_bit_length(
+    struct uacpi_field_unit *field, uacpi_size *out_length
+)
+{
+    uacpi_object *obj;
+
+    if (field->kind == UACPI_FIELD_UNIT_KIND_INDEX)
+        goto out_basic_field;
+
+    obj = uacpi_namespace_node_get_object_typed(
+        field->region, UACPI_OBJECT_OPERATION_REGION_BIT
+    );
+    if (uacpi_unlikely(obj == UACPI_NULL))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    if (uacpi_is_buffer_access_address_space(obj->op_region->space)) {
+        /*
+         * Bit length is protocol specific, the data will be returned
+         * via the write-then-read response buffer.
+         */
+        *out_length = 0;
+        return UACPI_STATUS_OK;
+    }
+
+out_basic_field:
+    *out_length = field->bit_length;
+    return UACPI_STATUS_OK;
+}
+
+static uacpi_u8 gas_get_access_bit_width(const struct acpi_gas *gas)
+{
+    /*
+     * Same algorithm as ACPICA.
+     *
+     * The reason we do this is apparently GAS bit offset being non-zero means
+     * that it's an APEI register, as opposed to FADT, which needs special
+     * handling. In the case of a FADT register we want to ignore the specified
+     * access size.
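+     * If no access size is specified either, we fall back to a power-of-two
+     * width derived from the register's bit span, halving it until it is
+     * aligned to the address given in the GAS.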
+ */ + uacpi_u8 access_bit_width; + + if (gas->register_bit_offset == 0 && + UACPI_IS_POWER_OF_TWO(gas->register_bit_width, uacpi_u8) && + UACPI_IS_ALIGNED(gas->register_bit_width, 8, uacpi_u8)) { + access_bit_width = gas->register_bit_width; + } else if (gas->access_size) { + access_bit_width = gas->access_size * 8; + } else { + uacpi_u8 msb; + + msb = uacpi_bit_scan_backward( + (gas->register_bit_offset + gas->register_bit_width) - 1 + ); + access_bit_width = 1 << msb; + + if (access_bit_width <= 8) { + access_bit_width = 8; + } else { + /* + * Keep backing off to previous power of two until we find one + * that is aligned to the address specified in GAS. + */ + while (!UACPI_IS_ALIGNED( + gas->address, access_bit_width / 8, uacpi_u64 + )) + access_bit_width /= 2; + } + } + + return UACPI_MIN( + access_bit_width, + gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO ? 32 : 64 + ); +} + +static uacpi_status gas_validate( + const struct acpi_gas *gas, uacpi_u8 *access_bit_width, + uacpi_u8 *bit_width +) +{ + uacpi_size total_width, aligned_width; + + if (uacpi_unlikely(gas == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (!gas->address) + return UACPI_STATUS_NOT_FOUND; + + if (gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_IO && + gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY) { + uacpi_warn("unsupported GAS address space '%s' (%d)\n", + uacpi_address_space_to_string(gas->address_space_id), + gas->address_space_id); + return UACPI_STATUS_UNIMPLEMENTED; + } + + if (gas->access_size > 4) { + uacpi_warn("unsupported GAS access size %d\n", + gas->access_size); + return UACPI_STATUS_UNIMPLEMENTED; + } + + *access_bit_width = gas_get_access_bit_width(gas); + + total_width = gas->register_bit_offset + gas->register_bit_width; + aligned_width = UACPI_ALIGN_UP(total_width, *access_bit_width, uacpi_size); + + if (uacpi_unlikely(aligned_width > 64)) { + uacpi_warn( + "GAS register total width is too large: %zu\n", total_width + ); + return UACPI_STATUS_UNIMPLEMENTED; + } + + *bit_width = total_width; + return UACPI_STATUS_OK; +} + +/* + * Apparently both reading and writing GAS works differently from operation + * region in that bit offsets are not respected when writing the data. + * + * Let's follow ACPICA's approach here so that we don't accidentally + * break any quirky hardware. 
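+ *
+ * Concretely, the register bit offset below only skips whole access-width
+ * chunks of data; values are never shifted by the offset itself.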
+ */ +uacpi_status uacpi_gas_read_mapped( + const uacpi_mapped_gas *gas, uacpi_u64 *out_value +) +{ + uacpi_status ret; + uacpi_u8 access_byte_width; + uacpi_u8 bit_offset, bits_left, index = 0; + uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF; + uacpi_size offset = 0; + + bit_offset = gas->bit_offset; + bits_left = gas->total_bit_width; + + access_byte_width = gas->access_bit_width / 8; + + if (access_byte_width < 8) + mask = ~(mask << gas->access_bit_width); + + *out_value = 0; + + while (bits_left) { + if (bit_offset >= gas->access_bit_width) { + data = 0; + bit_offset -= gas->access_bit_width; + } else { + ret = gas->read(gas->mapping, offset, access_byte_width, &data); + if (uacpi_unlikely_error(ret)) + return ret; + } + + *out_value |= (data & mask) << (index * gas->access_bit_width); + bits_left -= UACPI_MIN(bits_left, gas->access_bit_width); + ++index; + offset += access_byte_width; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_gas_write_mapped( + const uacpi_mapped_gas *gas, uacpi_u64 in_value +) +{ + uacpi_status ret; + uacpi_u8 access_byte_width; + uacpi_u8 bit_offset, bits_left, index = 0; + uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF; + uacpi_size offset = 0; + + bit_offset = gas->bit_offset; + bits_left = gas->total_bit_width; + access_byte_width = gas->access_bit_width / 8; + + if (access_byte_width < 8) + mask = ~(mask << gas->access_bit_width); + + while (bits_left) { + data = (in_value >> (index * gas->access_bit_width)) & mask; + + if (bit_offset >= gas->access_bit_width) { + bit_offset -= gas->access_bit_width; + } else { + ret = gas->write(gas->mapping, offset, access_byte_width, data); + if (uacpi_unlikely_error(ret)) + return ret; + } + + bits_left -= UACPI_MIN(bits_left, gas->access_bit_width); + ++index; + offset += access_byte_width; + } + + return UACPI_STATUS_OK; +} + +static void unmap_gas_io(uacpi_handle io_handle, uacpi_size size) +{ + UACPI_UNUSED(size); + uacpi_kernel_io_unmap(io_handle); +} + +uacpi_status uacpi_map_gas_noalloc( + const struct acpi_gas *gas, uacpi_mapped_gas *out_mapped +) +{ + uacpi_status ret; + uacpi_u8 access_bit_width, total_width; + + ret = gas_validate(gas, &access_bit_width, &total_width); + if (ret != UACPI_STATUS_OK) + return ret; + + if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_MEMORY) { + out_mapped->mapping = uacpi_kernel_map(gas->address, total_width / 8); + if (uacpi_unlikely(out_mapped->mapping == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + out_mapped->read = uacpi_system_memory_read; + out_mapped->write = uacpi_system_memory_write; + out_mapped->unmap = uacpi_kernel_unmap; + } else { // IO, validated by gas_validate above + ret = uacpi_kernel_io_map(gas->address, total_width / 8, &out_mapped->mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + out_mapped->read = uacpi_system_io_read; + out_mapped->write = uacpi_system_io_write; + out_mapped->unmap = unmap_gas_io; + } + + out_mapped->access_bit_width = access_bit_width; + out_mapped->total_bit_width = total_width; + out_mapped->bit_offset = gas->register_bit_offset; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_map_gas( + const struct acpi_gas *gas, uacpi_mapped_gas **out_mapped +) +{ + uacpi_status ret; + uacpi_mapped_gas *mapping; + + mapping = uacpi_kernel_alloc(sizeof(*mapping)); + if (uacpi_unlikely(mapping == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = uacpi_map_gas_noalloc(gas, mapping); + if (uacpi_unlikely_error(ret)) { + uacpi_free(mapping, sizeof(*mapping)); + return ret; + } + + *out_mapped = mapping; + 
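+    /*
+     * The caller now owns the mapping and must release it with
+     * uacpi_unmap_gas(). An illustrative usage sketch (error handling
+     * elided, 'my_gas' is a made-up variable):
+     *
+     *     uacpi_mapped_gas *reg;
+     *     uacpi_u64 value;
+     *
+     *     uacpi_map_gas(my_gas, &reg);
+     *     uacpi_gas_read_mapped(reg, &value);
+     *     uacpi_unmap_gas(reg);
+     */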
return ret; +} + +void uacpi_unmap_gas_nofree(uacpi_mapped_gas *gas) +{ + gas->unmap(gas->mapping, gas->access_bit_width / 8); +} + +void uacpi_unmap_gas(uacpi_mapped_gas *gas) +{ + uacpi_unmap_gas_nofree(gas); + uacpi_free(gas, sizeof(*gas)); +} + +uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *out_value) +{ + uacpi_status ret; + uacpi_mapped_gas mapping; + + ret = uacpi_map_gas_noalloc(gas, &mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_gas_read_mapped(&mapping, out_value); + uacpi_unmap_gas_nofree(&mapping); + + return ret; +} + +uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 in_value) +{ + uacpi_status ret; + uacpi_mapped_gas mapping; + + ret = uacpi_map_gas_noalloc(gas, &mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_gas_write_mapped(&mapping, in_value); + uacpi_unmap_gas_nofree(&mapping); + + return ret; +} + +uacpi_status uacpi_system_memory_read( + void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +) +{ + ptr = UACPI_PTR_ADD(ptr, offset); + + switch (width) { + case 1: + *out = *(volatile uacpi_u8*)ptr; + break; + case 2: + *out = *(volatile uacpi_u16*)ptr; + break; + case 4: + *out = *(volatile uacpi_u32*)ptr; + break; + case 8: + *out = *(volatile uacpi_u64*)ptr; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_system_memory_write( + void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +) +{ + ptr = UACPI_PTR_ADD(ptr, offset); + + switch (width) { + case 1: + *(volatile uacpi_u8*)ptr = in; + break; + case 2: + *(volatile uacpi_u16*)ptr = in; + break; + case 4: + *(volatile uacpi_u32*)ptr = in; + break; + case 8: + *(volatile uacpi_u64*)ptr = in; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +union integer_data { + uacpi_u8 byte; + uacpi_u16 word; + uacpi_u32 dword; + uacpi_u64 qword; +}; + +uacpi_status uacpi_system_io_read( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +) +{ + uacpi_status ret; + union integer_data data = { + .qword = 0, + }; + + switch (width) { + case 1: + ret = uacpi_kernel_io_read8(handle, offset, &data.byte); + break; + case 2: + ret = uacpi_kernel_io_read16(handle, offset, &data.word); + break; + case 4: + ret = uacpi_kernel_io_read32(handle, offset, &data.dword); + break; + default: + uacpi_error( + "invalid SystemIO read %p@%zu width=%d\n", + handle, offset, width + ); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (uacpi_likely_success(ret)) + *out = data.qword; + return ret; +} + +uacpi_status uacpi_system_io_write( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +) +{ + uacpi_status ret; + + switch (width) { + case 1: + ret = uacpi_kernel_io_write8(handle, offset, in); + break; + case 2: + ret = uacpi_kernel_io_write16(handle, offset, in); + break; + case 4: + ret = uacpi_kernel_io_write32(handle, offset, in); + break; + default: + uacpi_error( + "invalid SystemIO write %p@%zu width=%d\n", + handle, offset, width + ); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return ret; +} + +uacpi_status uacpi_pci_read( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +) +{ + uacpi_status ret; + union integer_data data = { + .qword = 0, + }; + + switch (width) { + case 1: + ret = uacpi_kernel_pci_read8(handle, offset, &data.byte); + break; + case 2: + ret = uacpi_kernel_pci_read16(handle, offset, &data.word); + break; + case 4: + ret = 
uacpi_kernel_pci_read32(handle, offset, &data.dword); + break; + default: + uacpi_error( + "invalid PCI_Config read %p@%zu width=%d\n", + handle, offset, width + ); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (uacpi_likely_success(ret)) + *out = data.qword; + return ret; +} + +uacpi_status uacpi_pci_write( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +) +{ + uacpi_status ret; + + switch (width) { + case 1: + ret = uacpi_kernel_pci_write8(handle, offset, in); + break; + case 2: + ret = uacpi_kernel_pci_write16(handle, offset, in); + break; + case 4: + ret = uacpi_kernel_pci_write32(handle, offset, in); + break; + default: + uacpi_error( + "invalid PCI_Config write %p@%zu width=%d\n", + handle, offset, width + ); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return ret; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/mutex.c b/sys/dev/acpi/uacpi/mutex.c new file mode 100644 index 0000000..44cbac3 --- /dev/null +++ b/sys/dev/acpi/uacpi/mutex.c @@ -0,0 +1,396 @@ +#include <uacpi/platform/atomic.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/registers.h> +#include <uacpi/internal/context.h> +#include <uacpi/kernel_api.h> +#include <uacpi/internal/namespace.h> + +#ifndef UACPI_BAREBONES_MODE + +#ifndef UACPI_REDUCED_HARDWARE + +#define GLOBAL_LOCK_PENDING (1 << 0) + +#define GLOBAL_LOCK_OWNED_BIT 1 +#define GLOBAL_LOCK_OWNED (1 << GLOBAL_LOCK_OWNED_BIT) + +#define GLOBAL_LOCK_MASK 3u + +static uacpi_bool try_acquire_global_lock_from_firmware(uacpi_u32 *lock) +{ + uacpi_u32 value, new_value; + uacpi_bool was_owned; + + value = *(volatile uacpi_u32*)lock; + do { + was_owned = (value & GLOBAL_LOCK_OWNED) >> GLOBAL_LOCK_OWNED_BIT; + + // Clear both owned & pending bits. + new_value = value & ~GLOBAL_LOCK_MASK; + + // Set owned unconditionally + new_value |= GLOBAL_LOCK_OWNED; + + // Set pending iff the lock was owned at the time of reading + if (was_owned) + new_value |= GLOBAL_LOCK_PENDING; + } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value)); + + return !was_owned; +} + +static uacpi_bool do_release_global_lock_to_firmware(uacpi_u32 *lock) +{ + uacpi_u32 value, new_value; + + value = *(volatile uacpi_u32*)lock; + do { + new_value = value & ~GLOBAL_LOCK_MASK; + } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value)); + + return value & GLOBAL_LOCK_PENDING; +} + +static uacpi_status uacpi_acquire_global_lock_from_firmware(void) +{ + uacpi_cpu_flags flags; + uacpi_u16 spins = 0; + uacpi_bool success; + + if (!g_uacpi_rt_ctx.has_global_lock) + return UACPI_STATUS_OK; + + flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock); + for (;;) { + spins++; + uacpi_trace( + "trying to acquire the global lock from firmware... 
(attempt %u)\n", + spins + ); + + success = try_acquire_global_lock_from_firmware( + &g_uacpi_rt_ctx.facs->global_lock + ); + if (success) + break; + + if (uacpi_unlikely(spins == 0xFFFF)) + break; + + g_uacpi_rt_ctx.global_lock_pending = UACPI_TRUE; + uacpi_trace( + "global lock is owned by firmware, waiting for a release " + "notification...\n" + ); + uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags); + + uacpi_kernel_wait_for_event(g_uacpi_rt_ctx.global_lock_event, 0xFFFF); + flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock); + } + + g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE; + uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags); + + if (uacpi_unlikely(!success)) { + uacpi_error("unable to acquire global lock after %u attempts\n", spins); + return UACPI_STATUS_HARDWARE_TIMEOUT; + } + + uacpi_trace("global lock successfully acquired after %u attempt%s\n", + spins, spins > 1 ? "s" : ""); + return UACPI_STATUS_OK; +} + +static void uacpi_release_global_lock_to_firmware(void) +{ + if (!g_uacpi_rt_ctx.has_global_lock) + return; + + uacpi_trace("releasing the global lock to firmware...\n"); + if (do_release_global_lock_to_firmware(&g_uacpi_rt_ctx.facs->global_lock)) { + uacpi_trace("notifying firmware of the global lock release since the " + "pending bit was set\n"); + uacpi_write_register_field(UACPI_REGISTER_FIELD_GBL_RLS, 1); + } +} +#endif + +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_acquire_global_lock_from_firmware(void) +) +UACPI_STUB_IF_REDUCED_HARDWARE( + void uacpi_release_global_lock_to_firmware(void) +) + +uacpi_status uacpi_acquire_native_mutex_with_timeout( + uacpi_handle mtx, uacpi_u16 timeout +) +{ + uacpi_status ret; + + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_kernel_acquire_mutex(mtx, timeout); + if (uacpi_likely_success(ret)) + return ret; + + if (uacpi_unlikely(ret != UACPI_STATUS_TIMEOUT || timeout == 0xFFFF)) { + uacpi_error( + "unexpected status %08X (%s) while acquiring %p (timeout=%04X)\n", + ret, uacpi_status_to_string(ret), mtx, timeout + ); + } + + return ret; +} + +uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(out_seq == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_acquire_native_mutex_with_timeout( + g_uacpi_rt_ctx.global_lock_mutex->handle, timeout + ); + if (ret != UACPI_STATUS_OK) + return ret; + + ret = uacpi_acquire_global_lock_from_firmware(); + if (uacpi_unlikely_error(ret)) { + uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle); + return ret; + } + + if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_seq_num == 0xFFFFFFFF)) + g_uacpi_rt_ctx.global_lock_seq_num = 0; + + *out_seq = g_uacpi_rt_ctx.global_lock_seq_num++; + g_uacpi_rt_ctx.global_lock_acquired = UACPI_TRUE; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_release_global_lock(uacpi_u32 seq) +{ + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(!g_uacpi_rt_ctx.global_lock_acquired || + seq != g_uacpi_rt_ctx.global_lock_seq_num)) + return UACPI_STATUS_INVALID_ARGUMENT; + + g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE; + uacpi_release_global_lock_to_firmware(); + uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle); + + return UACPI_STATUS_OK; +} + +uacpi_bool 
uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex) +{ + uacpi_thread_id id; + + id = UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner); + return id == uacpi_kernel_get_thread_id(); +} + +uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout) +{ + uacpi_thread_id this_id; + uacpi_status ret = UACPI_STATUS_OK; + + this_id = uacpi_kernel_get_thread_id(); + if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) { + if (uacpi_unlikely(mutex->depth == 0xFFFF)) { + uacpi_warn( + "failing an attempt to acquire mutex @%p, too many recursive " + "acquires\n", mutex + ); + return UACPI_STATUS_DENIED; + } + + mutex->depth++; + return ret; + } + + uacpi_namespace_write_unlock(); + ret = uacpi_acquire_native_mutex_with_timeout(mutex->handle, timeout); + if (ret != UACPI_STATUS_OK) + goto out; + + if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) { + ret = uacpi_acquire_global_lock_from_firmware(); + if (uacpi_unlikely_error(ret)) { + uacpi_release_native_mutex(mutex->handle); + goto out; + } + } + + UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id); + mutex->depth = 1; + +out: + uacpi_namespace_write_lock(); + return ret; +} + +uacpi_status uacpi_release_aml_mutex(uacpi_mutex *mutex) +{ + if (mutex->depth-- > 1) + return UACPI_STATUS_OK; + + if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) + uacpi_release_global_lock_to_firmware(); + + UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE); + uacpi_release_native_mutex(mutex->handle); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock) +{ + lock->mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(lock->mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + lock->owner = UACPI_THREAD_ID_NONE; + lock->depth = 0; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock) +{ + if (uacpi_unlikely(lock->depth)) { + uacpi_warn( + "de-initializing active recursive lock %p with depth=%zu\n", + lock, lock->depth + ); + lock->depth = 0; + } + + lock->owner = UACPI_THREAD_ID_NONE; + + if (lock->mutex != UACPI_NULL) { + uacpi_kernel_free_mutex(lock->mutex); + lock->mutex = UACPI_NULL; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock) +{ + uacpi_thread_id this_id; + uacpi_status ret = UACPI_STATUS_OK; + + this_id = uacpi_kernel_get_thread_id(); + if (UACPI_ATOMIC_LOAD_THREAD_ID(&lock->owner) == this_id) { + lock->depth++; + return ret; + } + + ret = uacpi_acquire_native_mutex(lock->mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, this_id); + lock->depth = 1; + return ret; +} + +uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock) +{ + if (lock->depth-- > 1) + return UACPI_STATUS_OK; + + UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, UACPI_THREAD_ID_NONE); + return uacpi_release_native_mutex(lock->mutex); +} + +uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock) +{ + lock->read_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(lock->read_mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + lock->write_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(lock->write_mutex == UACPI_NULL)) { + uacpi_kernel_free_mutex(lock->read_mutex); + lock->read_mutex = UACPI_NULL; + return UACPI_STATUS_OUT_OF_MEMORY; + } + + lock->num_readers = 0; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock 
*lock) +{ + if (uacpi_unlikely(lock->num_readers)) { + uacpi_warn("de-initializing rw_lock %p with %zu active readers\n", + lock, lock->num_readers); + lock->num_readers = 0; + } + + if (lock->read_mutex != UACPI_NULL) { + uacpi_kernel_free_mutex(lock->read_mutex); + lock->read_mutex = UACPI_NULL; + } + if (lock->write_mutex != UACPI_NULL) { + uacpi_kernel_free_mutex(lock->write_mutex); + lock->write_mutex = UACPI_NULL; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock) +{ + uacpi_status ret; + + ret = uacpi_acquire_native_mutex(lock->read_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + if (lock->num_readers++ == 0) { + ret = uacpi_acquire_native_mutex(lock->write_mutex); + if (uacpi_unlikely_error(ret)) + lock->num_readers = 0; + } + + uacpi_kernel_release_mutex(lock->read_mutex); + return ret; +} + +uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock) +{ + uacpi_status ret; + + ret = uacpi_acquire_native_mutex(lock->read_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + if (lock->num_readers-- == 1) + uacpi_release_native_mutex(lock->write_mutex); + + uacpi_kernel_release_mutex(lock->read_mutex); + return ret; +} + +uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock) +{ + return uacpi_acquire_native_mutex(lock->write_mutex); +} + +uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock) +{ + return uacpi_release_native_mutex(lock->write_mutex); +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/namespace.c b/sys/dev/acpi/uacpi/namespace.c new file mode 100644 index 0000000..e847dea --- /dev/null +++ b/sys/dev/acpi/uacpi/namespace.c @@ -0,0 +1,1081 @@ +#include <uacpi/namespace.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/types.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/interpreter.h> +#include <uacpi/internal/opregion.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/kernel_api.h> + +#ifndef UACPI_BAREBONES_MODE + +#define UACPI_REV_VALUE 2 +#define UACPI_OS_VALUE "Microsoft Windows NT" + +#define MAKE_PREDEFINED(c0, c1, c2, c3) \ + { \ + .name.text = { c0, c1, c2, c3 }, \ + .flags = UACPI_NAMESPACE_NODE_PREDEFINED \ + } + +static uacpi_namespace_node +predefined_namespaces[UACPI_PREDEFINED_NAMESPACE_MAX + 1] = { + [UACPI_PREDEFINED_NAMESPACE_ROOT] = MAKE_PREDEFINED('\\', 0, 0, 0), + [UACPI_PREDEFINED_NAMESPACE_GPE] = MAKE_PREDEFINED('_', 'G', 'P', 'E'), + [UACPI_PREDEFINED_NAMESPACE_PR] = MAKE_PREDEFINED('_', 'P', 'R', '_'), + [UACPI_PREDEFINED_NAMESPACE_SB] = MAKE_PREDEFINED('_', 'S', 'B', '_'), + [UACPI_PREDEFINED_NAMESPACE_SI] = MAKE_PREDEFINED('_', 'S', 'I', '_'), + [UACPI_PREDEFINED_NAMESPACE_TZ] = MAKE_PREDEFINED('_', 'T', 'Z', '_'), + [UACPI_PREDEFINED_NAMESPACE_GL] = MAKE_PREDEFINED('_', 'G', 'L', '_'), + [UACPI_PREDEFINED_NAMESPACE_OS] = MAKE_PREDEFINED('_', 'O', 'S', '_'), + [UACPI_PREDEFINED_NAMESPACE_OSI] = MAKE_PREDEFINED('_', 'O', 'S', 'I'), + [UACPI_PREDEFINED_NAMESPACE_REV] = MAKE_PREDEFINED('_', 'R', 'E', 'V'), +}; + +static struct uacpi_rw_lock namespace_lock; + +uacpi_status uacpi_namespace_read_lock(void) +{ + return uacpi_rw_lock_read(&namespace_lock); +} + +uacpi_status uacpi_namespace_read_unlock(void) +{ + return uacpi_rw_unlock_read(&namespace_lock); +} + +uacpi_status uacpi_namespace_write_lock(void) +{ + return uacpi_rw_lock_write(&namespace_lock); +} + +uacpi_status uacpi_namespace_write_unlock(void) +{ + return 
uacpi_rw_unlock_write(&namespace_lock); +} + +static uacpi_object *make_object_for_predefined( + enum uacpi_predefined_namespace ns +) +{ + uacpi_object *obj; + + switch (ns) { + case UACPI_PREDEFINED_NAMESPACE_ROOT: + /* + * The real root object is stored in the global context, whereas the \ + * node gets a placeholder uninitialized object instead. This is to + * protect against CopyObject(JUNK, \), so that all of the opregion and + * notify handlers are preserved if AML decides to do that. + */ + g_uacpi_rt_ctx.root_object = uacpi_create_object(UACPI_OBJECT_DEVICE); + if (uacpi_unlikely(g_uacpi_rt_ctx.root_object == UACPI_NULL)) + return UACPI_NULL; + + obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + break; + + case UACPI_PREDEFINED_NAMESPACE_OS: + obj = uacpi_create_object(UACPI_OBJECT_STRING); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + obj->buffer->text = uacpi_kernel_alloc(sizeof(UACPI_OS_VALUE)); + if (uacpi_unlikely(obj->buffer->text == UACPI_NULL)) { + uacpi_object_unref(obj); + return UACPI_NULL; + } + + obj->buffer->size = sizeof(UACPI_OS_VALUE); + uacpi_memcpy(obj->buffer->text, UACPI_OS_VALUE, obj->buffer->size); + break; + + case UACPI_PREDEFINED_NAMESPACE_REV: + obj = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + obj->integer = UACPI_REV_VALUE; + break; + + case UACPI_PREDEFINED_NAMESPACE_GL: + obj = uacpi_create_object(UACPI_OBJECT_MUTEX); + if (uacpi_likely(obj != UACPI_NULL)) { + uacpi_shareable_ref(obj->mutex); + g_uacpi_rt_ctx.global_lock_mutex = obj->mutex; + } + break; + + case UACPI_PREDEFINED_NAMESPACE_OSI: + obj = uacpi_create_object(UACPI_OBJECT_METHOD); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + obj->method->native_call = UACPI_TRUE; + obj->method->handler = uacpi_osi; + obj->method->args = 1; + break; + + default: + obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + break; + } + + return obj; +} + +static void namespace_node_detach_object(uacpi_namespace_node *node) +{ + uacpi_object *object; + + object = uacpi_namespace_node_get_object(node); + if (object != UACPI_NULL) { + if (object->type == UACPI_OBJECT_OPERATION_REGION) + uacpi_opregion_uninstall_handler(node); + + uacpi_object_unref(node->object); + node->object = UACPI_NULL; + } +} + +static void free_namespace_node(uacpi_handle handle) +{ + uacpi_namespace_node *node = handle; + + if (uacpi_likely(!uacpi_namespace_node_is_predefined(node))) { + uacpi_free(node, sizeof(*node)); + return; + } + + node->flags = UACPI_NAMESPACE_NODE_PREDEFINED; + node->object = UACPI_NULL; + node->parent = UACPI_NULL; + node->child = UACPI_NULL; + node->next = UACPI_NULL; +} + +uacpi_status uacpi_initialize_namespace(void) +{ + enum uacpi_predefined_namespace ns; + uacpi_object *obj; + uacpi_namespace_node *node; + uacpi_status ret; + + ret = uacpi_rw_lock_init(&namespace_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + for (ns = 0; ns <= UACPI_PREDEFINED_NAMESPACE_MAX; ns++) { + node = &predefined_namespaces[ns]; + uacpi_shareable_init(node); + + obj = make_object_for_predefined(ns); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + node->object = uacpi_create_internal_reference( + UACPI_REFERENCE_KIND_NAMED, obj + ); + if (uacpi_unlikely(node->object == UACPI_NULL)) { + uacpi_object_unref(obj); + return UACPI_STATUS_OUT_OF_MEMORY; + } + + uacpi_object_unref(obj); + } + + for (ns = UACPI_PREDEFINED_NAMESPACE_GPE; + ns <= UACPI_PREDEFINED_NAMESPACE_MAX; ns++) { + + /* + * Skip the 
installation of \_OSI if it was disabled by user. + * We still create the object, but it's not attached to the namespace. + */ + if (ns == UACPI_PREDEFINED_NAMESPACE_OSI && + uacpi_check_flag(UACPI_FLAG_NO_OSI)) + continue; + + uacpi_namespace_node_install( + uacpi_namespace_root(), &predefined_namespaces[ns] + ); + } + + return UACPI_STATUS_OK; +} + +void uacpi_deinitialize_namespace(void) +{ + uacpi_status ret; + uacpi_namespace_node *current, *next = UACPI_NULL; + uacpi_u32 depth = 1; + + current = uacpi_namespace_root(); + + ret = uacpi_namespace_write_lock(); + + while (depth) { + next = next == UACPI_NULL ? current->child : next->next; + + /* + * The previous value of 'next' was the last child of this subtree, + * we can now remove the entire scope of 'current->child' + */ + if (next == UACPI_NULL) { + depth--; + + // Wipe the subtree + while (current->child != UACPI_NULL) + uacpi_namespace_node_uninstall(current->child); + + // Reset the pointers back as if this iteration never happened + next = current; + current = current->parent; + + continue; + } + + /* + * We have more nodes to process, proceed to the next one, either the + * child of the 'next' node, if one exists, or its peer + */ + if (next->child) { + depth++; + current = next; + next = UACPI_NULL; + } + + // This node has no children, move on to its peer + } + + namespace_node_detach_object(uacpi_namespace_root()); + free_namespace_node(uacpi_namespace_root()); + + if (ret == UACPI_STATUS_OK) + uacpi_namespace_write_unlock(); + + uacpi_object_unref(g_uacpi_rt_ctx.root_object); + g_uacpi_rt_ctx.root_object = UACPI_NULL; + + uacpi_mutex_unref(g_uacpi_rt_ctx.global_lock_mutex); + g_uacpi_rt_ctx.global_lock_mutex = UACPI_NULL; + + uacpi_rw_lock_deinit(&namespace_lock); +} + +uacpi_namespace_node *uacpi_namespace_root(void) +{ + return &predefined_namespaces[UACPI_PREDEFINED_NAMESPACE_ROOT]; +} + +uacpi_namespace_node *uacpi_namespace_get_predefined( + enum uacpi_predefined_namespace ns +) +{ + if (uacpi_unlikely(ns > UACPI_PREDEFINED_NAMESPACE_MAX)) { + uacpi_warn("requested invalid predefined namespace %d\n", ns); + return UACPI_NULL; + } + + return &predefined_namespaces[ns]; +} + +uacpi_namespace_node *uacpi_namespace_node_alloc(uacpi_object_name name) +{ + uacpi_namespace_node *ret; + + ret = uacpi_kernel_alloc_zeroed(sizeof(*ret)); + if (uacpi_unlikely(ret == UACPI_NULL)) + return ret; + + uacpi_shareable_init(ret); + ret->name = name; + return ret; +} + +void uacpi_namespace_node_unref(uacpi_namespace_node *node) +{ + uacpi_shareable_unref_and_delete_if_last(node, free_namespace_node); +} + +uacpi_status uacpi_namespace_node_install( + uacpi_namespace_node *parent, + uacpi_namespace_node *node +) +{ + if (parent == UACPI_NULL) + parent = uacpi_namespace_root(); + + if (uacpi_unlikely(uacpi_namespace_node_is_dangling(node))) { + uacpi_warn("attempting to install a dangling namespace node %.4s\n", + node->name.text); + return UACPI_STATUS_NAMESPACE_NODE_DANGLING; + } + + if (parent->child == UACPI_NULL) { + parent->child = node; + } else { + uacpi_namespace_node *prev = parent->child; + + while (prev->next != UACPI_NULL) + prev = prev->next; + + prev->next = node; + } + + node->parent = parent; + return UACPI_STATUS_OK; +} + +uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node) +{ + return node->flags & UACPI_NAMESPACE_NODE_FLAG_ALIAS; +} + +uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node) +{ + return node->flags & UACPI_NAMESPACE_NODE_FLAG_DANGLING; +} + +uacpi_bool 
uacpi_namespace_node_is_temporary(uacpi_namespace_node *node) +{ + return node->flags & UACPI_NAMESPACE_NODE_FLAG_TEMPORARY; +} + +uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node) +{ + return node->flags & UACPI_NAMESPACE_NODE_PREDEFINED; +} + +uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node) +{ + uacpi_namespace_node *prev; + + if (uacpi_unlikely(uacpi_namespace_node_is_dangling(node))) { + uacpi_warn("attempting to uninstall a dangling namespace node %.4s\n", + node->name.text); + return UACPI_STATUS_INTERNAL_ERROR; + } + + /* + * The way to trigger this is as follows: + * + * Method (FOO) { + * // Temporary device, will be deleted upon returning from FOO + * Device (\BAR) { + * } + * + * // + * // Load TBL where TBL is: + * // Scope (\BAR) { + * // Name (TEST, 123) + * // } + * // + * Load(TBL) + * } + * + * In the above example, TEST is a permanent node attached by bad AML to a + * temporary node created inside the FOO method at \BAR. The cleanup code + * will attempt to remove the \BAR device upon exit from FOO, but that is + * no longer possible as there's now a permanent child attached to it. + */ + if (uacpi_unlikely(node->child != UACPI_NULL)) { + uacpi_warn( + "refusing to uninstall node %.4s with a child (%.4s)\n", + node->name.text, node->child->name.text + ); + return UACPI_STATUS_DENIED; + } + + /* + * Even though namespace_node is reference-counted it still has an 'invalid' + * state that is entered after it is uninstalled from the global namespace. + * + * Reference counting is only needed to combat dangling pointer issues + * whereas bad AML might try to prolong a local object lifetime by + * returning it from a method, or CopyObject it somewhere. In that case the + * namespace node object itself is still alive, but no longer has a valid + * object associated with it. + * + * Example: + * Method (BAD) { + * OperationRegion(REG, SystemMemory, 0xDEADBEEF, 4) + * Field (REG, AnyAcc, NoLock) { + * FILD, 8, + * } + * + * Return (RefOf(FILD)) + * } + * + * // Local0 is now the sole owner of the 'FILD' object that under the + * // hood is still referencing the 'REG' operation region object from + * // the 'BAD' method. + * Local0 = DerefOf(BAD()) + * + * This is done to prevent potential very deep recursion where an object + * frees a namespace node that frees an attached object that frees a + * namespace node as well as potential infinite cycles between a namespace + * node and an object. + */ + namespace_node_detach_object(node); + + prev = node->parent ? 
node->parent->child : UACPI_NULL; + + if (prev == node) { + node->parent->child = node->next; + } else { + while (uacpi_likely(prev != UACPI_NULL) && prev->next != node) + prev = prev->next; + + if (uacpi_unlikely(prev == UACPI_NULL)) { + uacpi_warn( + "trying to uninstall a node %.4s (%p) not linked to any peer\n", + node->name.text, node + ); + return UACPI_STATUS_INTERNAL_ERROR; + } + + prev->next = node->next; + } + + node->flags |= UACPI_NAMESPACE_NODE_FLAG_DANGLING; + uacpi_namespace_node_unref(node); + + return UACPI_STATUS_OK; +} + +uacpi_namespace_node *uacpi_namespace_node_find_sub_node( + uacpi_namespace_node *parent, + uacpi_object_name name +) +{ + uacpi_namespace_node *node; + + if (parent == UACPI_NULL) + parent = uacpi_namespace_root(); + + node = parent->child; + + while (node) { + if (node->name.id == name.id) + return node; + + node = node->next; + } + + return UACPI_NULL; +} + +static uacpi_object_name segment_to_name( + const uacpi_char **string, uacpi_size *in_out_size +) +{ + uacpi_object_name out_name; + const uacpi_char *cursor = *string; + uacpi_size offset, bytes_left = *in_out_size; + + for (offset = 0; offset < 4; offset++) { + if (bytes_left < 1 || *cursor == '.') { + out_name.text[offset] = '_'; + continue; + } + + out_name.text[offset] = *cursor++; + bytes_left--; + } + + *string = cursor; + *in_out_size = bytes_left; + return out_name; +} + +uacpi_status uacpi_namespace_node_resolve( + uacpi_namespace_node *parent, const uacpi_char *path, + enum uacpi_should_lock should_lock, + enum uacpi_may_search_above_parent may_search_above_parent, + enum uacpi_permanent_only permanent_only, + uacpi_namespace_node **out_node +) +{ + uacpi_namespace_node *cur_node = parent; + uacpi_status ret = UACPI_STATUS_OK; + const uacpi_char *cursor = path; + uacpi_size bytes_left; + uacpi_char prev_char = 0; + uacpi_bool single_nameseg = UACPI_TRUE; + + if (cur_node == UACPI_NULL) + cur_node = uacpi_namespace_root(); + + bytes_left = uacpi_strlen(path); + + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + + for (;;) { + if (bytes_left == 0) + goto out; + + switch (*cursor) { + case '\\': + single_nameseg = UACPI_FALSE; + + if (prev_char == '^') { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + cur_node = uacpi_namespace_root(); + break; + case '^': + single_nameseg = UACPI_FALSE; + + // Tried to go behind root + if (uacpi_unlikely(cur_node == uacpi_namespace_root())) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + cur_node = cur_node->parent; + break; + default: + break; + } + + prev_char = *cursor; + + switch (prev_char) { + case '^': + case '\\': + cursor++; + bytes_left--; + break; + default: + break; + } + + if (prev_char != '^') + break; + } + + while (bytes_left != 0) { + uacpi_object_name nameseg; + + if (*cursor == '.') { + cursor++; + bytes_left--; + } + + nameseg = segment_to_name(&cursor, &bytes_left); + if (bytes_left != 0 && single_nameseg) + single_nameseg = UACPI_FALSE; + + cur_node = uacpi_namespace_node_find_sub_node(cur_node, nameseg); + if (cur_node == UACPI_NULL) { + if (may_search_above_parent == UACPI_MAY_SEARCH_ABOVE_PARENT_NO || + !single_nameseg) + goto out; + + parent = parent->parent; + + while (parent) { + cur_node = uacpi_namespace_node_find_sub_node(parent, nameseg); + if (cur_node != UACPI_NULL) + goto out; + + parent = parent->parent; + } + + goto out; + } + } + +out: + if (uacpi_unlikely(ret == UACPI_STATUS_INVALID_ARGUMENT)) { + 
uacpi_warn("invalid path '%s'\n", path); + goto out_read_unlock; + } + + if (cur_node == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; + goto out_read_unlock; + } + + if (uacpi_namespace_node_is_temporary(cur_node) && + permanent_only == UACPI_PERMANENT_ONLY_YES) { + uacpi_warn("denying access to temporary namespace node '%.4s'\n", + cur_node->name.text); + ret = UACPI_STATUS_DENIED; + goto out_read_unlock; + } + + if (out_node != UACPI_NULL) + *out_node = cur_node; + +out_read_unlock: + if (should_lock == UACPI_SHOULD_LOCK_YES) + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_node_find( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_namespace_node **out_node +) +{ + return uacpi_namespace_node_resolve( + parent, path, UACPI_SHOULD_LOCK_YES, UACPI_MAY_SEARCH_ABOVE_PARENT_NO, + UACPI_PERMANENT_ONLY_YES, out_node + ); +} + +uacpi_status uacpi_namespace_node_resolve_from_aml_namepath( + uacpi_namespace_node *scope, + const uacpi_char *path, + uacpi_namespace_node **out_node +) +{ + return uacpi_namespace_node_resolve( + scope, path, UACPI_SHOULD_LOCK_YES, UACPI_MAY_SEARCH_ABOVE_PARENT_YES, + UACPI_PERMANENT_ONLY_YES, out_node + ); +} + +uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node) +{ + if (node == UACPI_NULL || node->object == UACPI_NULL) + return UACPI_NULL; + + return uacpi_unwrap_internal_reference(node->object); +} + +uacpi_object *uacpi_namespace_node_get_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask +) +{ + uacpi_object *obj; + + obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + if (!uacpi_object_is_one_of(obj, type_mask)) + return UACPI_NULL; + + return obj; +} + +uacpi_status uacpi_namespace_node_acquire_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_object **out_obj +) +{ + uacpi_status ret; + uacpi_object *obj; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + obj = uacpi_namespace_node_get_object(node); + + if (uacpi_unlikely(obj == UACPI_NULL) || + !uacpi_object_is_one_of(obj, type_mask)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + uacpi_object_ref(obj); + *out_obj = obj; + +out: + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_node_acquire_object( + const uacpi_namespace_node *node, uacpi_object **out_obj +) +{ + return uacpi_namespace_node_acquire_object_typed( + node, UACPI_OBJECT_ANY_BIT, out_obj + ); +} + +enum action { + ACTION_REACQUIRE, + ACTION_PUT, +}; + +static uacpi_status object_mutate_refcount( + uacpi_object *obj, void (*cb)(uacpi_object*) +) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (uacpi_likely(!uacpi_object_is(obj, UACPI_OBJECT_REFERENCE))) { + cb(obj); + return ret; + } + + /* + * Reference objects must be (un)referenced under at least a read lock, as + * this requires walking down the entire reference chain and dropping each + * object ref-count by 1. This might race with the interpreter and + * object_replace_child in case an object in the chain is CopyObject'ed + * into. 
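+     *
+     * An illustrative AML shape of that race (names invented for this
+     * comment):
+     *
+     *     Local0 = RefOf(FOO)    // chain: Local0 -> FOO's object
+     *     CopyObject(0x123, FOO) // retargets FOO's object concurrently
+     *
+     * Dropping Local0 walks the chain while it may be getting replaced,
+     * which is only safe under the namespace lock.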
+     */
+    ret = uacpi_namespace_read_lock();
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    cb(obj);
+
+    uacpi_namespace_read_unlock();
+    return ret;
+}
+
+uacpi_status uacpi_namespace_node_reacquire_object(
+    uacpi_object *obj
+)
+{
+    return object_mutate_refcount(obj, uacpi_object_ref);
+}
+
+uacpi_status uacpi_namespace_node_release_object(uacpi_object *obj)
+{
+    return object_mutate_refcount(obj, uacpi_object_unref);
+}
+
+uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node)
+{
+    return node->name;
+}
+
+uacpi_status uacpi_namespace_node_type_unlocked(
+    const uacpi_namespace_node *node, uacpi_object_type *out_type
+)
+{
+    uacpi_object *obj;
+
+    if (uacpi_unlikely(node == UACPI_NULL))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    obj = uacpi_namespace_node_get_object(node);
+    if (uacpi_unlikely(obj == UACPI_NULL))
+        return UACPI_STATUS_NOT_FOUND;
+
+    *out_type = obj->type;
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_namespace_node_type(
+    const uacpi_namespace_node *node, uacpi_object_type *out_type
+)
+{
+    uacpi_status ret;
+
+    ret = uacpi_namespace_read_lock();
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    ret = uacpi_namespace_node_type_unlocked(node, out_type);
+
+    uacpi_namespace_read_unlock();
+    return ret;
+}
+
+uacpi_status uacpi_namespace_node_is_one_of_unlocked(
+    const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
+    uacpi_bool *out
+)
+{
+    uacpi_object *obj;
+
+    if (uacpi_unlikely(node == UACPI_NULL))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    obj = uacpi_namespace_node_get_object(node);
+    if (uacpi_unlikely(obj == UACPI_NULL))
+        return UACPI_STATUS_NOT_FOUND;
+
+    *out = uacpi_object_is_one_of(obj, type_mask);
+
+    return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_namespace_node_is_one_of(
+    const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
+    uacpi_bool *out
+)
+{
+    uacpi_status ret;
+
+    ret = uacpi_namespace_read_lock();
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    ret = uacpi_namespace_node_is_one_of_unlocked(node, type_mask, out);
+
+    uacpi_namespace_read_unlock();
+    return ret;
+}
+
+uacpi_status uacpi_namespace_node_is(
+    const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out
+)
+{
+    return uacpi_namespace_node_is_one_of(
+        node, 1u << type, out
+    );
+}
+
+uacpi_status uacpi_namespace_do_for_each_child(
+    uacpi_namespace_node *node, uacpi_iteration_callback descending_callback,
+    uacpi_iteration_callback ascending_callback,
+    uacpi_object_type_bits type_mask, uacpi_u32 max_depth,
+    enum uacpi_should_lock should_lock,
+    enum uacpi_permanent_only permanent_only, void *user
+)
+{
+    uacpi_status ret = UACPI_STATUS_OK;
+    uacpi_iteration_decision decision;
+    uacpi_iteration_callback cb;
+    uacpi_bool walking_up = UACPI_FALSE, matches = UACPI_FALSE;
+    uacpi_u32 depth = 1;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_unlikely(descending_callback == UACPI_NULL &&
+                       ascending_callback == UACPI_NULL))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    if (uacpi_unlikely(node == UACPI_NULL || max_depth == 0))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    if (should_lock == UACPI_SHOULD_LOCK_YES) {
+        ret = uacpi_namespace_read_lock();
+        if (uacpi_unlikely_error(ret))
+            return ret;
+    }
+
+    if (node->child == UACPI_NULL)
+        goto out;
+
+    node = node->child;
+
+    while (depth) {
+        uacpi_namespace_node_is_one_of_unlocked(node, type_mask, &matches);
+        if (!matches) {
+            decision = UACPI_ITERATION_DECISION_CONTINUE;
+            goto do_next;
+        }
+
+        if (permanent_only ==
UACPI_PERMANENT_ONLY_YES && + uacpi_namespace_node_is_temporary(node)) { + decision = UACPI_ITERATION_DECISION_NEXT_PEER; + goto do_next; + } + + cb = walking_up ? ascending_callback : descending_callback; + if (cb != UACPI_NULL) { + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = uacpi_namespace_read_unlock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + + decision = cb(user, node, depth); + if (decision == UACPI_ITERATION_DECISION_BREAK) + return ret; + + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + } else { + decision = UACPI_ITERATION_DECISION_CONTINUE; + } + + do_next: + if (walking_up) { + if (node->next) { + node = node->next; + walking_up = UACPI_FALSE; + continue; + } + + depth--; + node = node->parent; + continue; + } + + switch (decision) { + case UACPI_ITERATION_DECISION_CONTINUE: + if ((depth != max_depth) && (node->child != UACPI_NULL)) { + node = node->child; + depth++; + continue; + } + UACPI_FALLTHROUGH; + case UACPI_ITERATION_DECISION_NEXT_PEER: + walking_up = UACPI_TRUE; + continue; + default: + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + } + +out: + if (should_lock == UACPI_SHOULD_LOCK_YES) + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_for_each_child_simple( + uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user +) +{ + return uacpi_namespace_do_for_each_child( + parent, callback, UACPI_NULL, UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, user + ); +} + +uacpi_status uacpi_namespace_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user +) +{ + return uacpi_namespace_do_for_each_child( + parent, descending_callback, ascending_callback, type_mask, max_depth, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, user + ); +} + +uacpi_status uacpi_namespace_node_next_typed( + uacpi_namespace_node *parent, uacpi_namespace_node **iter, + uacpi_object_type_bits type_mask +) +{ + uacpi_status ret; + uacpi_bool is_one_of; + uacpi_namespace_node *node; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(parent == UACPI_NULL && *iter == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + node = *iter; + if (node == UACPI_NULL) + node = parent->child; + else + node = node->next; + + for (; node != UACPI_NULL; node = node->next) { + if (uacpi_namespace_node_is_temporary(node)) + continue; + + ret = uacpi_namespace_node_is_one_of_unlocked( + node, type_mask, &is_one_of + ); + if (uacpi_unlikely_error(ret)) + break; + if (is_one_of) + break; + } + + uacpi_namespace_read_unlock(); + if (node == UACPI_NULL) + return UACPI_STATUS_NOT_FOUND; + + if (uacpi_likely_success(ret)) + *iter = node; + return ret; +} + +uacpi_status uacpi_namespace_node_next( + uacpi_namespace_node *parent, uacpi_namespace_node **iter +) +{ + return uacpi_namespace_node_next_typed( + parent, iter, UACPI_OBJECT_ANY_BIT + ); +} + +uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node) +{ + uacpi_size depth = 0; + + while (node->parent) { + depth++; + node = node->parent; + } + + return depth; +} + +uacpi_namespace_node *uacpi_namespace_node_parent( + uacpi_namespace_node *node +) +{ + return 
node->parent; +} + +const uacpi_char *uacpi_namespace_node_generate_absolute_path( + const uacpi_namespace_node *node +) +{ + uacpi_size depth, offset; + uacpi_size bytes_needed; + uacpi_char *path; + + depth = uacpi_namespace_node_depth(node) + 1; + + // \ only needs 1 byte, the rest is 4 bytes + bytes_needed = 1 + (depth - 1) * sizeof(uacpi_object_name); + + // \ and the first NAME don't need a '.', every other segment does + bytes_needed += depth > 2 ? depth - 2 : 0; + + // Null terminator + bytes_needed += 1; + + path = uacpi_kernel_alloc(bytes_needed); + if (uacpi_unlikely(path == UACPI_NULL)) + return path; + + path[0] = '\\'; + + offset = bytes_needed - 1; + path[offset] = '\0'; + + while (node != uacpi_namespace_root()) { + offset -= sizeof(uacpi_object_name); + uacpi_memcpy(&path[offset], node->name.text, sizeof(uacpi_object_name)); + + node = node->parent; + if (node != uacpi_namespace_root()) + path[--offset] = '.'; + } + + return path; +} + +void uacpi_free_absolute_path(const uacpi_char *path) +{ + uacpi_free_dynamic_string(path); +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/notify.c b/sys/dev/acpi/uacpi/notify.c new file mode 100644 index 0000000..b413df9 --- /dev/null +++ b/sys/dev/acpi/uacpi/notify.c @@ -0,0 +1,255 @@ +#include <uacpi/internal/notify.h> +#include <uacpi/internal/shareable.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/kernel_api.h> + +#ifndef UACPI_BAREBONES_MODE + +static uacpi_handle notify_mutex; + +uacpi_status uacpi_initialize_notify(void) +{ + notify_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(notify_mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +void uacpi_deinitialize_notify(void) +{ + if (notify_mutex != UACPI_NULL) + uacpi_kernel_free_mutex(notify_mutex); + + notify_mutex = UACPI_NULL; +} + +struct notification_ctx { + uacpi_namespace_node *node; + uacpi_u64 value; + uacpi_object *node_object; +}; + +static void free_notification_ctx(struct notification_ctx *ctx) +{ + uacpi_namespace_node_release_object(ctx->node_object); + uacpi_namespace_node_unref(ctx->node); + uacpi_free(ctx, sizeof(*ctx)); +} + +static void do_notify(uacpi_handle opaque) +{ + struct notification_ctx *ctx = opaque; + uacpi_device_notify_handler *handler; + uacpi_bool did_notify_root = UACPI_FALSE; + + handler = ctx->node_object->handlers->notify_head; + + for (;;) { + if (handler == UACPI_NULL) { + if (did_notify_root) { + free_notification_ctx(ctx); + return; + } + + handler = g_uacpi_rt_ctx.root_object->handlers->notify_head; + did_notify_root = UACPI_TRUE; + continue; + } + + handler->callback(handler->user_context, ctx->node, ctx->value); + handler = handler->next; + } +} + +uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value) +{ + uacpi_status ret; + struct notification_ctx *ctx; + uacpi_object *node_object; + + node_object = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT | + UACPI_OBJECT_PROCESSOR_BIT + ); + if (uacpi_unlikely(node_object == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_acquire_native_mutex(notify_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + if (node_object->handlers->notify_head == UACPI_NULL && + g_uacpi_rt_ctx.root_object->handlers->notify_head == UACPI_NULL) { + ret = UACPI_STATUS_NO_HANDLER; + goto 
out;
+    }
+
+    ctx = uacpi_kernel_alloc(sizeof(*ctx));
+    if (uacpi_unlikely(ctx == UACPI_NULL)) {
+        ret = UACPI_STATUS_OUT_OF_MEMORY;
+        goto out;
+    }
+
+    ctx->node = node;
+    // In case this node goes out of scope
+    uacpi_shareable_ref(node);
+
+    ctx->value = value;
+    ctx->node_object = uacpi_namespace_node_get_object(node);
+    uacpi_object_ref(ctx->node_object);
+
+    ret = uacpi_kernel_schedule_work(UACPI_WORK_NOTIFICATION, do_notify, ctx);
+    if (uacpi_unlikely_error(ret)) {
+        uacpi_warn("unable to schedule notification work: %s\n",
+                   uacpi_status_to_string(ret));
+        free_notification_ctx(ctx);
+    }
+
+out:
+    uacpi_release_native_mutex(notify_mutex);
+    return ret;
+}
+
+static uacpi_device_notify_handler *handler_container(
+    uacpi_handlers *handlers, uacpi_notify_handler target_handler
+)
+{
+    uacpi_device_notify_handler *handler = handlers->notify_head;
+
+    while (handler) {
+        if (handler->callback == target_handler)
+            return handler;
+
+        handler = handler->next;
+    }
+
+    return UACPI_NULL;
+}
+
+uacpi_status uacpi_install_notify_handler(
+    uacpi_namespace_node *node, uacpi_notify_handler handler,
+    uacpi_handle handler_context
+)
+{
+    uacpi_status ret;
+    uacpi_object *obj;
+    uacpi_handlers *handlers;
+    uacpi_device_notify_handler *new_handler;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (node == uacpi_namespace_root()) {
+        obj = g_uacpi_rt_ctx.root_object;
+    } else {
+        ret = uacpi_namespace_node_acquire_object_typed(
+            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
+            UACPI_OBJECT_PROCESSOR_BIT, &obj
+        );
+        if (uacpi_unlikely_error(ret))
+            return ret;
+    }
+
+    ret = uacpi_acquire_native_mutex(notify_mutex);
+    if (uacpi_unlikely_error(ret))
+        goto out_no_mutex;
+
+    uacpi_kernel_wait_for_work_completion();
+
+    handlers = obj->handlers;
+
+    if (handler_container(handlers, handler) != UACPI_NULL) {
+        ret = UACPI_STATUS_ALREADY_EXISTS;
+        goto out;
+    }
+
+    new_handler = uacpi_kernel_alloc_zeroed(sizeof(*new_handler));
+    if (uacpi_unlikely(new_handler == UACPI_NULL)) {
+        // Bail out via the common path so the notify mutex is released.
+        ret = UACPI_STATUS_OUT_OF_MEMORY;
+        goto out;
+    }
+
+    new_handler->callback = handler;
+    new_handler->user_context = handler_context;
+    new_handler->next = handlers->notify_head;
+
+    handlers->notify_head = new_handler;
+
+out:
+    uacpi_release_native_mutex(notify_mutex);
+out_no_mutex:
+    if (node != uacpi_namespace_root())
+        uacpi_object_unref(obj);
+
+    return ret;
+}
+
+uacpi_status uacpi_uninstall_notify_handler(
+    uacpi_namespace_node *node, uacpi_notify_handler handler
+)
+{
+    uacpi_status ret;
+    uacpi_object *obj;
+    uacpi_handlers *handlers;
+    uacpi_device_notify_handler *prev_handler, *containing = UACPI_NULL;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (node == uacpi_namespace_root()) {
+        obj = g_uacpi_rt_ctx.root_object;
+    } else {
+        ret = uacpi_namespace_node_acquire_object_typed(
+            node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
+            UACPI_OBJECT_PROCESSOR_BIT, &obj
+        );
+        if (uacpi_unlikely_error(ret))
+            return ret;
+    }
+
+    ret = uacpi_acquire_native_mutex(notify_mutex);
+    if (uacpi_unlikely_error(ret))
+        goto out_no_mutex;
+
+    uacpi_kernel_wait_for_work_completion();
+
+    handlers = obj->handlers;
+
+    containing = handler_container(handlers, handler);
+    if (containing == UACPI_NULL) {
+        ret = UACPI_STATUS_NOT_FOUND;
+        goto out;
+    }
+
+    prev_handler = handlers->notify_head;
+
+    // Are we the last linked handler?
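+    // (Handlers are prepended on install, so the head is the most
+    // recently installed one: installing H1 then H2 yields
+    // head -> H2 -> H1. Unlinking the head is the cheap case below.)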
+ if (prev_handler == containing) { + handlers->notify_head = containing->next; + goto out; + } + + // Nope, we're somewhere in the middle. Do a search. + while (prev_handler) { + if (prev_handler->next == containing) { + prev_handler->next = containing->next; + goto out; + } + + prev_handler = prev_handler->next; + } + +out: + uacpi_release_native_mutex(notify_mutex); +out_no_mutex: + if (node != uacpi_namespace_root()) + uacpi_object_unref(obj); + + if (uacpi_likely_success(ret)) + uacpi_free(containing, sizeof(*containing)); + + return ret; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/opcodes.c b/sys/dev/acpi/uacpi/opcodes.c new file mode 100644 index 0000000..3665631 --- /dev/null +++ b/sys/dev/acpi/uacpi/opcodes.c @@ -0,0 +1,272 @@ +#include <uacpi/internal/opcodes.h> + +#ifndef UACPI_BAREBONES_MODE + +#define UACPI_OP(opname, opcode, props, ...) \ + { #opname, { .decode_ops = __VA_ARGS__ }, .properties = props, .code = opcode }, + +#define UACPI_OUT_OF_LINE_OP(opname, opcode, out_of_line_buf, props) \ + { \ + .name = #opname, \ + { .indirect_decode_ops = out_of_line_buf }, \ + .properties = props, \ + .code = opcode, \ + }, + +static const struct uacpi_op_spec opcode_table[0x100] = { + UACPI_ENUMERATE_OPCODES +}; + +static const struct uacpi_op_spec ext_opcode_table[] = { + UACPI_ENUMERATE_EXT_OPCODES +}; + +#define _(op) (op & 0x00FF) + +static const uacpi_u8 ext_op_to_idx[0x100] = { + [_(UACPI_AML_OP_MutexOp)] = 1, [_(UACPI_AML_OP_EventOp)] = 2, + [_(UACPI_AML_OP_CondRefOfOp)] = 3, [_(UACPI_AML_OP_CreateFieldOp)] = 4, + [_(UACPI_AML_OP_LoadTableOp)] = 5, [_(UACPI_AML_OP_LoadOp)] = 6, + [_(UACPI_AML_OP_StallOp)] = 7, [_(UACPI_AML_OP_SleepOp)] = 8, + [_(UACPI_AML_OP_AcquireOp)] = 9, [_(UACPI_AML_OP_SignalOp)] = 10, + [_(UACPI_AML_OP_WaitOp)] = 11, [_(UACPI_AML_OP_ResetOp)] = 12, + [_(UACPI_AML_OP_ReleaseOp)] = 13, [_(UACPI_AML_OP_FromBCDOp)] = 14, + [_(UACPI_AML_OP_ToBCDOp)] = 15, [_(UACPI_AML_OP_UnloadOp)] = 16, + [_(UACPI_AML_OP_RevisionOp)] = 17, [_(UACPI_AML_OP_DebugOp)] = 18, + [_(UACPI_AML_OP_FatalOp)] = 19, [_(UACPI_AML_OP_TimerOp)] = 20, + [_(UACPI_AML_OP_OpRegionOp)] = 21, [_(UACPI_AML_OP_FieldOp)] = 22, + [_(UACPI_AML_OP_DeviceOp)] = 23, [_(UACPI_AML_OP_ProcessorOp)] = 24, + [_(UACPI_AML_OP_PowerResOp)] = 25, [_(UACPI_AML_OP_ThermalZoneOp)] = 26, + [_(UACPI_AML_OP_IndexFieldOp)] = 27, [_(UACPI_AML_OP_BankFieldOp)] = 28, + [_(UACPI_AML_OP_DataRegionOp)] = 29, +}; + +const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op op) +{ + if (op > 0xFF) + return &ext_opcode_table[ext_op_to_idx[_(op)]]; + + return &opcode_table[op]; +} + +#define PARSE_FIELD_ELEMENTS(parse_loop_pc) \ + /* Parse every field element found inside */ \ + UACPI_PARSE_OP_IF_HAS_DATA, 44, \ + /* Look at the first byte */ \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + \ + /* ReservedField := 0x00 PkgLength */ \ + UACPI_PARSE_OP_IF_LAST_EQUALS, 0x00, 3, \ + UACPI_PARSE_OP_PKGLEN, \ + UACPI_PARSE_OP_JMP, parse_loop_pc, \ + \ + /* AccessField := 0x01 AccessType AccessAttrib */ \ + UACPI_PARSE_OP_IF_LAST_EQUALS, 0x01, 6, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_JMP, parse_loop_pc, \ + \ + /* ConnectField := <0x02 NameString> | <0x02 BufferData> */ \ + UACPI_PARSE_OP_IF_LAST_EQUALS, 0x02, 5, \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_JMP, parse_loop_pc, \ + \ + /* ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib \ + * AccessLength */ \ + UACPI_PARSE_OP_IF_LAST_EQUALS, 0x03, 
8, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_JMP, parse_loop_pc, \ + \ + /* NamedField := NameSeg PkgLength */ \ + \ + /* \ + * Discard the immediate, as it's the first byte of the \ + * nameseg. We don't need it. \ + */ \ + UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_AML_PC_DECREMENT, \ + UACPI_PARSE_OP_CREATE_NAMESTRING, \ + UACPI_PARSE_OP_PKGLEN, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_FIELD_UNIT, \ + UACPI_PARSE_OP_JMP, parse_loop_pc, \ + \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_END + +uacpi_u8 uacpi_field_op_decode_ops[] = { + UACPI_PARSE_OP_TRACKED_PKGLEN, + UACPI_PARSE_OP_EXISTING_NAMESTRING, + UACPI_PARSE_OP_LOAD_IMM, 1, + PARSE_FIELD_ELEMENTS(4), +}; + +uacpi_u8 uacpi_bank_field_op_decode_ops[] = { + UACPI_PARSE_OP_TRACKED_PKGLEN, + UACPI_PARSE_OP_EXISTING_NAMESTRING, + UACPI_PARSE_OP_EXISTING_NAMESTRING, + UACPI_PARSE_OP_OPERAND, + UACPI_PARSE_OP_LOAD_IMM, 1, + PARSE_FIELD_ELEMENTS(6), +}; + +uacpi_u8 uacpi_index_field_op_decode_ops[] = { + UACPI_PARSE_OP_TRACKED_PKGLEN, + UACPI_PARSE_OP_EXISTING_NAMESTRING, + UACPI_PARSE_OP_EXISTING_NAMESTRING, + UACPI_PARSE_OP_LOAD_IMM, 1, + PARSE_FIELD_ELEMENTS(5), +}; + +uacpi_u8 uacpi_load_op_decode_ops[] = { + // Storage for the scope pointer, this is left as 0 in case of errors + UACPI_PARSE_OP_LOAD_ZERO_IMM, + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD, + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, + UACPI_PARSE_OP_TARGET, + + /* + * Invoke the handler here to initialize the table. If this fails, it's + * expected to keep the item 0 as NULL, which is checked below to return + * false to the caller of Load. + */ + UACPI_PARSE_OP_INVOKE_HANDLER, + UACPI_PARSE_OP_IF_NULL, 0, 3, + UACPI_PARSE_OP_LOAD_FALSE_OBJECT, + UACPI_PARSE_OP_JMP, 15, + + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, + UACPI_PARSE_OP_DISPATCH_TABLE_LOAD, + + /* + * Invoke the handler a second time to initialize any AML GPE handlers that + * might've been loaded from this table. + */ + UACPI_PARSE_OP_INVOKE_HANDLER, + UACPI_PARSE_OP_STORE_TO_TARGET, 3, + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, + UACPI_PARSE_OP_END, +}; + +uacpi_u8 uacpi_load_table_op_decode_ops[] = { + // Storage for the scope pointer, this is left as 0 in case of errors + UACPI_PARSE_OP_LOAD_ZERO_IMM, + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD, + // Index of the table we are going to be loaded to unref it later + UACPI_PARSE_OP_LOAD_ZERO_IMM, + // Storage for the target pointer, this is left as 0 if none was requested + UACPI_PARSE_OP_LOAD_ZERO_IMM, + + UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, 5, + UACPI_PARSE_OP_IF_NOT_NULL, 4, 5, + UACPI_PARSE_OP_STRING, + UACPI_PARSE_OP_IMM_DECREMENT, 4, + UACPI_PARSE_OP_JMP, 8, + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, + + /* + * Invoke the handler here to initialize the table. If this fails, it's + * expected to keep the item 0 as NULL, which is checked below to return + * false to the caller of Load. + */ + UACPI_PARSE_OP_INVOKE_HANDLER, + UACPI_PARSE_OP_IF_NULL, 0, 3, + UACPI_PARSE_OP_LOAD_FALSE_OBJECT, + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, + UACPI_PARSE_OP_END, + + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, + UACPI_PARSE_OP_DISPATCH_TABLE_LOAD, + + /* + * Invoke the handler a second time to block the store to target in case + * the load above failed, as well as do any AML GPE handler initialization. 
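+     *
+     * For reference, a hypothetical ASL call this sequence decodes
+     * (argument values invented for this comment):
+     *
+     *     Local0 = LoadTable("OEM1", "MYOEM", "TABLE1", "\\", "", Zero)
+     *
+     * Local0 ends up holding True if the table was found and loaded,
+     * False otherwise.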
+ */ + UACPI_PARSE_OP_INVOKE_HANDLER, + + // If we were given a target to store to, do the store + UACPI_PARSE_OP_IF_NOT_NULL, 3, 3, + UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 3, 10, + + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, + UACPI_PARSE_OP_END, +}; + +#define POP(x) UACPI_PARSE_OP_##x + +static +const uacpi_char *const pop_names[UACPI_PARSE_OP_MAX + 1] = { + [POP(END)] = "<END-OF-OP>", + [POP(SKIP_WITH_WARN_IF_NULL)] = "SKIP_WITH_WARN_IF_NULL", + [POP(EMIT_SKIP_WARN)] = "EMIT_SKIP_WARN", + [POP(SIMPLE_NAME)] = "SIMPLE_NAME", + [POP(SUPERNAME)] = "SUPERNAME", + [POP(SUPERNAME_OR_UNRESOLVED)] = "SUPERNAME_OR_UNRESOLVED", + [POP(TERM_ARG)] = "TERM_ARG", + [POP(TERM_ARG_UNWRAP_INTERNAL)] = "TERM_ARG_UNWRAP_INTERNAL", + [POP(TERM_ARG_OR_NAMED_OBJECT)] = "TERM_ARG_OR_NAMED_OBJECT", + [POP(TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED)] = "TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED", + [POP(OPERAND)] = "OPERAND", + [POP(STRING)] = "STRING", + [POP(COMPUTATIONAL_DATA)] = "COMPUTATIONAL_DATA", + [POP(TARGET)] = "TARGET", + [POP(PKGLEN)] = "PKGLEN", + [POP(TRACKED_PKGLEN)] = "TRACKED_PKGLEN", + [POP(CREATE_NAMESTRING)] = "CREATE_NAMESTRING", + [POP(CREATE_NAMESTRING_OR_NULL_IF_LOAD)] = "CREATE_NAMESTRING_OR_NULL_IF_LOAD", + [POP(EXISTING_NAMESTRING)] = "EXISTING_NAMESTRING", + [POP(EXISTING_NAMESTRING_OR_NULL)] = "EXISTING_NAMESTRING_OR_NULL", + [POP(EXISTING_NAMESTRING_OR_NULL_IF_LOAD)] = "EXISTING_NAMESTRING_OR_NULL_IF_LOAD", + [POP(INVOKE_HANDLER)] = "INVOKE_HANDLER", + [POP(OBJECT_ALLOC)] = "OBJECT_ALLOC", + [POP(EMPTY_OBJECT_ALLOC)] = "EMPTY_OBJECT_ALLOC", + [POP(OBJECT_CONVERT_TO_SHALLOW_COPY)] = "OBJECT_CONVERT_TO_SHALLOW_COPY", + [POP(OBJECT_CONVERT_TO_DEEP_COPY)] = "OBJECT_CONVERT_TO_DEEP_COPY", + [POP(OBJECT_ALLOC_TYPED)] = "OBJECT_ALLOC_TYPED", + [POP(RECORD_AML_PC)] = "RECORD_AML_PC", + [POP(LOAD_INLINE_IMM_AS_OBJECT)] = "LOAD_INLINE_IMM_AS_OBJECT", + [POP(LOAD_INLINE_IMM)] = "LOAD_INLINE_IMM", + [POP(LOAD_ZERO_IMM)] = "LOAD_ZERO_IMM", + [POP(LOAD_IMM)] = "LOAD_IMM", + [POP(LOAD_IMM_AS_OBJECT)] = "LOAD_IMM_AS_OBJECT", + [POP(LOAD_FALSE_OBJECT)] = "LOAD_FALSE_OBJECT", + [POP(LOAD_TRUE_OBJECT)] = "LOAD_TRUE_OBJECT", + [POP(TRUNCATE_NUMBER)] = "TRUNCATE_NUMBER", + [POP(TYPECHECK)] = "TYPECHECK", + [POP(INSTALL_NAMESPACE_NODE)] = "INSTALL_NAMESPACE_NODE", + [POP(OBJECT_TRANSFER_TO_PREV)] = "OBJECT_TRANSFER_TO_PREV", + [POP(OBJECT_COPY_TO_PREV)] = "OBJECT_COPY_TO_PREV", + [POP(STORE_TO_TARGET)] = "STORE_TO_TARGET", + [POP(STORE_TO_TARGET_INDIRECT)] = "STORE_TO_TARGET_INDIRECT", + [POP(UNREACHABLE)] = "UNREACHABLE", + [POP(BAD_OPCODE)] = "BAD_OPCODE", + [POP(AML_PC_DECREMENT)] = "AML_PC_DECREMENT", + [POP(IMM_DECREMENT)] = "IMM_DECREMENT", + [POP(ITEM_POP)] = "ITEM_POP", + [POP(DISPATCH_METHOD_CALL)] = "DISPATCH_METHOD_CALL", + [POP(DISPATCH_TABLE_LOAD)] = "DISPATCH_TABLE_LOAD", + [POP(CONVERT_NAMESTRING)] = "CONVERT_NAMESTRING", + [POP(IF_HAS_DATA)] = "IF_HAS_DATA", + [POP(IF_NULL)] = "IF_NULL", + [POP(IF_LAST_NULL)] = "IF_LAST_NULL", + [POP(IF_NOT_NULL)] = "IF_NOT_NULL", + [POP(IF_LAST_NOT_NULL)] = "IF_LAST_NOT_NULL", + [POP(IF_LAST_EQUALS)] = "IF_LAST_EQUALS", + [POP(IF_LAST_FALSE)] = "IF_LAST_FALSE", + [POP(IF_LAST_TRUE)] = "IF_LAST_TRUE", + [POP(SWITCH_TO_NEXT_IF_EQUALS)] = "SWITCH_TO_NEXT_IF_EQUALS", + [POP(IF_SWITCHED_FROM)] = "IF_SWITCHED_FROM", + [POP(JMP)] = "JMP", +}; + +const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op) +{ + if (uacpi_unlikely(op > UACPI_PARSE_OP_MAX)) + return "<INVALID-OP>"; + + return pop_names[op]; +} + +#endif // !UACPI_BAREBONES_MODE diff --git 
a/sys/dev/acpi/uacpi/opregion.c b/sys/dev/acpi/uacpi/opregion.c new file mode 100644 index 0000000..ec0bc37 --- /dev/null +++ b/sys/dev/acpi/uacpi/opregion.c @@ -0,0 +1,1056 @@ +#include <uacpi/kernel_api.h> + +#include <uacpi/internal/opregion.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/internal/interpreter.h> + +#ifndef UACPI_BAREBONES_MODE + +struct uacpi_recursive_lock g_opregion_lock; + +uacpi_status uacpi_initialize_opregion(void) +{ + return uacpi_recursive_lock_init(&g_opregion_lock); +} + +void uacpi_deinitialize_opregion(void) +{ + uacpi_recursive_lock_deinit(&g_opregion_lock); +} + +void uacpi_trace_region_error( + uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret +) +{ + const uacpi_char *path, *space_string = "<unknown>"; + uacpi_object *obj; + + path = uacpi_namespace_node_generate_absolute_path(node); + + obj = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_likely(obj != UACPI_NULL)) + space_string = uacpi_address_space_to_string(obj->op_region->space); + + uacpi_error( + "%s (%s) operation region %s: %s\n", + message, space_string, path, uacpi_status_to_string(ret) + ); + uacpi_free_dynamic_string(path); +} + +static void trace_region_io( + uacpi_field_unit *field, uacpi_address_space space, uacpi_u64 offset, + uacpi_region_op op, union uacpi_opregion_io_data data +) +{ + const uacpi_char *path; + const uacpi_char *type_str; + + if (!uacpi_should_log(UACPI_LOG_TRACE)) + return; + + switch (op) { + case UACPI_REGION_OP_READ: + type_str = "read from"; + break; + case UACPI_REGION_OP_WRITE: + type_str = "write to"; + break; + default: + type_str = "<INVALID-OP>"; + } + + path = uacpi_namespace_node_generate_absolute_path(field->region); + + switch (space) { + case UACPI_ADDRESS_SPACE_IPMI: + case UACPI_ADDRESS_SPACE_PRM: + case UACPI_ADDRESS_SPACE_FFIXEDHW: + uacpi_trace( + "write-then-read from [%s] %s[0x%016"UACPI_PRIX64"] = " + "<buffer of %zu bytes>\n", path, + uacpi_address_space_to_string(space), + UACPI_FMT64(offset), data.buffer.length + ); + break; + case UACPI_ADDRESS_SPACE_SMBUS: + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + uacpi_trace( + "%s [%s] %s[0x%016"UACPI_PRIX64"] = " + "<buffer of %zu bytes>\n", type_str, path, + uacpi_address_space_to_string(space), + UACPI_FMT64(offset), data.buffer.length + ); + break; + case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO: + uacpi_trace( + "%s [%s] %s pins[%u..%u] = 0x%"UACPI_PRIX64"\n", + type_str, path, uacpi_address_space_to_string(space), + field->pin_offset, (field->pin_offset + field->bit_length) - 1, + UACPI_FMT64(*data.integer) + ); + break; + default: + uacpi_trace( + "%s [%s] (%d bytes) %s[0x%016"UACPI_PRIX64"] = 0x%"UACPI_PRIX64"\n", + type_str, path, field->access_width_bytes, + uacpi_address_space_to_string(space), + UACPI_FMT64(offset), UACPI_FMT64(*data.integer) + ); + break; + } + + uacpi_free_dynamic_string(path); +} + +static uacpi_bool space_needs_reg(enum uacpi_address_space space) +{ + if (space == UACPI_ADDRESS_SPACE_SYSTEM_MEMORY || + space == UACPI_ADDRESS_SPACE_SYSTEM_IO || + space == UACPI_ADDRESS_SPACE_TABLE_DATA) + return UACPI_FALSE; + + return UACPI_TRUE; +} + +static uacpi_status region_run_reg( + uacpi_namespace_node *node, uacpi_u8 connection_code +) +{ + uacpi_status ret; + uacpi_namespace_node *reg_node; + uacpi_object_array method_args; + uacpi_object *reg_obj, 
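+    /*
+     * Per the ACPI specification, _REG takes two integer arguments: the
+     * address space ID of the region(s) affected, and a connection code
+     * (ACPI_REG_CONNECT when a handler becomes available,
+     * ACPI_REG_DISCONNECT when it goes away). The two objects allocated
+     * below carry exactly those arguments.
+     */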
*args[2]; + + ret = uacpi_namespace_node_resolve( + node->parent, "_REG", UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_NO, ®_node + ); + if (uacpi_unlikely_error(ret)) + return ret; + + reg_obj = uacpi_namespace_node_get_object_typed( + reg_node, UACPI_OBJECT_METHOD_BIT + ); + if (uacpi_unlikely(reg_obj == UACPI_NULL)) + return UACPI_STATUS_OK; + + args[0] = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(args[0] == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + args[1] = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(args[1] == UACPI_NULL)) { + uacpi_object_unref(args[0]); + return UACPI_STATUS_OUT_OF_MEMORY; + } + + args[0]->integer = uacpi_namespace_node_get_object(node)->op_region->space; + args[1]->integer = connection_code; + method_args.objects = args; + method_args.count = 2; + + ret = uacpi_execute_control_method( + reg_node, reg_obj->method, &method_args, UACPI_NULL + ); + if (uacpi_unlikely_error(ret)) + uacpi_trace_region_error(node, "error during _REG execution for", ret); + + uacpi_object_unref(args[0]); + uacpi_object_unref(args[1]); + return ret; +} + +uacpi_address_space_handlers *uacpi_node_get_address_space_handlers( + uacpi_namespace_node *node +) +{ + uacpi_object *object; + + if (node == uacpi_namespace_root()) + return g_uacpi_rt_ctx.root_object->address_space_handlers; + + object = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(object == UACPI_NULL)) + return UACPI_NULL; + + switch (object->type) { + case UACPI_OBJECT_DEVICE: + case UACPI_OBJECT_PROCESSOR: + case UACPI_OBJECT_THERMAL_ZONE: + return object->address_space_handlers; + default: + return UACPI_NULL; + } +} + +static uacpi_address_space_handler *find_handler( + uacpi_address_space_handlers *handlers, + enum uacpi_address_space space +) +{ + uacpi_address_space_handler *handler = handlers->head; + + while (handler) { + if (handler->space == space) + return handler; + + handler = handler->next; + } + + return UACPI_NULL; +} + +static uacpi_operation_region *find_previous_region_link( + uacpi_operation_region *region +) +{ + uacpi_address_space_handler *handler = region->handler; + uacpi_operation_region *parent = handler->regions; + + if (parent == region) + // This is the last attached region, it has no previous link + return region; + + while (parent->next != region) { + parent = parent->next; + + if (uacpi_unlikely(parent == UACPI_NULL)) + return UACPI_NULL; + } + + return parent; +} + +uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node) +{ + uacpi_object *obj; + uacpi_operation_region *region; + uacpi_address_space_handler *handler; + uacpi_status ret; + uacpi_region_attach_data attach_data = { 0 }; + + if (uacpi_namespace_node_is_dangling(node)) + return UACPI_STATUS_NAMESPACE_NODE_DANGLING; + + obj = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + region = obj->op_region; + + if (region->handler == UACPI_NULL) + return UACPI_STATUS_NO_HANDLER; + if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED) + return UACPI_STATUS_OK; + + handler = region->handler; + attach_data.region_node = node; + + switch (region->space) { + case UACPI_ADDRESS_SPACE_PCC: + if (region->length) { + region->internal_buffer = uacpi_kernel_alloc_zeroed(region->length); + if (uacpi_unlikely(region->internal_buffer == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + } + + attach_data.pcc_info.buffer.bytes = 
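+            /*
+             * For PCC (Platform Communications Channel) regions the region
+             * "offset" encodes a subspace ID rather than an address, and all
+             * field IO is staged through this internal buffer until a write
+             * to the command area triggers an actual send (see
+             * uacpi_dispatch_opregion_io below).
+             */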
region->internal_buffer; + attach_data.pcc_info.buffer.length = region->length; + attach_data.pcc_info.subspace_id = region->offset; + break; + case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO: + attach_data.gpio_info.num_pins = region->length; + break; + default: + attach_data.generic_info.base = region->offset; + attach_data.generic_info.length = region->length; + break; + } + + attach_data.handler_context = handler->user_context; + + uacpi_object_ref(obj); + uacpi_namespace_write_unlock(); + ret = handler->callback(UACPI_REGION_OP_ATTACH, &attach_data); + uacpi_namespace_write_lock(); + + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(obj); + return ret; + } + + region->state_flags |= UACPI_OP_REGION_STATE_ATTACHED; + region->user_context = attach_data.out_region_context; + uacpi_object_unref(obj); + return ret; +} + +static void region_install_handler( + uacpi_namespace_node *node, uacpi_address_space_handler *handler +) +{ + uacpi_operation_region *region; + + region = uacpi_namespace_node_get_object(node)->op_region; + region->handler = handler; + uacpi_shareable_ref(handler); + + region->next = handler->regions; + handler->regions = region; +} + +enum unreg { + UNREG_NO = 0, + UNREG_YES, +}; + +static void region_uninstall_handler( + uacpi_namespace_node *node, enum unreg unreg +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_address_space_handler *handler; + uacpi_operation_region *region, *link; + + obj = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) + return; + + region = obj->op_region; + + handler = region->handler; + if (handler == UACPI_NULL) + return; + + link = find_previous_region_link(region); + if (uacpi_unlikely(link == UACPI_NULL)) { + uacpi_error("operation region @%p not in the handler@%p list(?)\n", + region, handler); + goto out; + } else if (link == region) { + link = link->next; + handler->regions = link; + } else { + link->next = region->next; + } + +out: + if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED) { + uacpi_region_detach_data detach_data = { 0 }; + + detach_data.region_node = node; + detach_data.region_context = region->user_context; + detach_data.handler_context = handler->user_context; + + uacpi_shareable_ref(node); + uacpi_namespace_write_unlock(); + + ret = handler->callback(UACPI_REGION_OP_DETACH, &detach_data); + + uacpi_namespace_write_lock(); + uacpi_namespace_node_unref(node); + + if (uacpi_unlikely_error(ret)) { + uacpi_trace_region_error( + node, "error during handler detach for", ret + ); + } + } + + if ((region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED) && + unreg == UNREG_YES) { + region_run_reg(node, ACPI_REG_DISCONNECT); + region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED; + } + + uacpi_address_space_handler_unref(region->handler); + region->handler = UACPI_NULL; + region->state_flags &= ~UACPI_OP_REGION_STATE_ATTACHED; +} + +static uacpi_status upgrade_to_opregion_lock(void) +{ + uacpi_status ret; + + /* + * Drop the namespace lock, and reacquire it after the opregion lock + * so we keep the ordering with user API. 
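+     * Public entry points such as uacpi_reg_all_opregions() take the
+     * opregion lock first and the namespace lock second, so reacquiring in
+     * any other order here could deadlock against them.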
+ */ + uacpi_namespace_write_unlock(); + + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + uacpi_namespace_write_lock(); + return ret; +} + +void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node) +{ + if (uacpi_unlikely_error(upgrade_to_opregion_lock())) + return; + + region_uninstall_handler(node, UNREG_YES); + + uacpi_recursive_lock_release(&g_opregion_lock); +} + +uacpi_bool uacpi_address_space_handler_is_default( + uacpi_address_space_handler *handler +) +{ + return handler->flags & UACPI_ADDRESS_SPACE_HANDLER_DEFAULT; +} + +enum opregion_iter_action { + OPREGION_ITER_ACTION_UNINSTALL, + OPREGION_ITER_ACTION_INSTALL, +}; + +struct opregion_iter_ctx { + enum opregion_iter_action action; + uacpi_address_space_handler *handler; +}; + +static uacpi_iteration_decision do_install_or_uninstall_handler( + uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth +) +{ + struct opregion_iter_ctx *ctx = opaque; + uacpi_address_space_handlers *handlers; + uacpi_object *object; + + UACPI_UNUSED(depth); + + object = uacpi_namespace_node_get_object(node); + if (object->type == UACPI_OBJECT_OPERATION_REGION) { + uacpi_operation_region *region = object->op_region; + + if (region->space != ctx->handler->space) + return UACPI_ITERATION_DECISION_CONTINUE; + + if (ctx->action == OPREGION_ITER_ACTION_INSTALL) { + if (region->handler) + region_uninstall_handler(node, UNREG_NO); + + region_install_handler(node, ctx->handler); + } else { + if (uacpi_unlikely(region->handler != ctx->handler)) { + uacpi_trace_region_error( + node, "handler mismatch for", + UACPI_STATUS_INTERNAL_ERROR + ); + return UACPI_ITERATION_DECISION_CONTINUE; + } + + region_uninstall_handler(node, UNREG_NO); + } + + return UACPI_ITERATION_DECISION_CONTINUE; + } + + handlers = uacpi_node_get_address_space_handlers(node); + if (handlers == UACPI_NULL) + return UACPI_ITERATION_DECISION_CONTINUE; + + // Device already has a handler for this space installed + if (find_handler(handlers, ctx->handler->space) != UACPI_NULL) + return UACPI_ITERATION_DECISION_NEXT_PEER; + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +struct reg_run_ctx { + uacpi_u8 space; + uacpi_u8 connection_code; + uacpi_size reg_executed; + uacpi_size reg_errors; +}; + +static uacpi_iteration_decision do_run_reg( + void *opaque, uacpi_namespace_node *node, uacpi_u32 depth +) +{ + struct reg_run_ctx *ctx = opaque; + uacpi_operation_region *region; + uacpi_status ret; + uacpi_bool was_regged; + + UACPI_UNUSED(depth); + + region = uacpi_namespace_node_get_object(node)->op_region; + + if (region->space != ctx->space) + return UACPI_ITERATION_DECISION_CONTINUE; + + was_regged = region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED; + if (was_regged == (ctx->connection_code == ACPI_REG_CONNECT)) + return UACPI_ITERATION_DECISION_CONTINUE; + + ret = region_run_reg(node, ctx->connection_code); + if (ctx->connection_code == ACPI_REG_DISCONNECT) + region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED; + + if (ret == UACPI_STATUS_NOT_FOUND) + return UACPI_ITERATION_DECISION_CONTINUE; + + if (ctx->connection_code == ACPI_REG_CONNECT) + region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED; + + ctx->reg_executed++; + + if (uacpi_unlikely_error(ret)) { + ctx->reg_errors++; + return UACPI_ITERATION_DECISION_CONTINUE; + } + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +static uacpi_status reg_or_unreg_all_opregions( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_u8 connection_code +) +{ + uacpi_address_space_handlers 
*handlers; + uacpi_bool is_connect; + enum uacpi_permanent_only perm_only; + struct reg_run_ctx ctx = { 0 }; + + ctx.space = space; + ctx.connection_code = connection_code; + + handlers = uacpi_node_get_address_space_handlers(device_node); + if (uacpi_unlikely(handlers == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + is_connect = connection_code == ACPI_REG_CONNECT; + if (uacpi_unlikely(is_connect && + find_handler(handlers, space) == UACPI_NULL)) + return UACPI_STATUS_NO_HANDLER; + + /* + * We want to unreg non-permanent opregions as well, however, + * registering them is handled separately and should not be + * done by us. + */ + perm_only = is_connect ? UACPI_PERMANENT_ONLY_YES : UACPI_PERMANENT_ONLY_NO; + + uacpi_namespace_do_for_each_child( + device_node, do_run_reg, UACPI_NULL, + UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_NO, perm_only, &ctx + ); + + uacpi_trace( + "%sactivated all '%s' opregions controlled by '%.4s', " + "%zu _REG() calls (%zu errors)\n", + connection_code == ACPI_REG_CONNECT ? "" : "de", + uacpi_address_space_to_string(space), + device_node->name.text, ctx.reg_executed, ctx.reg_errors + ); + return UACPI_STATUS_OK; +} + +static uacpi_address_space_handlers *extract_handlers( + uacpi_namespace_node *node +) +{ + uacpi_object *handlers_obj; + + if (node == uacpi_namespace_root()) + return g_uacpi_rt_ctx.root_object->address_space_handlers; + + handlers_obj = uacpi_namespace_node_get_object_typed( + node, + UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT | + UACPI_OBJECT_PROCESSOR_BIT + ); + if (uacpi_unlikely(handlers_obj == UACPI_NULL)) + return UACPI_NULL; + + return handlers_obj->address_space_handlers; +} + +uacpi_status uacpi_reg_all_opregions( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + if (!space_needs_reg(space)) + return UACPI_STATUS_OK; + + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; + } + + if (uacpi_unlikely(extract_handlers(device_node) == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + ret = reg_or_unreg_all_opregions(device_node, space, ACPI_REG_CONNECT); + +out: + uacpi_namespace_write_unlock(); + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_status uacpi_install_address_space_handler_with_flags( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context, + uacpi_u16 flags +) +{ + uacpi_status ret; + uacpi_address_space_handlers *handlers; + uacpi_address_space_handler *this_handler, *new_handler; + struct opregion_iter_ctx iter_ctx; + + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; + } + + handlers = extract_handlers(device_node); + if (uacpi_unlikely(handlers == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + this_handler = find_handler(handlers, space); + if (this_handler != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + new_handler = uacpi_kernel_alloc(sizeof(*new_handler)); + if (new_handler == UACPI_NULL) { + ret 
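+        /*
+         * A minimal sketch of the consumer side of this API (illustrative
+         * only; "ec_node" and the actual IO are hypothetical placeholders):
+         *
+         *     static uacpi_status ec_handler(uacpi_region_op op, uacpi_handle data)
+         *     {
+         *         switch (op) {
+         *         case UACPI_REGION_OP_ATTACH:
+         *         case UACPI_REGION_OP_DETACH:
+         *             return UACPI_STATUS_OK;
+         *         case UACPI_REGION_OP_READ: {
+         *             uacpi_region_rw_data *rw = data;
+         *             // read rw->byte_width bytes at rw->offset into rw->value
+         *             return UACPI_STATUS_OK;
+         *         }
+         *         case UACPI_REGION_OP_WRITE: {
+         *             uacpi_region_rw_data *rw = data;
+         *             // write rw->byte_width bytes of rw->value at rw->offset
+         *             return UACPI_STATUS_OK;
+         *         }
+         *         default:
+         *             return UACPI_STATUS_INVALID_ARGUMENT;
+         *         }
+         *     }
+         *
+         *     uacpi_install_address_space_handler(
+         *         ec_node, UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER,
+         *         ec_handler, UACPI_NULL
+         *     );
+         */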
= UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + uacpi_shareable_init(new_handler); + + new_handler->next = handlers->head; + new_handler->space = space; + new_handler->user_context = handler_context; + new_handler->callback = handler; + new_handler->regions = UACPI_NULL; + new_handler->flags = flags; + handlers->head = new_handler; + + iter_ctx.handler = new_handler; + iter_ctx.action = OPREGION_ITER_ACTION_INSTALL; + + uacpi_namespace_do_for_each_child( + device_node, do_install_or_uninstall_handler, UACPI_NULL, + UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO, + UACPI_PERMANENT_ONLY_YES, &iter_ctx + ); + + if (!space_needs_reg(space)) + goto out; + + /* + * Installing an early address space handler, obviously not possible to + * execute any _REG methods here. Just return and hope that it is either + * a global address space handler, or a handler installed by a user who + * will run uacpi_reg_all_opregions manually after loading/initializing + * the namespace. + */ + if (g_uacpi_rt_ctx.init_level < UACPI_INIT_LEVEL_NAMESPACE_LOADED) + goto out; + + // Init level is NAMESPACE_INITIALIZED, so we can safely run _REG now + ret = reg_or_unreg_all_opregions( + device_node, space, ACPI_REG_CONNECT + ); + +out: + uacpi_namespace_write_unlock(); + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_status uacpi_install_address_space_handler( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context +) +{ + return uacpi_install_address_space_handler_with_flags( + device_node, space, handler, handler_context, 0 + ); +} + +uacpi_status uacpi_uninstall_address_space_handler( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +) +{ + uacpi_status ret; + uacpi_address_space_handlers *handlers; + uacpi_address_space_handler *handler = UACPI_NULL, *prev_handler; + struct opregion_iter_ctx iter_ctx; + + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; + } + + handlers = extract_handlers(device_node); + if (uacpi_unlikely(handlers == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + handler = find_handler(handlers, space); + if (uacpi_unlikely(handler == UACPI_NULL)) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + } + + iter_ctx.handler = handler; + iter_ctx.action = OPREGION_ITER_ACTION_UNINSTALL; + + uacpi_namespace_do_for_each_child( + device_node, do_install_or_uninstall_handler, UACPI_NULL, + UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO, + UACPI_PERMANENT_ONLY_NO, &iter_ctx + ); + + prev_handler = handlers->head; + + // Are we the last linked handler? + if (prev_handler == handler) { + handlers->head = handler->next; + goto out_unreg; + } + + // Nope, we're somewhere in the middle. Do a search. 
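+    /*
+     * Classic singly-linked list removal: the head case was handled above,
+     * so scan for the node whose ->next is the handler being removed. Either
+     * way control falls through to out_unreg so that _REG(disconnect) still
+     * runs for every opregion in this address space.
+     */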
+ while (prev_handler) { + if (prev_handler->next == handler) { + prev_handler->next = handler->next; + goto out_unreg; + } + + prev_handler = prev_handler->next; + } + +out_unreg: + if (space_needs_reg(space)) + reg_or_unreg_all_opregions(device_node, space, ACPI_REG_DISCONNECT); + +out: + if (handler != UACPI_NULL) + uacpi_address_space_handler_unref(handler); + + uacpi_namespace_write_unlock(); + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node) +{ + uacpi_status ret; + uacpi_namespace_node *parent = node->parent; + uacpi_operation_region *region; + uacpi_address_space_handlers *handlers; + uacpi_address_space_handler *handler; + + ret = upgrade_to_opregion_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + region = uacpi_namespace_node_get_object(node)->op_region; + ret = UACPI_STATUS_NOT_FOUND; + + while (parent) { + handlers = uacpi_node_get_address_space_handlers(parent); + if (handlers != UACPI_NULL) { + handler = find_handler(handlers, region->space); + + if (handler != UACPI_NULL) { + region_install_handler(node, handler); + ret = UACPI_STATUS_OK; + break; + } + } + + parent = parent->parent; + } + + if (ret != UACPI_STATUS_OK) + goto out; + if (!space_needs_reg(region->space)) + goto out; + if (uacpi_get_current_init_level() < UACPI_INIT_LEVEL_NAMESPACE_LOADED) + goto out; + + if (region_run_reg(node, ACPI_REG_CONNECT) != UACPI_STATUS_NOT_FOUND) + region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED; + +out: + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_bool uacpi_is_buffer_access_address_space(uacpi_address_space space) +{ + switch (space) { + case UACPI_ADDRESS_SPACE_SMBUS: + case UACPI_ADDRESS_SPACE_IPMI: + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + case UACPI_ADDRESS_SPACE_PRM: + case UACPI_ADDRESS_SPACE_FFIXEDHW: + return UACPI_TRUE; + default: + return UACPI_FALSE; + } +} + +static uacpi_bool space_needs_bounds_checking(uacpi_address_space space) +{ + return !uacpi_is_buffer_access_address_space(space) && + space != UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO; +} + +uacpi_status uacpi_dispatch_opregion_io( + uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op, + union uacpi_opregion_io_data data +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_operation_region *region; + uacpi_address_space_handler *handler; + uacpi_address_space space; + uacpi_u64 abs_offset, offset_end = offset; + uacpi_bool is_oob = UACPI_FALSE; + uacpi_region_op orig_op = op; + + union { + uacpi_region_rw_data rw; + uacpi_region_pcc_send_data pcc; + uacpi_region_gpio_rw_data gpio; + uacpi_region_ipmi_rw_data ipmi; + uacpi_region_ffixedhw_rw_data ffixedhw; + uacpi_region_prm_rw_data prm; + uacpi_region_serial_rw_data serial; + } handler_data; + + ret = upgrade_to_opregion_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_opregion_attach(field->region); + if (uacpi_unlikely_error(ret)) { + uacpi_trace_region_error( + field->region, "unable to attach", ret + ); + goto out; + } + + obj = uacpi_namespace_node_get_object_typed( + field->region, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + region = obj->op_region; + space = region->space; + handler = region->handler; + + abs_offset = region->offset + offset; + offset_end += field->access_width_bytes; + + if (uacpi_likely(space_needs_bounds_checking(region->space))) + is_oob = region->length < offset_end 
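+        /*
+         * Illustrative numbers: a region of length 0x100 with a 4-byte
+         * access at field offset 0xFE gives offset_end = 0x102 > 0x100, so
+         * the access is rejected; the second clause catches 64-bit
+         * wraparound of region->offset + offset.
+         */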
|| abs_offset < offset; + if (uacpi_unlikely(is_oob)) { + const uacpi_char *path; + + path = uacpi_namespace_node_generate_absolute_path(field->region); + uacpi_error( + "out-of-bounds access to opregion %s[0x%"UACPI_PRIX64"->" + "0x%"UACPI_PRIX64"] at 0x%"UACPI_PRIX64" (idx=%u, width=%d)\n", + path, UACPI_FMT64(region->offset), + UACPI_FMT64(region->offset + region->length), + UACPI_FMT64(abs_offset), offset, field->access_width_bytes + ); + uacpi_free_dynamic_string(path); + ret = UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX; + goto out; + } + + handler_data.rw.region_context = region->user_context; + handler_data.rw.handler_context = handler->user_context; + + switch (region->space) { + case UACPI_ADDRESS_SPACE_PCC: { + uacpi_u8 *cursor; + + cursor = region->internal_buffer + offset; + + /* + * Reads from PCC just return the current contents of the internal + * buffer. + */ + if (op == UACPI_REGION_OP_READ) { + uacpi_memcpy_zerout( + data.integer, cursor, sizeof(*data.integer), + field->access_width_bytes + ); + goto io_done; + } + + uacpi_memcpy(cursor, data.integer, field->access_width_bytes); + + /* + * Dispatch a PCC send command if this was a write to the command field + * + * ACPI 6.5: 14.3. Extended PCC Subspace Shared Memory Region + */ + if (offset >= 12 && offset < 16) { + uacpi_memzero(&handler_data.pcc.buffer, sizeof(handler_data.pcc.buffer)); + handler_data.pcc.buffer.bytes = region->internal_buffer; + handler_data.pcc.buffer.length = region->length; + + op = UACPI_REGION_OP_PCC_SEND; + break; + } + + // No dispatch needed, IO is done + goto io_done; + } + case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO: + handler_data.gpio.pin_offset = field->pin_offset; + handler_data.gpio.num_pins = field->bit_length; + handler_data.gpio.value = *data.integer; + + ret = uacpi_object_get_string_or_buffer( + field->connection, &handler_data.gpio.connection + ); + if (uacpi_unlikely_error(ret)) + goto io_done; + + op = op == UACPI_REGION_OP_READ ? + UACPI_REGION_OP_GPIO_READ : UACPI_REGION_OP_GPIO_WRITE; + break; + case UACPI_ADDRESS_SPACE_IPMI: + handler_data.ipmi.in_out_message = data.buffer; + handler_data.ipmi.command = abs_offset; + op = UACPI_REGION_OP_IPMI_COMMAND; + break; + case UACPI_ADDRESS_SPACE_FFIXEDHW: + handler_data.ffixedhw.in_out_message = data.buffer; + handler_data.ffixedhw.command = abs_offset; + op = UACPI_REGION_OP_FFIXEDHW_COMMAND; + break; + case UACPI_ADDRESS_SPACE_PRM: + handler_data.prm.in_out_message = data.buffer; + op = UACPI_REGION_OP_PRM_COMMAND; + break; + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + case UACPI_ADDRESS_SPACE_SMBUS: + ret = uacpi_object_get_string_or_buffer( + field->connection, &handler_data.serial.connection + ); + if (uacpi_unlikely_error(ret)) + goto io_done; + + handler_data.serial.command = abs_offset; + handler_data.serial.in_out_buffer = data.buffer; + handler_data.serial.access_attribute = field->attributes; + + switch (field->attributes) { + case UACPI_ACCESS_ATTRIBUTE_BYTES: + case UACPI_ACCESS_ATTRIBUTE_RAW_BYTES: + case UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES: + handler_data.serial.access_length = field->access_length; + break; + default: + handler_data.serial.access_length = 0; + } + + op = op == UACPI_REGION_OP_READ ? 
+ UACPI_REGION_OP_SERIAL_READ : UACPI_REGION_OP_SERIAL_WRITE; + break; + default: + handler_data.rw.byte_width = field->access_width_bytes; + handler_data.rw.offset = abs_offset; + handler_data.rw.value = *data.integer; + break; + } + + uacpi_object_ref(obj); + uacpi_namespace_write_unlock(); + + ret = handler->callback(op, &handler_data); + + uacpi_namespace_write_lock(); + uacpi_object_unref(obj); + +io_done: + if (uacpi_unlikely_error(ret)) { + uacpi_trace_region_error(field->region, "unable to perform IO", ret); + goto out; + } + + if (orig_op == UACPI_REGION_OP_READ) { + switch (region->space) { + case UACPI_ADDRESS_SPACE_PCC: + case UACPI_ADDRESS_SPACE_IPMI: + case UACPI_ADDRESS_SPACE_FFIXEDHW: + case UACPI_ADDRESS_SPACE_PRM: + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + case UACPI_ADDRESS_SPACE_SMBUS: + break; + case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO: + *data.integer = handler_data.gpio.value; + break; + default: + *data.integer = handler_data.rw.value; + break; + } + } + + trace_region_io(field, space, abs_offset, orig_op, data); + +out: + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/osi.c b/sys/dev/acpi/uacpi/osi.c new file mode 100644 index 0000000..0940261 --- /dev/null +++ b/sys/dev/acpi/uacpi/osi.c @@ -0,0 +1,388 @@ +#include <uacpi/platform/atomic.h> +#include <uacpi/internal/osi.h> +#include <uacpi/internal/helpers.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/mutex.h> +#include <uacpi/kernel_api.h> + +#ifndef UACPI_BAREBONES_MODE + +struct registered_interface { + const uacpi_char *name; + uacpi_u8 weight; + uacpi_u8 kind; + + // Only applicable for predefined host interfaces + uacpi_u8 host_type; + + // Only applicable for predefined interfaces + uacpi_u8 disabled : 1; + uacpi_u8 dynamic : 1; + + struct registered_interface *next; +}; + +static uacpi_handle interface_mutex; +static struct registered_interface *registered_interfaces; +static uacpi_interface_handler interface_handler; +static uacpi_u32 latest_queried_interface; + +#define WINDOWS(string, interface) \ + { \ + .name = "Windows "string, \ + .weight = UACPI_VENDOR_INTERFACE_WINDOWS_##interface, \ + .kind = UACPI_INTERFACE_KIND_VENDOR, \ + .host_type = 0, \ + .disabled = 0, \ + .dynamic = 0, \ + .next = UACPI_NULL \ + } + +#define HOST_FEATURE(string, type) \ + { \ + .name = string, \ + .weight = 0, \ + .kind = UACPI_INTERFACE_KIND_FEATURE, \ + .host_type = UACPI_HOST_INTERFACE_##type, \ + .disabled = 1, \ + .dynamic = 0, \ + .next = UACPI_NULL, \ + } + +static struct registered_interface predefined_interfaces[] = { + // Vendor strings + WINDOWS("2000", 2000), + WINDOWS("2001", XP), + WINDOWS("2001 SP1", XP_SP1), + WINDOWS("2001.1", SERVER_2003), + WINDOWS("2001 SP2", XP_SP2), + WINDOWS("2001.1 SP1", SERVER_2003_SP1), + WINDOWS("2006", VISTA), + WINDOWS("2006.1", SERVER_2008), + WINDOWS("2006 SP1", VISTA_SP1), + WINDOWS("2006 SP2", VISTA_SP2), + WINDOWS("2009", 7), + WINDOWS("2012", 8), + WINDOWS("2013", 8_1), + WINDOWS("2015", 10), + WINDOWS("2016", 10_RS1), + WINDOWS("2017", 10_RS2), + WINDOWS("2017.2", 10_RS3), + WINDOWS("2018", 10_RS4), + WINDOWS("2018.2", 10_RS5), + WINDOWS("2019", 10_19H1), + WINDOWS("2020", 10_20H1), + WINDOWS("2021", 11), + WINDOWS("2022", 11_22H2), + + // Feature strings + HOST_FEATURE("Module Device", MODULE_DEVICE), + HOST_FEATURE("Processor Device", PROCESSOR_DEVICE), + HOST_FEATURE("3.0 Thermal Model", 3_0_THERMAL_MODEL), + 
HOST_FEATURE("3.0 _SCP Extensions", 3_0_SCP_EXTENSIONS), + HOST_FEATURE("Processor Aggregator Device", PROCESSOR_AGGREGATOR_DEVICE), + + // Interpreter features + { .name = "Extended Address Space Descriptor" }, +}; + +uacpi_status uacpi_initialize_interfaces(void) +{ + uacpi_size i; + + registered_interfaces = &predefined_interfaces[0]; + + interface_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(interface_mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + for (i = 0; i < (UACPI_ARRAY_SIZE(predefined_interfaces) - 1); ++i) + predefined_interfaces[i].next = &predefined_interfaces[i + 1]; + + return UACPI_STATUS_OK; +} + +void uacpi_deinitialize_interfaces(void) +{ + struct registered_interface *iface, *next_iface = registered_interfaces; + + while (next_iface) { + iface = next_iface; + next_iface = iface->next; + + iface->next = UACPI_NULL; + + if (iface->dynamic) { + uacpi_free_dynamic_string(iface->name); + uacpi_free(iface, sizeof(*iface)); + continue; + } + + // Only features are disabled by default + iface->disabled = iface->kind == UACPI_INTERFACE_KIND_FEATURE ? + UACPI_TRUE : UACPI_FALSE; + } + + if (interface_mutex) + uacpi_kernel_free_mutex(interface_mutex); + + interface_mutex = UACPI_NULL; + interface_handler = UACPI_NULL; + latest_queried_interface = 0; + registered_interfaces = UACPI_NULL; +} + +uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void) +{ + return uacpi_atomic_load32(&latest_queried_interface); +} + +static struct registered_interface *find_interface_unlocked( + const uacpi_char *name +) +{ + struct registered_interface *interface = registered_interfaces; + + while (interface) { + if (uacpi_strcmp(interface->name, name) == 0) + return interface; + + interface = interface->next; + } + + return UACPI_NULL; +} + +static struct registered_interface *find_host_interface_unlocked( + uacpi_host_interface type +) +{ + struct registered_interface *interface = registered_interfaces; + + while (interface) { + if (interface->host_type == type) + return interface; + + interface = interface->next; + } + + return UACPI_NULL; +} + +uacpi_status uacpi_install_interface( + const uacpi_char *name, uacpi_interface_kind kind +) +{ + struct registered_interface *interface; + uacpi_status ret; + uacpi_char *name_copy; + uacpi_size name_size; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + interface = find_interface_unlocked(name); + if (interface != UACPI_NULL) { + if (interface->disabled) + interface->disabled = UACPI_FALSE; + + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + interface = uacpi_kernel_alloc(sizeof(*interface)); + if (uacpi_unlikely(interface == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + name_size = uacpi_strlen(name) + 1; + name_copy = uacpi_kernel_alloc(name_size); + if (uacpi_unlikely(name_copy == UACPI_NULL)) { + uacpi_free(interface, sizeof(*interface)); + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + uacpi_memcpy(name_copy, name, name_size); + interface->name = name_copy; + interface->weight = 0; + interface->kind = kind; + interface->host_type = 0; + interface->disabled = 0; + interface->dynamic = 1; + interface->next = registered_interfaces; + registered_interfaces = interface; + +out: + uacpi_release_native_mutex(interface_mutex); + return ret; +} + +uacpi_status uacpi_uninstall_interface(const uacpi_char *name) +{ + struct registered_interface *cur, 
*prev; + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + cur = registered_interfaces; + prev = cur; + + ret = UACPI_STATUS_NOT_FOUND; + while (cur) { + if (uacpi_strcmp(cur->name, name) != 0) { + prev = cur; + cur = cur->next; + continue; + } + + if (cur->dynamic) { + if (prev == cur) { + registered_interfaces = cur->next; + } else { + prev->next = cur->next; + } + + uacpi_release_native_mutex(interface_mutex); + uacpi_free_dynamic_string(cur->name); + uacpi_free(cur, sizeof(*cur)); + return UACPI_STATUS_OK; + } + + /* + * If this interface was already disabled, pretend we didn't actually + * find it and keep ret as UACPI_STATUS_NOT_FOUND. The fact that it's + * still in the registered list is an implementation detail of + * predefined interfaces. + */ + if (!cur->disabled) { + cur->disabled = UACPI_TRUE; + ret = UACPI_STATUS_OK; + } + + break; + } + + uacpi_release_native_mutex(interface_mutex); + return ret; +} + +static uacpi_status configure_host_interface( + uacpi_host_interface type, uacpi_bool enabled +) +{ + struct registered_interface *interface; + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + interface = find_host_interface_unlocked(type); + if (interface == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } + + interface->disabled = !enabled; +out: + uacpi_release_native_mutex(interface_mutex); + return ret; +} + +uacpi_status uacpi_enable_host_interface(uacpi_host_interface type) +{ + return configure_host_interface(type, UACPI_TRUE); +} + +uacpi_status uacpi_disable_host_interface(uacpi_host_interface type) +{ + return configure_host_interface(type, UACPI_FALSE); +} + +uacpi_status uacpi_set_interface_query_handler( + uacpi_interface_handler handler +) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + if (interface_handler != UACPI_NULL && handler != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + interface_handler = handler; +out: + uacpi_release_native_mutex(interface_mutex); + return ret; +} + +uacpi_status uacpi_bulk_configure_interfaces( + uacpi_interface_action action, uacpi_interface_kind kind +) +{ + uacpi_status ret; + struct registered_interface *interface; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + interface = registered_interfaces; + while (interface) { + if (kind & interface->kind) + interface->disabled = (action == UACPI_INTERFACE_ACTION_DISABLE); + + interface = interface->next; + } + + uacpi_release_native_mutex(interface_mutex); + return ret; +} + +uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value) +{ + uacpi_status ret; + struct registered_interface *interface; + uacpi_bool is_supported = UACPI_FALSE; + + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + interface = find_interface_unlocked(string); + if (interface == UACPI_NULL) + goto out; + + if (interface->weight > latest_queried_interface) + uacpi_atomic_store32(&latest_queried_interface, 
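+        /*
+         * The predefined Windows strings are ordered by release, so the
+         * highest weight queried so far approximates which OS the firmware
+         * thinks it is running under; it is exposed to the host via
+         * uacpi_latest_queried_vendor_interface() above.
+         */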
interface->weight); + + is_supported = !interface->disabled; + if (interface_handler) + is_supported = interface_handler(string, is_supported); +out: + uacpi_release_native_mutex(interface_mutex); + *out_value = is_supported; + return UACPI_STATUS_OK; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/registers.c b/sys/dev/acpi/uacpi/registers.c new file mode 100644 index 0000000..a52ce97 --- /dev/null +++ b/sys/dev/acpi/uacpi/registers.c @@ -0,0 +1,572 @@ +#include <uacpi/internal/registers.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/context.h> +#include <uacpi/internal/io.h> +#include <uacpi/internal/log.h> +#include <uacpi/platform/atomic.h> +#include <uacpi/acpi.h> + +#ifndef UACPI_BAREBONES_MODE + +static uacpi_handle g_reg_lock; + +enum register_kind { + REGISTER_KIND_GAS, + REGISTER_KIND_IO, +}; + +enum register_access_kind { + REGISTER_ACCESS_KIND_PRESERVE, + REGISTER_ACCESS_KIND_WRITE_TO_CLEAR, + REGISTER_ACCESS_KIND_NORMAL, +}; + +struct register_spec { + uacpi_u8 kind; + uacpi_u8 access_kind; + uacpi_u8 access_width; // only REGISTER_KIND_IO + void *accessors[2]; + uacpi_u64 write_only_mask; + uacpi_u64 preserve_mask; +}; + +static const struct register_spec g_registers[UACPI_REGISTER_MAX + 1] = { + [UACPI_REGISTER_PM1_STS] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR, + .accessors = { + &g_uacpi_rt_ctx.pm1a_status_blk, + &g_uacpi_rt_ctx.pm1b_status_blk, + }, + .preserve_mask = ACPI_PM1_STS_IGN0_MASK, + }, + [UACPI_REGISTER_PM1_EN] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_PRESERVE, + .accessors = { + &g_uacpi_rt_ctx.pm1a_enable_blk, + &g_uacpi_rt_ctx.pm1b_enable_blk, + }, + }, + [UACPI_REGISTER_PM1_CNT] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_PRESERVE, + .accessors = { + &g_uacpi_rt_ctx.fadt.x_pm1a_cnt_blk, + &g_uacpi_rt_ctx.fadt.x_pm1b_cnt_blk, + }, + .write_only_mask = ACPI_PM1_CNT_SLP_EN_MASK | + ACPI_PM1_CNT_GBL_RLS_MASK, + .preserve_mask = ACPI_PM1_CNT_PRESERVE_MASK, + }, + [UACPI_REGISTER_PM_TMR] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_PRESERVE, + .accessors = { &g_uacpi_rt_ctx.fadt.x_pm_tmr_blk, }, + }, + [UACPI_REGISTER_PM2_CNT] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_PRESERVE, + .accessors = { &g_uacpi_rt_ctx.fadt.x_pm2_cnt_blk, }, + .preserve_mask = ACPI_PM2_CNT_PRESERVE_MASK, + }, + [UACPI_REGISTER_SLP_CNT] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_PRESERVE, + .accessors = { &g_uacpi_rt_ctx.fadt.sleep_control_reg, }, + .write_only_mask = ACPI_SLP_CNT_SLP_EN_MASK, + .preserve_mask = ACPI_SLP_CNT_PRESERVE_MASK, + }, + [UACPI_REGISTER_SLP_STS] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR, + .accessors = { &g_uacpi_rt_ctx.fadt.sleep_status_reg, }, + .preserve_mask = ACPI_SLP_STS_PRESERVE_MASK, + }, + [UACPI_REGISTER_RESET] = { + .kind = REGISTER_KIND_GAS, + .access_kind = REGISTER_ACCESS_KIND_NORMAL, + .accessors = { &g_uacpi_rt_ctx.fadt.reset_reg, }, + }, + [UACPI_REGISTER_SMI_CMD] = { + .kind = REGISTER_KIND_IO, + .access_kind = REGISTER_ACCESS_KIND_NORMAL, + .access_width = 1, + .accessors = { &g_uacpi_rt_ctx.fadt.smi_cmd, }, + }, +}; + +enum register_mapping_state { + REGISTER_MAPPING_STATE_NONE = 0, + REGISTER_MAPPING_STATE_NOT_NEEDED, + REGISTER_MAPPING_STATE_MAPPED, +}; + +struct register_mapping { + uacpi_mapped_gas mappings[2]; + uacpi_u8 states[2]; +}; +static struct register_mapping 
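+/*
+ * Fixed-hardware registers may be split into "a" and "b" blocks (e.g.
+ * PM1a/PM1b) on some chipsets, hence the two mapping slots per register:
+ * reads below OR the two halves together and writes are mirrored to both,
+ * matching the ACPI fixed-hardware register model.
+ */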
g_register_mappings[UACPI_REGISTER_MAX + 1]; + +static uacpi_status map_one( + const struct register_spec *spec, struct register_mapping *mapping, + uacpi_u8 idx +) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (mapping->states[idx] != REGISTER_MAPPING_STATE_NONE) + return ret; + + if (spec->kind == REGISTER_KIND_GAS) { + struct acpi_gas *gas = spec->accessors[idx]; + + if (gas == UACPI_NULL || gas->address == 0) { + mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED; + return ret; + } + + ret = uacpi_map_gas_noalloc(gas, &mapping->mappings[idx]); + } else { + struct acpi_gas temp_gas = { 0 }; + + if (idx != 0) { + mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED; + return ret; + } + + temp_gas.address_space_id = UACPI_ADDRESS_SPACE_SYSTEM_IO; + temp_gas.address = *(uacpi_u32*)spec->accessors[0]; + temp_gas.register_bit_width = spec->access_width * 8; + + ret = uacpi_map_gas_noalloc(&temp_gas, &mapping->mappings[idx]); + } + + if (uacpi_likely_success(ret)) + mapping->states[idx] = REGISTER_MAPPING_STATE_MAPPED; + + return ret; +} + +static uacpi_status ensure_register_mapped( + const struct register_spec *spec, struct register_mapping *mapping +) +{ + uacpi_status ret; + uacpi_bool needs_mapping = UACPI_FALSE; + uacpi_u8 state; + uacpi_cpu_flags flags; + + state = uacpi_atomic_load8(&mapping->states[0]); + needs_mapping |= state == REGISTER_MAPPING_STATE_NONE; + + state = uacpi_atomic_load8(&mapping->states[1]); + needs_mapping |= state == REGISTER_MAPPING_STATE_NONE; + + if (!needs_mapping) + return UACPI_STATUS_OK; + + flags = uacpi_kernel_lock_spinlock(g_reg_lock); + + ret = map_one(spec, mapping, 0); + if (uacpi_unlikely_error(ret)) + goto out; + + ret = map_one(spec, mapping, 1); +out: + uacpi_kernel_unlock_spinlock(g_reg_lock, flags); + return ret; +} + +static uacpi_status get_reg( + uacpi_u8 idx, const struct register_spec **out_spec, + struct register_mapping **out_mapping +) +{ + if (idx > UACPI_REGISTER_MAX) + return UACPI_STATUS_INVALID_ARGUMENT; + + *out_spec = &g_registers[idx]; + *out_mapping = &g_register_mappings[idx]; + return UACPI_STATUS_OK; +} + +static uacpi_status do_read_one( + struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 *out_value +) +{ + if (mapping->states[idx] != REGISTER_MAPPING_STATE_MAPPED) + return UACPI_STATUS_OK; + + return uacpi_gas_read_mapped(&mapping->mappings[idx], out_value); +} + +static uacpi_status do_read_register( + const struct register_spec *reg, struct register_mapping *mapping, + uacpi_u64 *out_value +) +{ + uacpi_status ret; + uacpi_u64 value0 = 0, value1 = 0; + + ret = do_read_one(mapping, 0, &value0); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = do_read_one(mapping, 1, &value1); + if (uacpi_unlikely_error(ret)) + return ret; + + *out_value = value0 | value1; + if (reg->write_only_mask) + *out_value &= ~reg->write_only_mask; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_read_register( + enum uacpi_register reg_enum, uacpi_u64 *out_value +) +{ + uacpi_status ret; + const struct register_spec *reg; + struct register_mapping *mapping; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = get_reg(reg_enum, ®, &mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = ensure_register_mapped(reg, mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + return do_read_register(reg, mapping, out_value); +} + +static uacpi_status do_write_one( + struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 in_value +) +{ + if (mapping->states[idx] != 
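+    /*
+     * A half that was never mapped (no "b" block, or an optional register
+     * absent from the FADT) is deliberately a successful no-op here.
+     */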
REGISTER_MAPPING_STATE_MAPPED) + return UACPI_STATUS_OK; + + return uacpi_gas_write_mapped(&mapping->mappings[idx], in_value); +} + +static uacpi_status do_write_register( + const struct register_spec *reg, struct register_mapping *mapping, + uacpi_u64 in_value +) +{ + uacpi_status ret; + + if (reg->preserve_mask) { + in_value &= ~reg->preserve_mask; + + if (reg->access_kind == REGISTER_ACCESS_KIND_PRESERVE) { + uacpi_u64 data; + + ret = do_read_register(reg, mapping, &data); + if (uacpi_unlikely_error(ret)) + return ret; + + in_value |= data & reg->preserve_mask; + } + } + + ret = do_write_one(mapping, 0, in_value); + if (uacpi_unlikely_error(ret)) + return ret; + + return do_write_one(mapping, 1, in_value); +} + +uacpi_status uacpi_write_register( + enum uacpi_register reg_enum, uacpi_u64 in_value +) +{ + uacpi_status ret; + const struct register_spec *reg; + struct register_mapping *mapping; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = get_reg(reg_enum, ®, &mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = ensure_register_mapped(reg, mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + return do_write_register(reg, mapping, in_value); +} + +uacpi_status uacpi_write_registers( + enum uacpi_register reg_enum, uacpi_u64 in_value0, uacpi_u64 in_value1 +) +{ + uacpi_status ret; + const struct register_spec *reg; + struct register_mapping *mapping; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + ret = get_reg(reg_enum, ®, &mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = ensure_register_mapped(reg, mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = do_write_one(mapping, 0, in_value0); + if (uacpi_unlikely_error(ret)) + return ret; + + return do_write_one(mapping, 1, in_value1); +} + +struct register_field { + uacpi_u8 reg; + uacpi_u8 offset; + uacpi_u16 mask; +}; + +static const struct register_field g_fields[UACPI_REGISTER_FIELD_MAX + 1] = { + [UACPI_REGISTER_FIELD_TMR_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_TMR_STS_IDX, + .mask = ACPI_PM1_STS_TMR_STS_MASK, + }, + [UACPI_REGISTER_FIELD_BM_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_BM_STS_IDX, + .mask = ACPI_PM1_STS_BM_STS_MASK, + }, + [UACPI_REGISTER_FIELD_GBL_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_GBL_STS_IDX, + .mask = ACPI_PM1_STS_GBL_STS_MASK, + }, + [UACPI_REGISTER_FIELD_PWRBTN_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_PWRBTN_STS_IDX, + .mask = ACPI_PM1_STS_PWRBTN_STS_MASK, + }, + [UACPI_REGISTER_FIELD_SLPBTN_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_SLPBTN_STS_IDX, + .mask = ACPI_PM1_STS_SLPBTN_STS_MASK, + }, + [UACPI_REGISTER_FIELD_RTC_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_RTC_STS_IDX, + .mask = ACPI_PM1_STS_RTC_STS_MASK, + }, + [UACPI_REGISTER_FIELD_HWR_WAK_STS] = { + .reg = UACPI_REGISTER_SLP_STS, + .offset = ACPI_SLP_STS_WAK_STS_IDX, + .mask = ACPI_SLP_STS_WAK_STS_MASK, + }, + [UACPI_REGISTER_FIELD_WAK_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_WAKE_STS_IDX, + .mask = ACPI_PM1_STS_WAKE_STS_MASK, + }, + [UACPI_REGISTER_FIELD_PCIEX_WAKE_STS] = { + .reg = UACPI_REGISTER_PM1_STS, + .offset = ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX, + .mask = ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK, + }, + [UACPI_REGISTER_FIELD_TMR_EN] = { + .reg = UACPI_REGISTER_PM1_EN, + .offset = ACPI_PM1_EN_TMR_EN_IDX, + .mask = ACPI_PM1_EN_TMR_EN_MASK, + }, + 
[UACPI_REGISTER_FIELD_GBL_EN] = { + .reg = UACPI_REGISTER_PM1_EN, + .offset = ACPI_PM1_EN_GBL_EN_IDX, + .mask = ACPI_PM1_EN_GBL_EN_MASK, + }, + [UACPI_REGISTER_FIELD_PWRBTN_EN] = { + .reg = UACPI_REGISTER_PM1_EN, + .offset = ACPI_PM1_EN_PWRBTN_EN_IDX, + .mask = ACPI_PM1_EN_PWRBTN_EN_MASK, + }, + [UACPI_REGISTER_FIELD_SLPBTN_EN] = { + .reg = UACPI_REGISTER_PM1_EN, + .offset = ACPI_PM1_EN_SLPBTN_EN_IDX, + .mask = ACPI_PM1_EN_SLPBTN_EN_MASK, + }, + [UACPI_REGISTER_FIELD_RTC_EN] = { + .reg = UACPI_REGISTER_PM1_EN, + .offset = ACPI_PM1_EN_RTC_EN_IDX, + .mask = ACPI_PM1_EN_RTC_EN_MASK, + }, + [UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS] = { + .reg = UACPI_REGISTER_PM1_EN, + .offset = ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX, + .mask = ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK, + }, + [UACPI_REGISTER_FIELD_SCI_EN] = { + .reg = UACPI_REGISTER_PM1_CNT, + .offset = ACPI_PM1_CNT_SCI_EN_IDX, + .mask = ACPI_PM1_CNT_SCI_EN_MASK, + }, + [UACPI_REGISTER_FIELD_BM_RLD] = { + .reg = UACPI_REGISTER_PM1_CNT, + .offset = ACPI_PM1_CNT_BM_RLD_IDX, + .mask = ACPI_PM1_CNT_BM_RLD_MASK, + }, + [UACPI_REGISTER_FIELD_GBL_RLS] = { + .reg = UACPI_REGISTER_PM1_CNT, + .offset = ACPI_PM1_CNT_GBL_RLS_IDX, + .mask = ACPI_PM1_CNT_GBL_RLS_MASK, + }, + [UACPI_REGISTER_FIELD_SLP_TYP] = { + .reg = UACPI_REGISTER_PM1_CNT, + .offset = ACPI_PM1_CNT_SLP_TYP_IDX, + .mask = ACPI_PM1_CNT_SLP_TYP_MASK, + }, + [UACPI_REGISTER_FIELD_SLP_EN] = { + .reg = UACPI_REGISTER_PM1_CNT, + .offset = ACPI_PM1_CNT_SLP_EN_IDX, + .mask = ACPI_PM1_CNT_SLP_EN_MASK, + }, + [UACPI_REGISTER_FIELD_HWR_SLP_TYP] = { + .reg = UACPI_REGISTER_SLP_CNT, + .offset = ACPI_SLP_CNT_SLP_TYP_IDX, + .mask = ACPI_SLP_CNT_SLP_TYP_MASK, + }, + [UACPI_REGISTER_FIELD_HWR_SLP_EN] = { + .reg = UACPI_REGISTER_SLP_CNT, + .offset = ACPI_SLP_CNT_SLP_EN_IDX, + .mask = ACPI_SLP_CNT_SLP_EN_MASK, + }, + [UACPI_REGISTER_FIELD_ARB_DIS] = { + .reg = UACPI_REGISTER_PM2_CNT, + .offset = ACPI_PM2_CNT_ARB_DIS_IDX, + .mask = ACPI_PM2_CNT_ARB_DIS_MASK, + }, +}; + +uacpi_status uacpi_initialize_registers(void) +{ + g_reg_lock = uacpi_kernel_create_spinlock(); + if (uacpi_unlikely(g_reg_lock == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +void uacpi_deinitialize_registers(void) +{ + uacpi_u8 i; + struct register_mapping *mapping; + + if (g_reg_lock != UACPI_NULL) { + uacpi_kernel_free_spinlock(g_reg_lock); + g_reg_lock = UACPI_NULL; + } + + for (i = 0; i <= UACPI_REGISTER_MAX; ++i) { + mapping = &g_register_mappings[i]; + + if (mapping->states[0] == REGISTER_MAPPING_STATE_MAPPED) + uacpi_unmap_gas_nofree(&mapping->mappings[0]); + if (mapping->states[1] == REGISTER_MAPPING_STATE_MAPPED) + uacpi_unmap_gas_nofree(&mapping->mappings[1]); + } + + uacpi_memzero(&g_register_mappings, sizeof(g_register_mappings)); +} + +uacpi_status uacpi_read_register_field( + enum uacpi_register_field field_enum, uacpi_u64 *out_value +) +{ + uacpi_status ret; + uacpi_u8 field_idx = field_enum; + const struct register_field *field; + const struct register_spec *reg; + struct register_mapping *mapping; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + field = &g_fields[field_idx]; + reg = &g_registers[field->reg]; + mapping = &g_register_mappings[field->reg]; + + ret = ensure_register_mapped(reg, mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = do_read_register(reg, mapping, out_value); + if (uacpi_unlikely_error(ret)) + return ret; + + *out_value = (*out_value 
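+    /*
+     * Mask-then-shift field extraction. Illustrative example: for WAK_STS
+     * (bit 15 of PM1_STS) a raw value of 0x8001 becomes (0x8001 & 0x8000)
+     * >> 15 == 1. The actual masks and offsets come from g_fields above.
+     */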
& field->mask) >> field->offset; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_write_register_field( + enum uacpi_register_field field_enum, uacpi_u64 in_value +) +{ + uacpi_status ret; + uacpi_u8 field_idx = field_enum; + const struct register_field *field; + const struct register_spec *reg; + struct register_mapping *mapping; + + uacpi_u64 data; + uacpi_cpu_flags flags; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + field = &g_fields[field_idx]; + reg = &g_registers[field->reg]; + mapping = &g_register_mappings[field->reg]; + + ret = ensure_register_mapped(reg, mapping); + if (uacpi_unlikely_error(ret)) + return ret; + + in_value = (in_value << field->offset) & field->mask; + + flags = uacpi_kernel_lock_spinlock(g_reg_lock); + + if (reg->kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) { + if (in_value == 0) { + ret = UACPI_STATUS_OK; + goto out; + } + + ret = do_write_register(reg, mapping, in_value); + goto out; + } + + ret = do_read_register(reg, mapping, &data); + if (uacpi_unlikely_error(ret)) + goto out; + + data &= ~field->mask; + data |= in_value; + + ret = do_write_register(reg, mapping, data); + +out: + uacpi_kernel_unlock_spinlock(g_reg_lock, flags); + return ret; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/resources.c b/sys/dev/acpi/uacpi/resources.c new file mode 100644 index 0000000..a9bcb82 --- /dev/null +++ b/sys/dev/acpi/uacpi/resources.c @@ -0,0 +1,2569 @@ +#include <uacpi/types.h> +#include <uacpi/acpi.h> +#include <uacpi/internal/resources.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/uacpi.h> + +#ifndef UACPI_BAREBONES_MODE + +#define LARGE_RESOURCE_BASE (ACPI_RESOURCE_END_TAG + 1) +#define L(x) (x + LARGE_RESOURCE_BASE) + +/* + * Map raw AML resource types to the internal enum, this also takes care of type + * sanitization by returning UACPI_AML_RESOURCE_INVALID for any unknown type. 
+ */ +static const uacpi_u8 aml_resource_to_type[256] = { + // Small items + [ACPI_RESOURCE_IRQ] = UACPI_AML_RESOURCE_IRQ, + [ACPI_RESOURCE_DMA] = UACPI_AML_RESOURCE_DMA, + [ACPI_RESOURCE_START_DEPENDENT] = UACPI_AML_RESOURCE_START_DEPENDENT, + [ACPI_RESOURCE_END_DEPENDENT] = UACPI_AML_RESOURCE_END_DEPENDENT, + [ACPI_RESOURCE_IO] = UACPI_AML_RESOURCE_IO, + [ACPI_RESOURCE_FIXED_IO] = UACPI_AML_RESOURCE_FIXED_IO, + [ACPI_RESOURCE_FIXED_DMA] = UACPI_AML_RESOURCE_FIXED_DMA, + [ACPI_RESOURCE_VENDOR_TYPE0] = UACPI_AML_RESOURCE_VENDOR_TYPE0, + [ACPI_RESOURCE_END_TAG] = UACPI_AML_RESOURCE_END_TAG, + + // Large items + [L(ACPI_RESOURCE_MEMORY24)] = UACPI_AML_RESOURCE_MEMORY24, + [L(ACPI_RESOURCE_GENERIC_REGISTER)] = UACPI_AML_RESOURCE_GENERIC_REGISTER, + [L(ACPI_RESOURCE_VENDOR_TYPE1)] = UACPI_AML_RESOURCE_VENDOR_TYPE1, + [L(ACPI_RESOURCE_MEMORY32)] = UACPI_AML_RESOURCE_MEMORY32, + [L(ACPI_RESOURCE_FIXED_MEMORY32)] = UACPI_AML_RESOURCE_FIXED_MEMORY32, + [L(ACPI_RESOURCE_ADDRESS32)] = UACPI_AML_RESOURCE_ADDRESS32, + [L(ACPI_RESOURCE_ADDRESS16)] = UACPI_AML_RESOURCE_ADDRESS16, + [L(ACPI_RESOURCE_EXTENDED_IRQ)] = UACPI_AML_RESOURCE_EXTENDED_IRQ, + [L(ACPI_RESOURCE_ADDRESS64_EXTENDED)] = UACPI_AML_RESOURCE_ADDRESS64_EXTENDED, + [L(ACPI_RESOURCE_ADDRESS64)] = UACPI_AML_RESOURCE_ADDRESS64, + [L(ACPI_RESOURCE_GPIO_CONNECTION)] = UACPI_AML_RESOURCE_GPIO_CONNECTION, + [L(ACPI_RESOURCE_PIN_FUNCTION)] = UACPI_AML_RESOURCE_PIN_FUNCTION, + [L(ACPI_RESOURCE_SERIAL_CONNECTION)] = UACPI_AML_RESOURCE_SERIAL_CONNECTION, + [L(ACPI_RESOURCE_PIN_CONFIGURATION)] = UACPI_AML_RESOURCE_PIN_CONFIGURATION, + [L(ACPI_RESOURCE_PIN_GROUP)] = UACPI_AML_RESOURCE_PIN_GROUP, + [L(ACPI_RESOURCE_PIN_GROUP_FUNCTION)] = UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION, + [L(ACPI_RESOURCE_PIN_GROUP_CONFIGURATION)] = UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION, + [L(ACPI_RESOURCE_CLOCK_INPUT)] = UACPI_AML_RESOURCE_CLOCK_INPUT, +}; + +static const uacpi_u8 type_to_aml_resource[] = { + [UACPI_AML_RESOURCE_IRQ] = ACPI_RESOURCE_IRQ, + [UACPI_AML_RESOURCE_DMA] = ACPI_RESOURCE_DMA, + [UACPI_AML_RESOURCE_START_DEPENDENT] = ACPI_RESOURCE_START_DEPENDENT, + [UACPI_AML_RESOURCE_END_DEPENDENT] = ACPI_RESOURCE_END_DEPENDENT, + [UACPI_AML_RESOURCE_IO] = ACPI_RESOURCE_IO, + [UACPI_AML_RESOURCE_FIXED_IO] = ACPI_RESOURCE_FIXED_IO, + [UACPI_AML_RESOURCE_FIXED_DMA] = ACPI_RESOURCE_FIXED_DMA, + [UACPI_AML_RESOURCE_VENDOR_TYPE0] = ACPI_RESOURCE_VENDOR_TYPE0, + [UACPI_AML_RESOURCE_END_TAG] = ACPI_RESOURCE_END_TAG, + + // Large items + [UACPI_AML_RESOURCE_MEMORY24] = ACPI_RESOURCE_MEMORY24, + [UACPI_AML_RESOURCE_GENERIC_REGISTER] = ACPI_RESOURCE_GENERIC_REGISTER, + [UACPI_AML_RESOURCE_VENDOR_TYPE1] = ACPI_RESOURCE_VENDOR_TYPE1, + [UACPI_AML_RESOURCE_MEMORY32] = ACPI_RESOURCE_MEMORY32, + [UACPI_AML_RESOURCE_FIXED_MEMORY32] = ACPI_RESOURCE_FIXED_MEMORY32, + [UACPI_AML_RESOURCE_ADDRESS32] = ACPI_RESOURCE_ADDRESS32, + [UACPI_AML_RESOURCE_ADDRESS16] = ACPI_RESOURCE_ADDRESS16, + [UACPI_AML_RESOURCE_EXTENDED_IRQ] = ACPI_RESOURCE_EXTENDED_IRQ, + [UACPI_AML_RESOURCE_ADDRESS64_EXTENDED] = ACPI_RESOURCE_ADDRESS64_EXTENDED, + [UACPI_AML_RESOURCE_ADDRESS64] = ACPI_RESOURCE_ADDRESS64, + [UACPI_AML_RESOURCE_GPIO_CONNECTION] = ACPI_RESOURCE_GPIO_CONNECTION, + [UACPI_AML_RESOURCE_PIN_FUNCTION] = ACPI_RESOURCE_PIN_FUNCTION, + [UACPI_AML_RESOURCE_SERIAL_CONNECTION] = ACPI_RESOURCE_SERIAL_CONNECTION, + [UACPI_AML_RESOURCE_PIN_CONFIGURATION] = ACPI_RESOURCE_PIN_CONFIGURATION, + [UACPI_AML_RESOURCE_PIN_GROUP] = ACPI_RESOURCE_PIN_GROUP, + [UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION] = 
ACPI_RESOURCE_PIN_GROUP_FUNCTION, + [UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION] = ACPI_RESOURCE_PIN_GROUP_CONFIGURATION, + [UACPI_AML_RESOURCE_CLOCK_INPUT] = ACPI_RESOURCE_CLOCK_INPUT, +}; + +static const uacpi_u8 native_resource_to_type[UACPI_RESOURCE_TYPE_MAX + 1] = { + [UACPI_RESOURCE_TYPE_IRQ] = UACPI_AML_RESOURCE_IRQ, + [UACPI_RESOURCE_TYPE_EXTENDED_IRQ] = UACPI_AML_RESOURCE_EXTENDED_IRQ, + [UACPI_RESOURCE_TYPE_DMA] = UACPI_AML_RESOURCE_DMA, + [UACPI_RESOURCE_TYPE_FIXED_DMA] = UACPI_AML_RESOURCE_FIXED_DMA, + [UACPI_RESOURCE_TYPE_IO] = UACPI_AML_RESOURCE_IO, + [UACPI_RESOURCE_TYPE_FIXED_IO] = UACPI_AML_RESOURCE_FIXED_IO, + [UACPI_RESOURCE_TYPE_ADDRESS16] = UACPI_AML_RESOURCE_ADDRESS16, + [UACPI_RESOURCE_TYPE_ADDRESS32] = UACPI_AML_RESOURCE_ADDRESS32, + [UACPI_RESOURCE_TYPE_ADDRESS64] = UACPI_AML_RESOURCE_ADDRESS64, + [UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED] = UACPI_AML_RESOURCE_ADDRESS64_EXTENDED, + [UACPI_RESOURCE_TYPE_MEMORY24] = UACPI_AML_RESOURCE_MEMORY24, + [UACPI_RESOURCE_TYPE_MEMORY32] = UACPI_AML_RESOURCE_MEMORY32, + [UACPI_RESOURCE_TYPE_FIXED_MEMORY32] = UACPI_AML_RESOURCE_FIXED_MEMORY32, + [UACPI_RESOURCE_TYPE_START_DEPENDENT] = UACPI_AML_RESOURCE_START_DEPENDENT, + [UACPI_RESOURCE_TYPE_END_DEPENDENT] = UACPI_AML_RESOURCE_END_DEPENDENT, + [UACPI_RESOURCE_TYPE_VENDOR_SMALL] = UACPI_AML_RESOURCE_VENDOR_TYPE0, + [UACPI_RESOURCE_TYPE_VENDOR_LARGE] = UACPI_AML_RESOURCE_VENDOR_TYPE1, + [UACPI_RESOURCE_TYPE_GENERIC_REGISTER] = UACPI_AML_RESOURCE_GENERIC_REGISTER, + [UACPI_RESOURCE_TYPE_GPIO_CONNECTION] = UACPI_AML_RESOURCE_GPIO_CONNECTION, + [UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION, + [UACPI_RESOURCE_TYPE_SERIAL_SPI_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION, + [UACPI_RESOURCE_TYPE_SERIAL_UART_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION, + [UACPI_RESOURCE_TYPE_SERIAL_CSI2_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION, + [UACPI_RESOURCE_TYPE_PIN_FUNCTION] = UACPI_AML_RESOURCE_PIN_FUNCTION, + [UACPI_RESOURCE_TYPE_PIN_CONFIGURATION] = UACPI_AML_RESOURCE_PIN_CONFIGURATION, + [UACPI_RESOURCE_TYPE_PIN_GROUP] = UACPI_AML_RESOURCE_PIN_GROUP, + [UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION] = UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION, + [UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION] = UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION, + [UACPI_RESOURCE_TYPE_CLOCK_INPUT] = UACPI_AML_RESOURCE_CLOCK_INPUT, + [UACPI_RESOURCE_TYPE_END_TAG] = UACPI_AML_RESOURCE_END_TAG, +}; + +#define SMALL_ITEM_HEADER_SIZE sizeof(struct acpi_small_item) +#define LARGE_ITEM_HEADER_SIZE sizeof(struct acpi_large_item) + +static const uacpi_u8 aml_resource_kind_to_header_size[2] = { + [UACPI_AML_RESOURCE_KIND_SMALL] = SMALL_ITEM_HEADER_SIZE, + [UACPI_AML_RESOURCE_KIND_LARGE] = LARGE_ITEM_HEADER_SIZE, +}; + +static uacpi_size aml_size_with_header(const struct uacpi_resource_spec *spec) +{ + return spec->aml_size + + aml_resource_kind_to_header_size[spec->resource_kind]; +} + +static uacpi_size extra_size_for_native_irq_or_dma( + const struct uacpi_resource_spec *spec, void *data, uacpi_size size +) +{ + uacpi_u16 mask; + uacpi_u8 i, total_bits, num_bits = 0; + + UACPI_UNUSED(size); + + if (spec->type == UACPI_AML_RESOURCE_IRQ) { + struct acpi_resource_irq *irq = data; + mask = irq->irq_mask; + total_bits = 16; + } else { + struct acpi_resource_dma *dma = data; + mask = dma->channel_mask; + total_bits = 8; + } + + for (i = 0; i < total_bits; ++i) + num_bits += !!(mask & (1 << i)); + + return num_bits; +} + +static uacpi_size size_for_aml_irq( + const struct 
uacpi_resource_spec *spec, uacpi_resource *resource +) +{ + uacpi_resource_irq *irq = &resource->irq; + uacpi_size size; + + size = aml_size_with_header(spec); + + switch (irq->length_kind) { + case UACPI_RESOURCE_LENGTH_KIND_FULL: + goto out_full; + case UACPI_RESOURCE_LENGTH_KIND_ONE_LESS: + case UACPI_RESOURCE_LENGTH_KIND_DONT_CARE: + if (irq->triggering != UACPI_TRIGGERING_EDGE) + goto out_full; + if (irq->polarity != UACPI_POLARITY_ACTIVE_HIGH) + goto out_full; + if (irq->sharing != UACPI_EXCLUSIVE) + goto out_full; + + return size - 1; + } + +out_full: + if (uacpi_unlikely(irq->length_kind == + UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)) { + uacpi_warn("requested IRQ resource length is " + "not compatible with specified flags, corrected\n"); + } + + return size; +} + +static uacpi_size size_for_aml_start_dependent( + const struct uacpi_resource_spec *spec, uacpi_resource *resource +) +{ + uacpi_resource_start_dependent *start_dep = &resource->start_dependent; + uacpi_size size; + + size = aml_size_with_header(spec); + switch (start_dep->length_kind) { + case UACPI_RESOURCE_LENGTH_KIND_FULL: + goto out_full; + case UACPI_RESOURCE_LENGTH_KIND_ONE_LESS: + case UACPI_RESOURCE_LENGTH_KIND_DONT_CARE: + if (start_dep->compatibility != UACPI_ACCEPTABLE) + goto out_full; + if (start_dep->performance != UACPI_ACCEPTABLE) + goto out_full; + + return size - 1; + } + +out_full: + if (uacpi_unlikely(start_dep->length_kind == + UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)) { + uacpi_warn("requested StartDependentFn resource length is " + "not compatible with specified flags, corrected\n"); + } + + return size; +} + +static uacpi_size extra_size_for_native_vendor( + const struct uacpi_resource_spec *spec, void *data, uacpi_size size +) +{ + UACPI_UNUSED(spec); + UACPI_UNUSED(data); + return size; +} + +static uacpi_size size_for_aml_vendor( + const struct uacpi_resource_spec *spec, uacpi_resource *resource +) +{ + uacpi_size size = resource->vendor.length; + + UACPI_UNUSED(spec); + + if (size > 7 || resource->type == UACPI_RESOURCE_TYPE_VENDOR_LARGE) { + size += aml_resource_kind_to_header_size[ + UACPI_AML_RESOURCE_KIND_LARGE + ]; + + if (uacpi_unlikely(resource->type != UACPI_RESOURCE_TYPE_VENDOR_LARGE)) { + uacpi_warn("vendor data too large for small descriptor (%zu), " + "correcting to large\n", size); + resource->type = UACPI_RESOURCE_TYPE_VENDOR_LARGE; + } + } else { + size += aml_resource_kind_to_header_size[ + UACPI_AML_RESOURCE_KIND_SMALL + ]; + } + + return size; +} + +static uacpi_size extra_size_for_resource_source( + uacpi_size base_size, uacpi_size reported_size +) +{ + uacpi_size string_length; + + if (reported_size <= base_size) + return 0; + + /* + * The remainder of the descriptor minus the resource index field + */ + string_length = (reported_size - base_size) - 1; + return UACPI_ALIGN_UP(string_length, sizeof(void*), uacpi_size); +} + +static uacpi_size size_for_aml_resource_source( + uacpi_resource_source *source, uacpi_bool with_index +) +{ + uacpi_size length = source->length; + + if (uacpi_unlikely(length && !source->index_present)) { + uacpi_warn("resource declares no source index with non-empty " + "string (%zu bytes), corrected\n", length); + source->index_present = UACPI_TRUE; + } + + // If index is included in the dynamic resource source, add it to the length + if (with_index) + length += source->index_present; + + return length; +} + +static uacpi_size extra_size_for_native_address_or_clock_input( + const struct uacpi_resource_spec *spec, void *data, uacpi_size size +) +{ + 
UACPI_UNUSED(data);
+    return extra_size_for_resource_source(spec->aml_size, size);
+}
+
+static uacpi_size size_for_aml_address_or_clock_input(
+    const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+    uacpi_resource_source *source;
+    uacpi_bool has_index = UACPI_TRUE;
+
+    switch (resource->type) {
+    case UACPI_RESOURCE_TYPE_ADDRESS16:
+        source = &resource->address16.source;
+        break;
+    case UACPI_RESOURCE_TYPE_ADDRESS32:
+        source = &resource->address32.source;
+        break;
+    case UACPI_RESOURCE_TYPE_ADDRESS64:
+        source = &resource->address64.source;
+        break;
+    case UACPI_RESOURCE_TYPE_CLOCK_INPUT:
+        source = &resource->clock_input.source;
+        has_index = UACPI_FALSE;
+        break;
+    default:
+        return 0;
+    }
+
+    return aml_size_with_header(spec) +
+        size_for_aml_resource_source(source, has_index);
+}
+
+static uacpi_size extra_size_for_extended_irq(
+    const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+    struct acpi_resource_extended_irq *irq = data;
+    uacpi_size extra_size = 0;
+
+    extra_size += irq->num_irqs * sizeof(uacpi_u32);
+    extra_size += extra_size_for_resource_source(
+        spec->aml_size, size - extra_size
+    );
+
+    return extra_size;
+}
+
+static uacpi_size size_for_aml_extended_irq(
+    const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+    uacpi_resource_extended_irq *irq = &resource->extended_irq;
+    uacpi_size size;
+
+    size = aml_size_with_header(spec);
+    size += irq->num_irqs * 4;
+    size += size_for_aml_resource_source(&irq->source, UACPI_TRUE);
+
+    return size;
+}
+
+static uacpi_size extra_size_for_native_gpio_or_pins(
+    const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+    uacpi_size pin_table_offset;
+
+    /*
+     * These resources pretend to have variable layout by declaring "offset"
+     * fields, but the layout is hardcoded and mandated by the spec to be
+     * very specific. We can use the offset numbers here to calculate the final
+     * length.
+     *
+     * For example, the layout of GPIO connection _always_ looks as follows:
+     * [0...22] -> fixed data
+     * [23...<source name offset - 1>] -> pin table
+     * [<source name offset>...<vendor data offset - 1>] -> source name
+     * [<vendor data offset>...<data offset + data length>] -> vendor data
+     */
+    switch (spec->type) {
+    case UACPI_AML_RESOURCE_GPIO_CONNECTION: {
+        struct acpi_resource_gpio_connection *gpio = data;
+        pin_table_offset = gpio->pin_table_offset;
+        break;
+    }
+
+    case UACPI_AML_RESOURCE_PIN_FUNCTION: {
+        struct acpi_resource_pin_function *pin = data;
+        pin_table_offset = pin->pin_table_offset;
+        break;
+    }
+
+    case UACPI_AML_RESOURCE_PIN_CONFIGURATION: {
+        struct acpi_resource_pin_configuration *config = data;
+        pin_table_offset = config->pin_table_offset;
+        break;
+    }
+
+    case UACPI_AML_RESOURCE_PIN_GROUP: {
+        struct acpi_resource_pin_group *group = data;
+        pin_table_offset = group->pin_table_offset;
+        break;
+    }
+
+    default:
+        return 0;
+    }
+
+    /*
+     * The size we get passed here does not include the header size because
+     * that's how resources are encoded. Subtract it here so that we get the
+     * correct final length.
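+     *
+     * As a worked example (assuming the usual 3-byte large item header):
+     * a GPIO connection with pin_table_offset = 23 whose declared size is
+     * 40 bytes carries 40 - (23 - 3) = 20 extra bytes of pin table,
+     * source name and vendor data past the fixed part.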
+ */ + return size - (pin_table_offset - LARGE_ITEM_HEADER_SIZE); +} + +static uacpi_size size_for_aml_gpio_or_pins( + const struct uacpi_resource_spec *spec, uacpi_resource *resource +) +{ + uacpi_size source_length, vendor_length, pin_table_length, size; + + size = aml_size_with_header(spec); + switch (spec->type) { + case UACPI_AML_RESOURCE_GPIO_CONNECTION: { + uacpi_resource_gpio_connection *res = &resource->gpio_connection; + source_length = res->source.length; + pin_table_length = res->pin_table_length; + vendor_length = res->vendor_data_length; + break; + } + + case UACPI_AML_RESOURCE_PIN_FUNCTION: { + uacpi_resource_pin_function *res = &resource->pin_function; + source_length = res->source.length; + pin_table_length = res->pin_table_length; + vendor_length = res->vendor_data_length; + break; + } + + case UACPI_AML_RESOURCE_PIN_CONFIGURATION: { + uacpi_resource_pin_configuration *res = &resource->pin_configuration; + source_length = res->source.length; + pin_table_length = res->pin_table_length; + vendor_length = res->vendor_data_length; + break; + } + + case UACPI_AML_RESOURCE_PIN_GROUP: { + uacpi_resource_pin_group *res = &resource->pin_group; + source_length = res->label.length; + pin_table_length = res->pin_table_length; + vendor_length = res->vendor_data_length; + break; + } + + default: + return 0; + } + + size += source_length; + size += pin_table_length * 2; + size += vendor_length; + + return size; +} + +static uacpi_size extra_size_for_native_pin_group( + const struct uacpi_resource_spec *spec, void *data, uacpi_size size +) +{ + uacpi_size source_offset; + + switch (spec->type) { + case UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION: { + struct acpi_resource_pin_group_function *func = data; + source_offset = func->source_offset; + break; + } + + case UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION: { + struct acpi_resource_pin_group_configuration *config = data; + source_offset = config->source_offset; + break; + } + + default: + return 0; + } + + // Same logic as extra_size_for_native_gpio_or_pins + return size - (source_offset - LARGE_ITEM_HEADER_SIZE); +} + +static uacpi_size size_for_aml_pin_group( + const struct uacpi_resource_spec *spec, uacpi_resource *resource +) +{ + uacpi_size source_length, label_length, vendor_length, size; + + size = aml_size_with_header(spec); + switch (spec->type) { + case UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION: { + uacpi_resource_pin_group_function *res = &resource->pin_group_function; + source_length = res->source.length; + label_length = res->label.length; + vendor_length = res->vendor_data_length; + break; + } + + case UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION: { + uacpi_resource_pin_group_configuration *res; + res = &resource->pin_group_configuration; + source_length = res->source.length; + label_length = res->label.length; + vendor_length = res->vendor_data_length; + break; + } + + default: + return 0; + } + + size += source_length; + size += label_length; + size += vendor_length; + + return size; +} + +#define AML_SERIAL_RESOURCE_EXTRA_SIZE(type) \ + (sizeof(struct acpi_resource_serial_##type) \ + - sizeof(struct acpi_resource_serial)) + +#define NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(type) \ + (sizeof(uacpi_resource_##type##_connection) \ + - sizeof(uacpi_resource_serial_bus_common)) + +static const uacpi_u8 aml_serial_resource_to_extra_aml_size +[ACPI_SERIAL_TYPE_MAX + 1] = { + [ACPI_SERIAL_TYPE_I2C] = AML_SERIAL_RESOURCE_EXTRA_SIZE(i2c), + [ACPI_SERIAL_TYPE_SPI] = AML_SERIAL_RESOURCE_EXTRA_SIZE(spi), + [ACPI_SERIAL_TYPE_UART] = 
AML_SERIAL_RESOURCE_EXTRA_SIZE(uart), + [ACPI_SERIAL_TYPE_CSI2] = AML_SERIAL_RESOURCE_EXTRA_SIZE(csi2), +}; + +static const uacpi_u8 aml_serial_resource_to_extra_native_size +[ACPI_SERIAL_TYPE_MAX + 1] = { + [ACPI_SERIAL_TYPE_I2C] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(i2c), + [ACPI_SERIAL_TYPE_SPI] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(spi), + [ACPI_SERIAL_TYPE_UART] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(uart), + [ACPI_SERIAL_TYPE_CSI2] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(csi2), +}; + +static uacpi_size extra_size_for_serial_connection( + const struct uacpi_resource_spec *spec, void *data, uacpi_size size +) +{ + struct acpi_resource_serial *serial = data; + uacpi_size extra_bytes = size; + + extra_bytes -= spec->aml_size; + extra_bytes -= aml_serial_resource_to_extra_aml_size[serial->type]; + extra_bytes += aml_serial_resource_to_extra_native_size[serial->type]; + + return extra_bytes; +} + +static uacpi_size aml_size_for_serial_connection( + const struct uacpi_resource_spec *spec, uacpi_resource *resource +) +{ + uacpi_size size; + uacpi_resource_serial_bus_common *serial_bus = &resource->serial_bus_common; + + size = aml_size_with_header(spec); + size += aml_serial_resource_to_extra_aml_size[serial_bus->type]; + size += serial_bus->vendor_data_length; + size += serial_bus->source.length; + + return size; +} + +#define OP(short_code, ...) \ +{ \ + .code = UACPI_RESOURCE_CONVERT_OPCODE_##short_code, \ + __VA_ARGS__ \ +} + +#define END() OP(END) + +#define AML_O(short_aml_name, field) \ + uacpi_offsetof(struct acpi_resource_##short_aml_name, field) + +#define AML_F(short_aml_name, field) \ + .f1.aml_offset = AML_O(short_aml_name, field) + +#define NATIVE_O(short_name, field) \ + uacpi_offsetof(uacpi_resource_##short_name, field) + +#define NATIVE_F(short_native_name, field) \ + .f2.native_offset = NATIVE_O(short_native_name, field) + +#define IMM(value) .f3.imm = value +#define ARG0(value) .f1.arg0 = (value) +#define ARG1(value) .f2.arg1 = (value) +#define ARG2(value) .f3.arg2 = (value) + + +static const struct uacpi_resource_convert_instruction convert_irq_to_native[] = { + OP(PACKED_ARRAY_16, AML_F(irq, irq_mask), NATIVE_F(irq, irqs), + ARG2(NATIVE_O(irq, num_irqs))), + OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(3), IMM(6)), + OP(SET_TO_IMM, NATIVE_F(irq, length_kind), + IMM(UACPI_RESOURCE_LENGTH_KIND_FULL)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, triggering), IMM(0)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, polarity), IMM(3)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, sharing), IMM(4)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, wake_capability), IMM(5)), + END(), + OP(SET_TO_IMM, NATIVE_F(irq, length_kind), + IMM(UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)), + OP(SET_TO_IMM, NATIVE_F(irq, triggering), IMM(UACPI_TRIGGERING_EDGE)), + END(), +}; + +const struct uacpi_resource_convert_instruction convert_irq_to_aml[] = { + OP(PACKED_ARRAY_16, AML_F(irq, irq_mask), NATIVE_F(irq, irqs), + ARG2(NATIVE_O(irq, num_irqs))), + OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(3), IMM(4)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, triggering), IMM(0)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, polarity), IMM(3)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, sharing), IMM(4)), + OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, wake_capability), IMM(5)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_dma[] = { + OP(PACKED_ARRAY_8, AML_F(dma, channel_mask), NATIVE_F(dma, channels), + ARG2(NATIVE_O(dma, num_channels))), + OP(BIT_FIELD_2, 
AML_F(dma, flags), NATIVE_F(dma, transfer_type), IMM(0)), + OP(BIT_FIELD_1, AML_F(dma, flags), NATIVE_F(dma, bus_master_status), IMM(2)), + OP(BIT_FIELD_2, AML_F(dma, flags), NATIVE_F(dma, channel_speed), IMM(5)), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_start_dependent_to_native[] = { + OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(1), IMM(4)), + OP(SET_TO_IMM, NATIVE_F(start_dependent, length_kind), + IMM(UACPI_RESOURCE_LENGTH_KIND_FULL)), + OP(BIT_FIELD_2, AML_F(start_dependent, flags), + NATIVE_F(start_dependent, compatibility), IMM(0)), + OP(BIT_FIELD_2, AML_F(start_dependent, flags), + NATIVE_F(start_dependent, performance), IMM(2)), + END(), + OP(SET_TO_IMM, NATIVE_F(start_dependent, length_kind), + IMM(UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)), + OP(SET_TO_IMM, NATIVE_F(start_dependent, compatibility), + IMM(UACPI_ACCEPTABLE)), + OP(SET_TO_IMM, NATIVE_F(start_dependent, performance), + IMM(UACPI_ACCEPTABLE)), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_start_dependent_to_aml[] = { + OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(1), IMM(1)), + OP(BIT_FIELD_2, AML_F(start_dependent, flags), + NATIVE_F(start_dependent, compatibility), IMM(0)), + OP(BIT_FIELD_2, AML_F(start_dependent, flags), + NATIVE_F(start_dependent, performance), IMM(2)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_io[] = { + OP(BIT_FIELD_1, AML_F(io, information), NATIVE_F(io, decode_type)), + OP(FIELD_16, AML_F(io, minimum), NATIVE_F(io, minimum)), + OP(FIELD_16, AML_F(io, maximum), NATIVE_F(io, maximum)), + OP(FIELD_8, AML_F(io, alignment), NATIVE_F(io, alignment)), + OP(FIELD_8, AML_F(io, length), NATIVE_F(io, length)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_fixed_io[] = { + OP(FIELD_16, AML_F(fixed_io, address), NATIVE_F(fixed_io, address)), + OP(FIELD_8, AML_F(fixed_io, length), NATIVE_F(fixed_io, length)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_fixed_dma[] = { + OP(FIELD_16, AML_F(fixed_dma, request_line), + NATIVE_F(fixed_dma, request_line)), + OP(FIELD_16, AML_F(fixed_dma, channel), NATIVE_F(fixed_dma, channel)), + OP(FIELD_8, AML_F(fixed_dma, transfer_width), + NATIVE_F(fixed_dma, transfer_width)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_vendor_type0[] = { + OP(LOAD_AML_SIZE_32, NATIVE_F(vendor, length)), + OP(FIELD_8, AML_F(vendor_defined_type0, byte_data), NATIVE_F(vendor, data)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_vendor_type1[] = { + OP(LOAD_AML_SIZE_32, NATIVE_F(vendor, length)), + OP(FIELD_8, AML_F(vendor_defined_type1, byte_data), NATIVE_F(vendor, data)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_memory24[] = { + OP(BIT_FIELD_1, AML_F(memory24, information), + NATIVE_F(memory24, write_status), IMM(0)), + OP(FIELD_16, AML_F(memory24, minimum), NATIVE_F(memory24, minimum), IMM(4)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_memory32[] = { + OP(BIT_FIELD_1, AML_F(memory32, information), + NATIVE_F(memory32, write_status), IMM(0)), + OP(FIELD_32, AML_F(memory32, minimum), NATIVE_F(memory32, minimum), IMM(4)), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_fixed_memory32[] = { + OP(BIT_FIELD_1, AML_F(fixed_memory32, information), + NATIVE_F(fixed_memory32, write_status), IMM(0)), + OP(FIELD_32, AML_F(fixed_memory32, address), + NATIVE_F(fixed_memory32, 
address)), + OP(FIELD_32, AML_F(fixed_memory32, length), + NATIVE_F(fixed_memory32, length)), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_generic_register[] = { + OP(FIELD_8, AML_F(generic_register, address_space_id), + NATIVE_F(generic_register, address_space_id), IMM(4)), + OP(FIELD_64, AML_F(generic_register, address), + NATIVE_F(generic_register, address)), + END(), +}; + +#define CONVERT_TYPE_SPECIFIC_FLAGS(addr_type) \ + OP(LOAD_8_STORE, AML_F(addr_type, common.type), \ + NATIVE_F(addr_type, common.type)), \ + OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_RANGE_MEMORY), IMM(5)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.memory.write_status), IMM(0)), \ + OP(BIT_FIELD_2, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.memory.caching), IMM(1)), \ + OP(BIT_FIELD_2, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.memory.range_type), IMM(3)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.memory.translation), IMM(5)), \ + END(), \ + OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_RANGE_IO), IMM(4)), \ + OP(BIT_FIELD_2, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.io.range_type), IMM(0)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.io.translation_type), IMM(4)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.io.translation), IMM(5)), \ + END(), \ + /* Memory type that we don't know, just copy the byte */ \ + OP(FIELD_8, AML_F(addr_type, common.type_flags), \ + NATIVE_F(addr_type, common.attribute.type_specific), IMM(0xFF)), \ + END() + +#define CONVERT_GENERAL_ADDRESS_FLAGS(addr_type) \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.flags), \ + NATIVE_F(addr_type, common.direction), IMM(0)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.flags), \ + NATIVE_F(addr_type, common.decode_type), IMM(1)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.flags), \ + NATIVE_F(addr_type, common.fixed_min_address), IMM(2)), \ + OP(BIT_FIELD_1, \ + AML_F(addr_type, common.flags), \ + NATIVE_F(addr_type, common.fixed_max_address), IMM(3)) \ + +#define DEFINE_ADDRESS_CONVERSION(width) \ + static const struct uacpi_resource_convert_instruction \ + convert_address##width[] = { \ + CONVERT_GENERAL_ADDRESS_FLAGS(address##width), \ + OP(FIELD_##width, AML_F(address##width, granularity), \ + NATIVE_F(address##width, granularity), IMM(5)), \ + OP(RESOURCE_SOURCE, NATIVE_F(address##width, source)), \ + CONVERT_TYPE_SPECIFIC_FLAGS(address##width), \ + }; + +DEFINE_ADDRESS_CONVERSION(16) +DEFINE_ADDRESS_CONVERSION(32) +DEFINE_ADDRESS_CONVERSION(64) + +static const struct uacpi_resource_convert_instruction +convert_address64_extended[] = { + CONVERT_GENERAL_ADDRESS_FLAGS(address64_extended), + OP(FIELD_8, AML_F(address64_extended, revision_id), + NATIVE_F(address64_extended, revision_id)), + OP(FIELD_64, AML_F(address64_extended, granularity), + NATIVE_F(address64_extended, granularity), IMM(6)), + CONVERT_TYPE_SPECIFIC_FLAGS(address64_extended), +}; + +static const struct uacpi_resource_convert_instruction +convert_extended_irq[] = { + OP(BIT_FIELD_1, AML_F(extended_irq, flags), + NATIVE_F(extended_irq, direction), IMM(0)), + OP(BIT_FIELD_1, AML_F(extended_irq, flags), + NATIVE_F(extended_irq, triggering), IMM(1)), + OP(BIT_FIELD_1, AML_F(extended_irq, flags), + 
NATIVE_F(extended_irq, polarity), IMM(2)), + OP(BIT_FIELD_1, AML_F(extended_irq, flags), + NATIVE_F(extended_irq, sharing), IMM(3)), + OP(BIT_FIELD_1, AML_F(extended_irq, flags), + NATIVE_F(extended_irq, wake_capability), IMM(4)), + OP(LOAD_8_STORE, AML_F(extended_irq, num_irqs), + NATIVE_F(extended_irq, num_irqs), IMM(4)), + OP(RESOURCE_SOURCE, NATIVE_F(extended_irq, source)), + + // Use FIELD_8 here since the accumulator has been multiplied by 4 + OP(FIELD_8, AML_F(extended_irq, irqs), NATIVE_F(extended_irq, irqs)), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_clock_input[] = { + OP(FIELD_8, AML_F(clock_input, revision_id), + NATIVE_F(clock_input, revision_id)), + OP(BIT_FIELD_1, AML_F(clock_input, flags), NATIVE_F(clock_input, frequency), + IMM(0)), + OP(BIT_FIELD_2, AML_F(clock_input, flags), NATIVE_F(clock_input, scale), + IMM(1)), + OP(FIELD_16, AML_F(clock_input, divisor), NATIVE_F(clock_input, divisor)), + OP(FIELD_32, AML_F(clock_input, numerator), NATIVE_F(clock_input, numerator)), + OP(FIELD_8, AML_F(clock_input, source_index), NATIVE_F(clock_input, source.index)), + OP(RESOURCE_SOURCE_NO_INDEX, NATIVE_F(clock_input, source)), + END(), +}; + +#define DECODE_SOURCE_INDEX(short_aml_name) \ + OP(FIELD_8, AML_F(short_aml_name, source_index), \ + NATIVE_F(short_aml_name, source.index)) \ + +#define DECODE_RES_PIN_TBL_AND_VENDOR_DATA( \ + short_aml_name, res_opcode, offset_field, res_field \ +) \ + OP(LOAD_PIN_TABLE_LENGTH, AML_F(short_aml_name, offset_field), \ + NATIVE_F(short_aml_name, pin_table_length)), \ + OP(RESOURCE_##res_opcode, NATIVE_F(short_aml_name, res_field), \ + AML_F(short_aml_name, offset_field), \ + ARG2(AML_O(short_aml_name, vendor_data_offset))), \ + OP(PIN_TABLE, AML_F(short_aml_name, pin_table_offset), \ + NATIVE_F(short_aml_name, pin_table_length), \ + ARG2(NATIVE_O(short_aml_name, pin_table))), \ + OP(VENDOR_DATA, AML_F(short_aml_name, vendor_data_offset), \ + NATIVE_F(short_aml_name, vendor_data_length), \ + ARG2(NATIVE_O(short_aml_name, vendor_data))) + +static const struct uacpi_resource_convert_instruction +convert_gpio_connection[] = { + OP(FIELD_8, AML_F(gpio_connection, revision_id), + NATIVE_F(gpio_connection, revision_id)), + OP(BIT_FIELD_1, AML_F(gpio_connection, general_flags), + NATIVE_F(gpio_connection, direction)), + OP(FIELD_8, AML_F(gpio_connection, pull_configuration), + NATIVE_F(gpio_connection, pull_configuration)), + OP(FIELD_16, AML_F(gpio_connection, drive_strength), + NATIVE_F(gpio_connection, drive_strength), IMM(2)), + DECODE_SOURCE_INDEX(gpio_connection), + DECODE_RES_PIN_TBL_AND_VENDOR_DATA( + gpio_connection, SOURCE_NO_INDEX, source_offset, source + ), + OP(LOAD_8_STORE, AML_F(gpio_connection, type), NATIVE_F(gpio_connection, type)), + OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_GPIO_CONNECTION_INTERRUPT), IMM(5)), + OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags), + NATIVE_F(gpio_connection, intr.triggering), IMM(0)), + OP(BIT_FIELD_2, AML_F(gpio_connection, connection_flags), + NATIVE_F(gpio_connection, intr.polarity), IMM(1)), + OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags), + NATIVE_F(gpio_connection, intr.sharing), IMM(3)), + OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags), + NATIVE_F(gpio_connection, intr.wake_capability), IMM(4)), + END(), + OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_GPIO_CONNECTION_IO), IMM(3)), + OP(BIT_FIELD_2, AML_F(gpio_connection, connection_flags), + NATIVE_F(gpio_connection, io.restriction), IMM(0)), + OP(BIT_FIELD_1, AML_F(gpio_connection, 
connection_flags), + NATIVE_F(gpio_connection, io.sharing), IMM(3)), + END(), + OP(FIELD_16, AML_F(gpio_connection, connection_flags), + NATIVE_F(gpio_connection, type_specific), IMM(0xFF)), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_pin_function[] = { + OP(FIELD_8, AML_F(pin_function, revision_id), + NATIVE_F(pin_function, revision_id)), + OP(BIT_FIELD_1, AML_F(pin_function, flags), + NATIVE_F(pin_function, sharing), IMM(0)), + OP(FIELD_8, AML_F(pin_function, pull_configuration), + NATIVE_F(pin_function, pull_configuration)), + OP(FIELD_16, AML_F(pin_function, function_number), + NATIVE_F(pin_function, function_number)), + DECODE_SOURCE_INDEX(pin_function), + DECODE_RES_PIN_TBL_AND_VENDOR_DATA( + pin_function, SOURCE_NO_INDEX, source_offset, source + ), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_pin_configuration[] = { + OP(FIELD_8, AML_F(pin_configuration, revision_id), + NATIVE_F(pin_configuration, revision_id)), + OP(BIT_FIELD_1, AML_F(pin_configuration, flags), + NATIVE_F(pin_configuration, sharing), IMM(0)), + OP(BIT_FIELD_1, AML_F(pin_configuration, flags), + NATIVE_F(pin_configuration, direction), IMM(1)), + OP(FIELD_8, AML_F(pin_configuration, type), + NATIVE_F(pin_configuration, type)), + OP(FIELD_32, AML_F(pin_configuration, value), + NATIVE_F(pin_configuration, value)), + DECODE_SOURCE_INDEX(pin_configuration), + DECODE_RES_PIN_TBL_AND_VENDOR_DATA( + pin_configuration, SOURCE_NO_INDEX, source_offset, source + ), + END(), +}; + +static const struct uacpi_resource_convert_instruction convert_pin_group[] = { + OP(FIELD_8, AML_F(pin_group, revision_id), + NATIVE_F(pin_group, revision_id)), + OP(BIT_FIELD_1, AML_F(pin_group, flags), + NATIVE_F(pin_group, direction), IMM(0)), + DECODE_RES_PIN_TBL_AND_VENDOR_DATA( + pin_group, LABEL, source_lable_offset, label + ), + END(), +}; + +#define DECODE_PIN_GROUP_RES_SOURCES(postfix) \ + DECODE_SOURCE_INDEX(pin_group_##postfix), \ + OP(RESOURCE_SOURCE_NO_INDEX, NATIVE_F(pin_group_##postfix, source), \ + AML_F(pin_group_##postfix, source_offset), \ + ARG2(AML_O(pin_group_##postfix, source_lable_offset))), \ + OP(LOAD_16_NATIVE, NATIVE_F(pin_group_##postfix, source.length)), \ + OP(RESOURCE_LABEL, NATIVE_F(pin_group_##postfix, label), \ + AML_F(pin_group_##postfix, source_lable_offset), \ + ARG2(AML_O(pin_group_##postfix, vendor_data_offset))), \ + OP(VENDOR_DATA, AML_F(pin_group_##postfix, vendor_data_offset), \ + NATIVE_F(pin_group_##postfix, vendor_data_length), \ + ARG2(NATIVE_O(pin_group_##postfix, vendor_data))) + +static const struct uacpi_resource_convert_instruction +convert_pin_group_function[] = { + OP(FIELD_8, AML_F(pin_group_function, revision_id), + NATIVE_F(pin_group_function, revision_id)), + OP(BIT_FIELD_1, AML_F(pin_group_function, flags), + NATIVE_F(pin_group_function, sharing), IMM(0)), + OP(BIT_FIELD_1, AML_F(pin_group_function, flags), + NATIVE_F(pin_group_function, direction), IMM(1)), + OP(FIELD_16, AML_F(pin_group_function, function), + NATIVE_F(pin_group_function, function)), + DECODE_PIN_GROUP_RES_SOURCES(function), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_pin_group_configuration[] = { + OP(FIELD_8, AML_F(pin_group_configuration, revision_id), + NATIVE_F(pin_group_configuration, revision_id)), + OP(BIT_FIELD_1, AML_F(pin_group_configuration, flags), + NATIVE_F(pin_group_configuration, sharing), IMM(0)), + OP(BIT_FIELD_1, AML_F(pin_group_configuration, flags), + NATIVE_F(pin_group_configuration, direction), 
IMM(1)), + OP(FIELD_8, AML_F(pin_group_configuration, type), + NATIVE_F(pin_group_configuration, type)), + OP(FIELD_32, AML_F(pin_group_configuration, value), + NATIVE_F(pin_group_configuration, value)), + DECODE_PIN_GROUP_RES_SOURCES(configuration), + END(), +}; + +static const struct uacpi_resource_convert_instruction +convert_generic_serial_bus[] = { + OP(FIELD_8, AML_F(serial, revision_id), + NATIVE_F(serial_bus_common, revision_id)), + OP(FIELD_8, AML_F(serial, type_specific_revision_id), + NATIVE_F(serial_bus_common, type_revision_id)), + OP(FIELD_8, AML_F(serial, source_index), + NATIVE_F(serial_bus_common, source.index)), + OP(FIELD_16, AML_F(serial, type_data_length), + NATIVE_F(serial_bus_common, type_data_length)), + OP(BIT_FIELD_1, AML_F(serial, flags), + NATIVE_F(serial_bus_common, mode), IMM(0)), + OP(BIT_FIELD_1, AML_F(serial, flags), + NATIVE_F(serial_bus_common, direction), IMM(1)), + OP(BIT_FIELD_1, AML_F(serial, flags), + NATIVE_F(serial_bus_common, sharing), IMM(2)), + OP(SERIAL_TYPE_SPECIFIC, AML_F(serial, type), + NATIVE_F(serial_bus_common, type)), + OP(RESOURCE_SOURCE_NO_INDEX, NATIVE_F(serial_bus_common, source)), + OP(LOAD_8_NATIVE, NATIVE_F(serial_bus_common, type)), + OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_I2C), IMM(4)), + OP(BIT_FIELD_1, AML_F(serial, type_specific_flags), + NATIVE_F(i2c_connection, addressing_mode), IMM(0)), + OP(FIELD_32, AML_F(serial_i2c, connection_speed), + NATIVE_F(i2c_connection, connection_speed), IMM(0xFF)), + OP(FIELD_16, AML_F(serial_i2c, slave_address), + NATIVE_F(i2c_connection, slave_address)), + END(), + OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_SPI), IMM(5)), + OP(BIT_FIELD_1, AML_F(serial, type_specific_flags), + NATIVE_F(spi_connection, wire_mode), IMM(0)), + OP(BIT_FIELD_1, AML_F(serial, type_specific_flags), + NATIVE_F(spi_connection, device_polarity), IMM(1)), + OP(FIELD_32, AML_F(serial_spi, connection_speed), + NATIVE_F(spi_connection, connection_speed), IMM(0xFF)), + OP(FIELD_8, AML_F(serial_spi, data_bit_length), + NATIVE_F(spi_connection, data_bit_length), IMM(5)), + END(), + OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_UART), IMM(8)), + OP(BIT_FIELD_2, AML_F(serial, type_specific_flags), + NATIVE_F(uart_connection, flow_control), IMM(0)), + OP(BIT_FIELD_2, AML_F(serial, type_specific_flags), + NATIVE_F(uart_connection, stop_bits), IMM(2)), + OP(BIT_FIELD_3, AML_F(serial, type_specific_flags), + NATIVE_F(uart_connection, data_bits), IMM(4)), + OP(BIT_FIELD_1, AML_F(serial, type_specific_flags), + NATIVE_F(uart_connection, endianness), IMM(7)), + OP(FIELD_32, AML_F(serial_uart, baud_rate), + NATIVE_F(uart_connection, baud_rate), IMM(0xFF)), + OP(FIELD_16, AML_F(serial_uart, rx_fifo), + NATIVE_F(uart_connection, rx_fifo), IMM(2)), + OP(FIELD_8, AML_F(serial_uart, parity), + NATIVE_F(uart_connection, parity), IMM(2)), + END(), + OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_CSI2), IMM(3)), + OP(BIT_FIELD_2, AML_F(serial, type_specific_flags), + NATIVE_F(csi2_connection, phy_type), IMM(0)), + OP(BIT_FIELD_6, AML_F(serial, type_specific_flags), + NATIVE_F(csi2_connection, local_port), IMM(2)), + END(), + + /* + * Insert a trap to catch unimplemented types, this should be unreachable + * because of validation earlier. + */ + OP(UNREACHABLE), +}; + +#define NATIVE_RESOURCE_HEADER_SIZE 8 + +#define DEFINE_SMALL_AML_RESOURCE(aml_type_enum, native_type_enum, \ + aml_struct, native_struct, ...) 
\ + [aml_type_enum] = { \ + .type = aml_type_enum, \ + .native_type = native_type_enum, \ + .resource_kind = UACPI_AML_RESOURCE_KIND_SMALL, \ + .aml_size = sizeof(aml_struct) - SMALL_ITEM_HEADER_SIZE, \ + .native_size = sizeof(native_struct) + NATIVE_RESOURCE_HEADER_SIZE, \ + __VA_ARGS__ \ + } + +#define DEFINE_SMALL_AML_RESOURCE_NO_NATIVE_REPR( \ + aml_type_enum, native_type_enum, aml_struct, ... \ +) \ + [aml_type_enum] = { \ + .type = aml_type_enum, \ + .native_type = native_type_enum, \ + .resource_kind = UACPI_AML_RESOURCE_KIND_SMALL, \ + .aml_size = sizeof(aml_struct) - SMALL_ITEM_HEADER_SIZE, \ + .native_size = NATIVE_RESOURCE_HEADER_SIZE, \ + __VA_ARGS__ \ + } + +#define DEFINE_LARGE_AML_RESOURCE(aml_type_enum, native_type_enum, \ + aml_struct, native_struct, ...) \ + [aml_type_enum] = { \ + .type = aml_type_enum, \ + .native_type = native_type_enum, \ + .resource_kind = UACPI_AML_RESOURCE_KIND_LARGE, \ + .aml_size = sizeof(aml_struct) - LARGE_ITEM_HEADER_SIZE, \ + .native_size = sizeof(native_struct) + NATIVE_RESOURCE_HEADER_SIZE, \ + __VA_ARGS__ \ + } + +const struct uacpi_resource_spec aml_resources[UACPI_AML_RESOURCE_MAX + 1] = { + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_IRQ, + UACPI_RESOURCE_TYPE_IRQ, + struct acpi_resource_irq, + uacpi_resource_irq, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS, + .extra_size_for_native = extra_size_for_native_irq_or_dma, + .size_for_aml = size_for_aml_irq, + .to_native = convert_irq_to_native, + .to_aml = convert_irq_to_aml, + ), + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_DMA, + UACPI_RESOURCE_TYPE_DMA, + struct acpi_resource_dma, + uacpi_resource_dma, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .extra_size_for_native = extra_size_for_native_irq_or_dma, + .to_native = convert_dma, + .to_aml = convert_dma, + ), + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_START_DEPENDENT, + UACPI_RESOURCE_TYPE_START_DEPENDENT, + struct acpi_resource_start_dependent, + uacpi_resource_start_dependent, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS, + .size_for_aml = size_for_aml_start_dependent, + .to_native = convert_start_dependent_to_native, + .to_aml = convert_start_dependent_to_aml, + ), + DEFINE_SMALL_AML_RESOURCE_NO_NATIVE_REPR( + UACPI_AML_RESOURCE_END_DEPENDENT, + UACPI_RESOURCE_TYPE_END_DEPENDENT, + struct acpi_resource_end_dependent, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + ), + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_IO, + UACPI_RESOURCE_TYPE_IO, + struct acpi_resource_io, + uacpi_resource_io, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_io, + .to_aml = convert_io, + ), + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_FIXED_IO, + UACPI_RESOURCE_TYPE_FIXED_IO, + struct acpi_resource_fixed_io, + uacpi_resource_fixed_io, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_fixed_io, + .to_aml = convert_fixed_io, + ), + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_FIXED_DMA, + UACPI_RESOURCE_TYPE_FIXED_DMA, + struct acpi_resource_fixed_dma, + uacpi_resource_fixed_dma, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_fixed_dma, + .to_aml = convert_fixed_dma, + ), + DEFINE_SMALL_AML_RESOURCE( + UACPI_AML_RESOURCE_VENDOR_TYPE0, + UACPI_RESOURCE_TYPE_VENDOR_SMALL, + struct acpi_resource_vendor_defined_type0, + uacpi_resource_vendor, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .size_for_aml = size_for_aml_vendor, + .extra_size_for_native = extra_size_for_native_vendor, + .to_native = 
convert_vendor_type0, + .to_aml = convert_vendor_type0, + ), + DEFINE_SMALL_AML_RESOURCE_NO_NATIVE_REPR( + UACPI_AML_RESOURCE_END_TAG, + UACPI_RESOURCE_TYPE_END_TAG, + struct acpi_resource_end_tag, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_MEMORY24, + UACPI_RESOURCE_TYPE_MEMORY24, + struct acpi_resource_memory24, + uacpi_resource_memory24, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_memory24, + .to_aml = convert_memory24, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_GENERIC_REGISTER, + UACPI_RESOURCE_TYPE_GENERIC_REGISTER, + struct acpi_resource_generic_register, + uacpi_resource_generic_register, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_generic_register, + .to_aml = convert_generic_register, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_VENDOR_TYPE1, + UACPI_RESOURCE_TYPE_VENDOR_LARGE, + struct acpi_resource_vendor_defined_type1, + uacpi_resource_vendor, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_vendor, + .size_for_aml = size_for_aml_vendor, + .to_native = convert_vendor_type1, + .to_aml = convert_vendor_type1, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_MEMORY32, + UACPI_RESOURCE_TYPE_MEMORY32, + struct acpi_resource_memory32, + uacpi_resource_memory32, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_memory32, + .to_aml = convert_memory32, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_FIXED_MEMORY32, + UACPI_RESOURCE_TYPE_FIXED_MEMORY32, + struct acpi_resource_fixed_memory32, + uacpi_resource_fixed_memory32, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_fixed_memory32, + .to_aml = convert_fixed_memory32, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_ADDRESS32, + UACPI_RESOURCE_TYPE_ADDRESS32, + struct acpi_resource_address32, + uacpi_resource_address32, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_address_or_clock_input, + .size_for_aml = size_for_aml_address_or_clock_input, + .to_native = convert_address32, + .to_aml = convert_address32, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_ADDRESS16, + UACPI_RESOURCE_TYPE_ADDRESS16, + struct acpi_resource_address16, + uacpi_resource_address16, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_address_or_clock_input, + .size_for_aml = size_for_aml_address_or_clock_input, + .to_native = convert_address16, + .to_aml = convert_address16, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_EXTENDED_IRQ, + UACPI_RESOURCE_TYPE_EXTENDED_IRQ, + struct acpi_resource_extended_irq, + uacpi_resource_extended_irq, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_extended_irq, + .size_for_aml = size_for_aml_extended_irq, + .to_native = convert_extended_irq, + .to_aml = convert_extended_irq, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_ADDRESS64, + UACPI_RESOURCE_TYPE_ADDRESS64, + struct acpi_resource_address64, + uacpi_resource_address64, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_address_or_clock_input, + .size_for_aml = size_for_aml_address_or_clock_input, + .to_native = convert_address64, + .to_aml = convert_address64, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_ADDRESS64_EXTENDED, + UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED, + 
struct acpi_resource_address64_extended, + uacpi_resource_address64_extended, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + .to_native = convert_address64_extended, + .to_aml = convert_address64_extended, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_GPIO_CONNECTION, + UACPI_RESOURCE_TYPE_GPIO_CONNECTION, + struct acpi_resource_gpio_connection, + uacpi_resource_gpio_connection, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_gpio_or_pins, + .size_for_aml = size_for_aml_gpio_or_pins, + .to_aml = convert_gpio_connection, + .to_native = convert_gpio_connection, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_PIN_FUNCTION, + UACPI_RESOURCE_TYPE_PIN_FUNCTION, + struct acpi_resource_pin_function, + uacpi_resource_pin_function, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_gpio_or_pins, + .size_for_aml = size_for_aml_gpio_or_pins, + .to_aml = convert_pin_function, + .to_native = convert_pin_function, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_SERIAL_CONNECTION, + 0, // the native type here is determined dynamically + struct acpi_resource_serial, + uacpi_resource_serial_bus_common, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_serial_connection, + .size_for_aml = aml_size_for_serial_connection, + .to_native = convert_generic_serial_bus, + .to_aml = convert_generic_serial_bus, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_PIN_CONFIGURATION, + UACPI_RESOURCE_TYPE_PIN_CONFIGURATION, + struct acpi_resource_pin_configuration, + uacpi_resource_pin_configuration, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_gpio_or_pins, + .size_for_aml = size_for_aml_gpio_or_pins, + .to_native = convert_pin_configuration, + .to_aml = convert_pin_configuration, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_PIN_GROUP, + UACPI_RESOURCE_TYPE_PIN_GROUP, + struct acpi_resource_pin_group, + uacpi_resource_pin_group, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_gpio_or_pins, + .size_for_aml = size_for_aml_gpio_or_pins, + .to_native = convert_pin_group, + .to_aml = convert_pin_group, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION, + UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION, + struct acpi_resource_pin_group_function, + uacpi_resource_pin_group_function, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_pin_group, + .size_for_aml = size_for_aml_pin_group, + .to_native = convert_pin_group_function, + .to_aml = convert_pin_group_function, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION, + UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION, + struct acpi_resource_pin_group_configuration, + uacpi_resource_pin_group_configuration, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_pin_group, + .size_for_aml = size_for_aml_pin_group, + .to_native = convert_pin_group_configuration, + .to_aml = convert_pin_group_configuration, + ), + DEFINE_LARGE_AML_RESOURCE( + UACPI_AML_RESOURCE_CLOCK_INPUT, + UACPI_RESOURCE_TYPE_CLOCK_INPUT, + struct acpi_resource_clock_input, + uacpi_resource_clock_input, + .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, + .extra_size_for_native = extra_size_for_native_address_or_clock_input, + .size_for_aml = 
size_for_aml_address_or_clock_input, + .to_native = convert_clock_input, + .to_aml = convert_clock_input, + ), +}; + +static enum uacpi_aml_resource get_aml_resource_type(uacpi_u8 raw_byte) +{ + if (raw_byte & ACPI_LARGE_ITEM) { + return aml_resource_to_type[ + LARGE_RESOURCE_BASE + (raw_byte & ACPI_LARGE_ITEM_NAME_MASK) + ]; + } + + return aml_resource_to_type[ + (raw_byte >> ACPI_SMALL_ITEM_NAME_IDX) & ACPI_SMALL_ITEM_NAME_MASK + ]; +} + +static uacpi_status get_aml_resource_size( + uacpi_u8 *data, uacpi_size bytes_left, uacpi_u16 *out_size +) +{ + uacpi_u16 size; + + /* + * Resource header is not included in size for both, so we subtract + * the header size from bytes_left to validate it. + */ + if (*data & ACPI_LARGE_ITEM) { + if (uacpi_unlikely(bytes_left < 3)) + return UACPI_STATUS_AML_INVALID_RESOURCE; + + uacpi_memcpy(&size, data + 1, sizeof(size)); + bytes_left -= aml_resource_kind_to_header_size[ + UACPI_AML_RESOURCE_KIND_LARGE + ]; + } else { + size = *data & ACPI_SMALL_ITEM_LENGTH_MASK; + bytes_left -= aml_resource_kind_to_header_size[ + UACPI_AML_RESOURCE_KIND_SMALL + ]; + } + + if (uacpi_unlikely(size > bytes_left)) + return UACPI_STATUS_AML_INVALID_RESOURCE; + + *out_size = size; + return UACPI_STATUS_OK; +} + +static uacpi_status validate_aml_serial_type(uacpi_u8 type) +{ + if (uacpi_unlikely(type < ACPI_SERIAL_TYPE_I2C || + type > ACPI_SERIAL_TYPE_CSI2)) { + uacpi_error("invalid/unsupported serial connection type %d\n", type); + return UACPI_STATUS_AML_INVALID_RESOURCE; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_for_each_aml_resource( + uacpi_data_view buffer, uacpi_aml_resource_iteration_callback cb, void *user +) +{ + uacpi_status ret; + uacpi_iteration_decision decision; + uacpi_u8 *data; + uacpi_size bytes_left; + uacpi_u16 resource_size; + enum uacpi_aml_resource type; + const struct uacpi_resource_spec *spec; + + bytes_left = buffer.length; + data = buffer.bytes; + + while (bytes_left) { + type = get_aml_resource_type(*data); + if (uacpi_unlikely(type == UACPI_AML_RESOURCE_TYPE_INVALID)) + return UACPI_STATUS_AML_INVALID_RESOURCE; + + ret = get_aml_resource_size(data, bytes_left, &resource_size); + if (uacpi_unlikely_error(ret)) + return ret; + + spec = &aml_resources[type]; + switch (spec->size_kind) { + case UACPI_AML_RESOURCE_SIZE_KIND_FIXED: + if (resource_size != spec->aml_size) + return UACPI_STATUS_AML_INVALID_RESOURCE; + break; + case UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE: + if (resource_size < spec->aml_size) + return UACPI_STATUS_AML_INVALID_RESOURCE; + break; + case UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS: + if (resource_size != spec->aml_size && + resource_size != (spec->aml_size - 1)) + return UACPI_STATUS_AML_INVALID_RESOURCE; + break; + default: + return UACPI_STATUS_INTERNAL_ERROR; + } + + if (spec->type == UACPI_AML_RESOURCE_SERIAL_CONNECTION) { + struct acpi_resource_serial *serial; + + serial = (struct acpi_resource_serial*)data; + + ret = validate_aml_serial_type(serial->type); + if (uacpi_unlikely_error(ret)) + return ret; + } + + decision = cb(user, data, resource_size, spec); + switch (decision) { + case UACPI_ITERATION_DECISION_BREAK: + return UACPI_STATUS_OK; + case UACPI_ITERATION_DECISION_CONTINUE: { + uacpi_size total_size = resource_size; + + total_size += aml_resource_kind_to_header_size[spec->resource_kind]; + data += total_size; + bytes_left -= total_size; + break; + } + default: + return UACPI_STATUS_INTERNAL_ERROR; + } + + if (type == UACPI_AML_RESOURCE_END_TAG) + return UACPI_STATUS_OK; + } + + return 
UACPI_STATUS_NO_RESOURCE_END_TAG; +} + +static uacpi_iteration_decision find_end( + void *opaque, uacpi_u8 *data, uacpi_u16 resource_size, + const struct uacpi_resource_spec *spec +) +{ + uacpi_u8 **out_ptr = opaque; + UACPI_UNUSED(resource_size); + + if (spec->type != UACPI_AML_RESOURCE_END_TAG) + return UACPI_ITERATION_DECISION_CONTINUE; + + *out_ptr = data; + return UACPI_ITERATION_DECISION_BREAK; +} + +static uacpi_size native_size_for_aml_resource( + uacpi_u8 *data, uacpi_u16 size, const struct uacpi_resource_spec *spec +) +{ + uacpi_size final_size = spec->native_size; + + if (spec->extra_size_for_native) + final_size += spec->extra_size_for_native(spec, data, size); + + return UACPI_ALIGN_UP(final_size, sizeof(void*), uacpi_size); +} + +uacpi_status uacpi_find_aml_resource_end_tag( + uacpi_data_view buffer, uacpi_size *out_offset +) +{ + uacpi_u8 *end_tag_ptr = UACPI_NULL; + uacpi_status ret; + + if (buffer.length == 0) { + *out_offset = 0; + return UACPI_STATUS_OK; + } + + /* + * This returning UACPI_STATUS_OK guarantees that end_tag_ptr is set to + * a valid value because a missing end tag would produce a + * UACPI_STATUS_NO_RESOURCE_END_TAG error. + */ + ret = uacpi_for_each_aml_resource(buffer, find_end, &end_tag_ptr); + if (uacpi_unlikely_error(ret)) + return ret; + + *out_offset = end_tag_ptr - buffer.bytes; + return UACPI_STATUS_OK; +} + +struct resource_conversion_ctx { + union { + void *buf; + uacpi_u8 *byte_buf; + uacpi_size size; + }; + uacpi_status st; + uacpi_bool just_one; +}; + +static uacpi_iteration_decision conditional_continue( + struct resource_conversion_ctx *ctx +) +{ + return ctx->just_one ? UACPI_ITERATION_DECISION_BREAK : + UACPI_ITERATION_DECISION_CONTINUE; +} + +// Opcodes that are the same for both AML->native and native->AML +#define CONVERSION_OPCODES_COMMON(native_buf) \ + case UACPI_RESOURCE_CONVERT_OPCODE_END: \ + return conditional_continue(ctx); \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8: \ + case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_16: \ + case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_32: \ + case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_64: { \ + uacpi_u8 bytes; \ + \ + bytes = 1 << (insn->code - UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8); \ + accumulator = insn->f3.imm == 0xFF ? 
0 : accumulator + insn->f3.imm; \ + \ + uacpi_memcpy(dst, src, bytes * UACPI_MAX(1, accumulator)); \ + accumulator = 0; \ + break; \ + } \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_AML_SIZE_LESS_THAN: \ + if (aml_size < insn->f1.arg0) \ + pc += insn->f3.imm; \ + break; \ + case UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_NOT_EQUALS: \ + if (insn->f1.arg0 != accumulator) \ + pc += insn->f3.imm; \ + break; \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_SET_TO_IMM: \ + uacpi_memcpy(dst, &insn->f3.imm, sizeof(insn->f3.imm)); \ + break; \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_IMM: \ + accumulator = insn->f3.imm; \ + break; \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_STORE: \ + uacpi_memcpy_zerout(&accumulator, src, sizeof(accumulator), 1); \ + uacpi_memcpy(dst, &accumulator, 1); \ + \ + if (insn->f3.imm) \ + accumulator *= insn->f3.imm; \ + break; \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE: \ + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_16_NATIVE: { \ + uacpi_u8 bytes; \ + \ + bytes = \ + 1 << (insn->code - UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE); \ + uacpi_memcpy_zerout( \ + &accumulator, native_buf, sizeof(accumulator), bytes \ + ); \ + break; \ + } \ + \ + case UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE: \ + default: \ + if (insn->code != UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE) { \ + uacpi_error("unhandled resource conversion opcode %d\n", \ + insn->code); \ + } else { \ + uacpi_error("tried to execute unreachable conversion opcode\n"); \ + } \ + ctx->st = UACPI_STATUS_INTERNAL_ERROR; \ + return UACPI_ITERATION_DECISION_BREAK; + +#define PTR_AT(ptr, offset) (void*)((uacpi_u8*)(ptr) + (offset)) + +#define NATIVE_OFFSET(res, offset) \ + PTR_AT(res, (offset) + (sizeof(uacpi_u32) * 2)) + +#define NATIVE_FIELD(res, name, field) \ + NATIVE_OFFSET(res, NATIVE_O(name, field)) + +#define CHECK_AML_OOB(offset, prefix, what) \ + if (uacpi_unlikely(offset > ((uacpi_u32)aml_size + header_size))) { \ + uacpi_error(prefix what " is OOB: %zu > %u\n", \ + (uacpi_size)offset, (uacpi_u32)aml_size + header_size); \ + ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; \ + return UACPI_ITERATION_DECISION_BREAK; \ + } + +#define CHECK_AML_OFFSET_BASE(offset, what) \ + if (uacpi_unlikely(offset < base_aml_size_with_header)) { \ + uacpi_error( \ + "invalid " what " offset: %zu, expected at least %u\n", \ + (uacpi_size)offset, base_aml_size_with_header); \ + ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; \ + return UACPI_ITERATION_DECISION_BREAK; \ + } + +#define CHECK_AML_OFFSET(offset, what) \ + CHECK_AML_OOB(offset, "end of ", what) \ + CHECK_AML_OFFSET_BASE(offset, what) + +static uacpi_resource_type aml_serial_to_native_type( + uacpi_u8 type +) +{ + return (type - ACPI_SERIAL_TYPE_I2C) + + UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION; +} + +static uacpi_iteration_decision do_aml_resource_to_native( + void *opaque, uacpi_u8 *data, uacpi_u16 aml_size, + const struct uacpi_resource_spec *spec +) +{ + struct resource_conversion_ctx *ctx = opaque; + uacpi_resource *resource = ctx->buf; + const struct uacpi_resource_convert_instruction *insns, *insn; + uacpi_u8 header_size, pc = 0; + uacpi_u8 *src, *dst; + void *resource_end; + uacpi_u16 base_aml_size; + uacpi_u32 base_aml_size_with_header, accumulator = 0; + + insns = spec->to_native; + + header_size = aml_resource_kind_to_header_size[spec->resource_kind]; + resource->type = spec->native_type; + resource->length = native_size_for_aml_resource(data, aml_size, spec); + resource_end = ctx->byte_buf + spec->native_size; + ctx->byte_buf += 
resource->length; + + base_aml_size = base_aml_size_with_header = spec->aml_size; + base_aml_size_with_header += header_size; + + if (insns == UACPI_NULL) + return conditional_continue(ctx); + + for (;;) { + insn = &insns[pc++]; + + src = data + insn->f1.aml_offset; + dst = NATIVE_OFFSET(resource, insn->f2.native_offset); + + switch (insn->code) { + case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8: + case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16: { + uacpi_size i, j, max_bit; + uacpi_u16 value; + + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16) { + max_bit = 16; + uacpi_memcpy(&value, src, sizeof(uacpi_u16)); + } else { + max_bit = 8; + uacpi_memcpy_zerout( + &value, src, sizeof(value), sizeof(uacpi_u8) + ); + } + + for (i = 0, j = 0; i < max_bit; ++i) { + if (!(value & (1 << i))) + continue; + + dst[j++] = i; + } + + uacpi_memcpy(NATIVE_OFFSET(resource, insn->f3.arg2), &j, 1); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1: + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2: + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3: + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6:{ + uacpi_u8 mask, value; + + mask = (insn->code - UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1) + 1; + mask = (1 << mask) - 1; + + value = (*src >> insn->f3.imm) & mask; + uacpi_memcpy(dst, &value, sizeof(value)); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32: + accumulator = aml_size; + uacpi_memcpy(dst, &accumulator, 4); + break; + + case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE: + case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX: + case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL: { + uacpi_size offset = 0, max_offset, length = 0; + uacpi_char *src_string, *dst_string; + union { + void *ptr; + uacpi_resource_source *source; + uacpi_resource_label *label; + } dst_name = { 0 }; + + dst_name.ptr = dst; + + /* + * Check if the string is bounded by anything at the top. If not, we + * just assume it ends at the end of the resource. 
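+ * + * (When insn->f3.arg2 is non-zero it holds the AML offset of a 16-bit + * field carrying that bound, e.g. a GPIO descriptor's vendor data offset.)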
+ */ + if (insn->f3.arg2) { + uacpi_memcpy_zerout(&max_offset, data + insn->f3.arg2, + sizeof(max_offset), sizeof(uacpi_u16)); + CHECK_AML_OFFSET(max_offset, "resource source"); + } else { + max_offset = aml_size + header_size; + } + + offset += base_aml_size_with_header; + offset += accumulator; + + if (insn->code != UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL) + dst_name.source->index_present = UACPI_TRUE; + + if (offset >= max_offset) { + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE) + dst_name.source->index_present = UACPI_FALSE; + break; + } + + src_string = PTR_AT(data, offset); + + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE) { + uacpi_memcpy(&dst_name.source->index, src_string++, 1); + offset++; + } + + if (offset == max_offset) + break; + + while (offset++ < max_offset) { + if (src_string[length++] == '\0') + break; + } + + if (src_string[length - 1] != '\0') { + uacpi_error("non-null-terminated resource source string\n"); + ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; + return UACPI_ITERATION_DECISION_BREAK; + } + + dst_string = PTR_AT(resource_end, accumulator); + uacpi_memcpy(dst_string, src_string, length); + + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL) { + dst_name.label->length = length; + dst_name.label->string = dst_string; + } else { + dst_name.source->length = length; + dst_name.source->string = dst_string; + } + + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH: + uacpi_memcpy_zerout(&accumulator, src, + sizeof(accumulator), sizeof(uacpi_u16)); + CHECK_AML_OFFSET(accumulator, "pin table"); + + accumulator -= base_aml_size_with_header; + break; + + case UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE: { + uacpi_u16 entry_count = accumulator / 2; + + /* + * Pin table is stored right at the end of the resource buffer, + * copy the data there. + */ + uacpi_memcpy( + resource_end, + data + base_aml_size_with_header, + accumulator + ); + + // Set pin_table_length + uacpi_memcpy(dst, &entry_count, sizeof(entry_count)); + + // Set pin_table pointer + uacpi_memcpy(NATIVE_OFFSET(resource, insn->f3.arg2), + &resource_end, sizeof(void*)); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA: { + uacpi_size length; + uacpi_u16 data_offset, offset_from_end; + void *native_dst, *vendor_data; + + uacpi_memcpy(&data_offset, src, sizeof(data_offset)); + CHECK_AML_OFFSET(data_offset, "vendor data"); + + vendor_data = data + data_offset; + + /* + * Rebase the offset to cut off the header as it's not included + * in the size fields. + */ + data_offset -= header_size; + + length = aml_size - data_offset; + if (length == 0) + break; + + uacpi_memcpy(dst, &length, sizeof(uacpi_u16)); + + offset_from_end = data_offset - base_aml_size; + native_dst = PTR_AT(resource_end, offset_from_end); + + uacpi_memcpy(native_dst, vendor_data, length); + uacpi_memcpy(NATIVE_OFFSET(resource, insn->f3.arg2), + &native_dst, sizeof(void*)); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC: { + uacpi_resource_serial_bus_common *serial_bus_common; + uacpi_u8 serial_type, extra_size, type_length; + + serial_bus_common = &resource->serial_bus_common; + serial_type = *src; + serial_bus_common->type = serial_type; + resource->type = aml_serial_to_native_type(serial_type); + + /* + * Now that we know the serial type rebase the end pointers and + * sizes. 
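+ * + * E.g. an I2C connection's type-specific bytes carry the connection speed + * and slave address; any bytes past them are exposed as vendor data below.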
+ */ + resource_end = PTR_AT( + resource_end, + aml_serial_resource_to_extra_native_size[serial_type] + ); + extra_size = aml_serial_resource_to_extra_aml_size[serial_type]; + base_aml_size += extra_size; + base_aml_size_with_header += extra_size; + + type_length = serial_bus_common->type_data_length; + if (uacpi_unlikely(type_length < extra_size)) { + uacpi_error( + "invalid type-specific data length: %d, " + "expected at least %d\n", type_length, extra_size + ); + ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; + return UACPI_ITERATION_DECISION_BREAK; + } + + /* + * Calculate the length of the vendor data. All the extra data + * beyond the end of type-specific size is considered vendor data. + */ + accumulator = type_length - extra_size; + if (accumulator == 0) + break; + + serial_bus_common->vendor_data_length = accumulator; + serial_bus_common->vendor_data = resource_end; + uacpi_memcpy( + resource_end, + data + base_aml_size_with_header, + accumulator + ); + break; + } + + CONVERSION_OPCODES_COMMON(dst) + } + } +} + +static uacpi_iteration_decision accumulate_native_buffer_size( + void *opaque, uacpi_u8 *data, uacpi_u16 resource_size, + const struct uacpi_resource_spec *spec +) +{ + struct resource_conversion_ctx *ctx = opaque; + uacpi_size size_for_this; + + size_for_this = native_size_for_aml_resource(data, resource_size, spec); + if (size_for_this == 0 || (ctx->size + size_for_this) < ctx->size) { + uacpi_error("invalid native size for aml resource: %zu\n", + size_for_this); + ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; + return UACPI_ITERATION_DECISION_BREAK; + } + + ctx->size += size_for_this; + return conditional_continue(ctx); +} + +static uacpi_status eval_resource_helper( + uacpi_namespace_node *node, const uacpi_char *method, + uacpi_object **out_obj +) +{ + uacpi_status ret; + uacpi_bool is_device; + + ret = uacpi_namespace_node_is(node, UACPI_OBJECT_DEVICE, &is_device); + if (uacpi_unlikely_error(ret)) + return ret; + + if (uacpi_unlikely(!is_device)) + return UACPI_STATUS_INVALID_ARGUMENT; + + return uacpi_eval_simple_buffer( + node, method, out_obj + ); +} + +uacpi_status uacpi_native_resources_from_aml( + uacpi_data_view aml_buffer, uacpi_resources **out_resources +) +{ + uacpi_status ret; + struct resource_conversion_ctx ctx = { 0 }; + uacpi_resources *resources; + + ret = uacpi_for_each_aml_resource( + aml_buffer, accumulate_native_buffer_size, &ctx + ); + if (uacpi_unlikely_error(ret)) + return ret; + + if (uacpi_unlikely_error(ctx.st)) + return ctx.st; + + // Realistically any resource buffer bigger than this is probably a bug + if (uacpi_unlikely(ctx.size > (5 * 1024u * 1024u))) { + uacpi_error("bug: bogus native resource buffer size %zu\n", ctx.size); + return UACPI_STATUS_INTERNAL_ERROR; + } + + resources = uacpi_kernel_alloc_zeroed(ctx.size + sizeof(uacpi_resources)); + if (uacpi_unlikely(resources == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + resources->length = ctx.size; + resources->entries = UACPI_PTR_ADD(resources, sizeof(uacpi_resources)); + + uacpi_memzero(&ctx, sizeof(ctx)); + ctx.buf = resources->entries; + + ret = uacpi_for_each_aml_resource(aml_buffer, do_aml_resource_to_native, &ctx); + if (uacpi_unlikely_error(ret)) { + uacpi_free_resources(resources); + return ret; + } + + *out_resources = resources; + return ret; +} + +uacpi_status uacpi_get_resource_from_buffer( + uacpi_data_view aml_buffer, uacpi_resource **out_resource +) +{ + uacpi_status ret; + struct resource_conversion_ctx ctx = { + .just_one = UACPI_TRUE, + }; + uacpi_resource 
*resource; + + ret = uacpi_for_each_aml_resource( + aml_buffer, accumulate_native_buffer_size, &ctx + ); + if (uacpi_unlikely_error(ret)) + return ret; + + resource = uacpi_kernel_alloc_zeroed(ctx.size); + if (uacpi_unlikely(resource == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memzero(&ctx, sizeof(ctx)); + ctx.buf = resource; + ctx.just_one = UACPI_TRUE; + + ret = uacpi_for_each_aml_resource(aml_buffer, do_aml_resource_to_native, &ctx); + if (uacpi_unlikely_error(ret)) { + uacpi_free_resource(resource); + return ret; + } + + *out_resource = resource; + return ret; +} + +void uacpi_free_resources(uacpi_resources *resources) +{ + if (resources == UACPI_NULL) + return; + + uacpi_free(resources, sizeof(uacpi_resources) + resources->length); +} + +void uacpi_free_resource(uacpi_resource *resource) +{ + if (resource == UACPI_NULL) + return; + + uacpi_free(resource, resource->length); +} + +static uacpi_status extract_native_resources_from_method( + uacpi_namespace_node *device, const uacpi_char *method, + uacpi_resources **out_resources +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_data_view buffer; + + ret = eval_resource_helper(device, method, &obj); + if (uacpi_unlikely_error(ret)) + return ret; + + uacpi_buffer_to_view(obj->buffer, &buffer); + + ret = uacpi_native_resources_from_aml(buffer, out_resources); + uacpi_object_unref(obj); + + return ret; +} + +uacpi_status uacpi_get_current_resources( + uacpi_namespace_node *device, uacpi_resources **out_resources +) +{ + return extract_native_resources_from_method(device, "_CRS", out_resources); +} + +uacpi_status uacpi_get_possible_resources( + uacpi_namespace_node *device, uacpi_resources **out_resources +) +{ + return extract_native_resources_from_method(device, "_PRS", out_resources); +} + +uacpi_status uacpi_get_device_resources( + uacpi_namespace_node *device, const uacpi_char *method, + uacpi_resources **out_resources +) +{ + return extract_native_resources_from_method(device, method, out_resources); +} + +uacpi_status uacpi_for_each_resource( + uacpi_resources *resources, uacpi_resource_iteration_callback cb, void *user +) +{ + uacpi_size bytes_left = resources->length; + uacpi_resource *current = resources->entries; + uacpi_iteration_decision decision; + + while (bytes_left) { + // At least the head bytes + if (uacpi_unlikely(bytes_left < 4)) { + uacpi_error("corrupted resource buffer %p length %zu\n", + resources, resources->length); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (uacpi_unlikely(current->type > UACPI_RESOURCE_TYPE_MAX)) { + uacpi_error("invalid resource type %d\n", current->type); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + if (uacpi_unlikely(current->length > bytes_left)) { + uacpi_error("corrupted resource@%p length %u (%zu bytes left)\n", + current, current->length, bytes_left); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + decision = cb(user, current); + + if (decision == UACPI_ITERATION_DECISION_BREAK || + current->type == UACPI_RESOURCE_TYPE_END_TAG) + return UACPI_STATUS_OK; + + bytes_left -= current->length; + current = (uacpi_resource*)((uacpi_u8*)current + current->length); + } + + return UACPI_STATUS_NO_RESOURCE_END_TAG; +} + +uacpi_status uacpi_for_each_device_resource( + uacpi_namespace_node *device, const uacpi_char *method, + uacpi_resource_iteration_callback cb, void *user +) +{ + uacpi_status ret; + uacpi_resources *resources; + + ret = extract_native_resources_from_method(device, method, &resources); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = 
uacpi_for_each_resource(resources, cb, user); + uacpi_free_resources(resources); + + return ret; +} + +static const struct uacpi_resource_spec *resource_spec_from_native( + uacpi_resource *resource +) +{ + return &aml_resources[native_resource_to_type[resource->type]]; +} + +static uacpi_size aml_size_for_native_resource( + uacpi_resource *resource, const struct uacpi_resource_spec *spec +) +{ + return spec->size_for_aml ? + spec->size_for_aml(spec, resource) : + aml_size_with_header(spec); +} + +static uacpi_iteration_decision do_native_resource_to_aml( + void *opaque, uacpi_resource *resource +) +{ + struct resource_conversion_ctx *ctx = opaque; + const struct uacpi_resource_spec *spec; + const struct uacpi_resource_convert_instruction *insns, *insn; + uacpi_u8 pc = 0; + uacpi_u8 *dst_base, *src, *dst; + uacpi_u32 aml_size, base_aml_size_with_header, accumulator = 0; + void *resource_end; + + spec = resource_spec_from_native(resource); + aml_size = aml_size_for_native_resource(resource, spec); + insns = spec->to_aml; + + dst_base = ctx->byte_buf; + ctx->byte_buf += aml_size; + aml_size -= aml_resource_kind_to_header_size[spec->resource_kind]; + + base_aml_size_with_header = spec->aml_size; + base_aml_size_with_header += aml_resource_kind_to_header_size[ + spec->resource_kind + ]; + resource_end = PTR_AT(resource, spec->native_size); + + if (spec->resource_kind == UACPI_AML_RESOURCE_KIND_LARGE) { + *dst_base = ACPI_LARGE_ITEM | type_to_aml_resource[spec->type]; + uacpi_memcpy(dst_base + 1, &aml_size, sizeof(uacpi_u16)); + } else { + *dst_base = type_to_aml_resource[spec->type] << ACPI_SMALL_ITEM_NAME_IDX; + *dst_base |= aml_size; + } + + if (insns == UACPI_NULL) + return UACPI_ITERATION_DECISION_CONTINUE; + + for (;;) { + insn = &insns[pc++]; + + src = NATIVE_OFFSET(resource, insn->f2.native_offset); + dst = dst_base + insn->f1.aml_offset; + + switch (insn->code) { + case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8: + case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16: { + uacpi_u8 i, *array_size, bytes = 1; + uacpi_u16 mask = 0; + + array_size = NATIVE_OFFSET(resource, insn->f3.arg2); + for (i = 0; i < *array_size; ++i) + mask |= 1 << src[i]; + + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16) + bytes = 2; + + uacpi_memcpy(dst, &mask, bytes); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1: + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2: + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3: + case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6: + *dst |= *src << insn->f3.imm; + break; + + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32: + accumulator = aml_size; + break; + + case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE: + case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX: + case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL: { + uacpi_size source_offset, length; + uacpi_u8 *dst_string; + const uacpi_char *src_string; + union { + void *ptr; + uacpi_resource_source *source; + uacpi_resource_label *label; + } src_name = { 0 }; + + src_name.ptr = src; + + source_offset = base_aml_size_with_header + accumulator; + dst_string = dst_base + source_offset; + + if (insn->f1.aml_offset) + uacpi_memcpy(dst, &source_offset, sizeof(uacpi_u16)); + + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE && + src_name.source->index_present) + uacpi_memcpy(dst_string++, &src_name.source->index, 1); + + if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL) { + length = src_name.label->length; + src_string = src_name.label->string; + } else { 
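+ // not a label, so read string/length through the uacpi_resource_source view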
+ length = src_name.source->length; + src_string = src_name.source->string; + } + + if (length == 0) + break; + + if (uacpi_unlikely(src_string == UACPI_NULL)) { + uacpi_error( + "source string length is %zu but the pointer is NULL\n", + length + ); + ctx->st = UACPI_STATUS_INVALID_ARGUMENT; + return UACPI_ITERATION_DECISION_BREAK; + } + + uacpi_memcpy(dst_string, src_string, length); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH: + uacpi_memcpy_zerout(&accumulator, src, + sizeof(accumulator), sizeof(uacpi_u16)); + accumulator *= sizeof(uacpi_u16); + break; + + case UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE: + /* + * The pin table resides right at the end of the base resource, + * set the offset to it in the AML we're encoding. + */ + uacpi_memcpy(dst, &base_aml_size_with_header, sizeof(uacpi_u16)); + + /* + * Copy the actual data. It also resides right at the end of the + * native base resource. + */ + uacpi_memcpy( + dst_base + base_aml_size_with_header, + resource_end, + accumulator + ); + break; + + case UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA: { + uacpi_u16 vendor_data_length, data_offset, vendor_data_offset; + uacpi_u8 *vendor_data; + + // Read the vendor_data pointer + uacpi_memcpy(&vendor_data, NATIVE_OFFSET(resource, insn->f3.arg2), + sizeof(void*)); + uacpi_memcpy(&vendor_data_length, src, sizeof(uacpi_u16)); + + if (vendor_data == UACPI_NULL) { + uacpi_size full_aml_size; + + if (uacpi_unlikely(vendor_data_length != 0)) { + uacpi_error( + "vendor_data_length is %d, but pointer is NULL\n", + vendor_data_length + ); + ctx->st = UACPI_STATUS_INVALID_ARGUMENT; + return UACPI_ITERATION_DECISION_BREAK; + } + + /* + * There's no vendor data. The specification still mandates + * that we fill the vendor data offset field correctly, meaning + * we set it to the total length of the resource. + */ + full_aml_size = aml_size; + full_aml_size += aml_resource_kind_to_header_size[ + spec->resource_kind + ]; + + uacpi_memcpy(dst, &full_aml_size, sizeof(uacpi_u16)); + break; + } + + /* + * Calculate the offset of vendor data from the end of the native + * resource and use it since it matches the offset from the end of + * the AML resource. + * + * Non-zero value means there's a source string in between. + */ + data_offset = vendor_data - (uacpi_u8*)resource_end; + vendor_data_offset = data_offset + base_aml_size_with_header; + + // Write vendor_data_offset + uacpi_memcpy(dst, &vendor_data_offset, sizeof(uacpi_u16)); + + /* + * Write vendor_data_length, this field is right after + * vendor_data_offset, and is completely redundant, but it exists + * nonetheless. 
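+ * + * (A decoder can always recompute it from vendor_data_offset and the total + * resource size, which is exactly how the AML->native path above does it.)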
+ */ + uacpi_memcpy( + dst + sizeof(uacpi_u16), + &vendor_data_length, + sizeof(vendor_data_length) + ); + + // Finally write the data itself + uacpi_memcpy( + dst_base + vendor_data_offset, + vendor_data, + vendor_data_length + ); + break; + } + + case UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC: { + uacpi_u8 serial_type = *src; + *dst = serial_type; + + ctx->st = validate_aml_serial_type(serial_type); + if (uacpi_unlikely_error(ctx->st)) + return UACPI_ITERATION_DECISION_BREAK; + + if (uacpi_unlikely(resource->type != + aml_serial_to_native_type(serial_type))) { + uacpi_error( + "native serial resource type %d doesn't match expected %d\n", + resource->type, aml_serial_to_native_type(serial_type) + ); + ctx->st = UACPI_STATUS_INVALID_ARGUMENT; + return UACPI_ITERATION_DECISION_BREAK; + } + + // Rebase the end pointer & size now that we know the serial type + resource_end = PTR_AT( + resource_end, + aml_serial_resource_to_extra_native_size[serial_type] + ); + base_aml_size_with_header += aml_serial_resource_to_extra_aml_size[ + serial_type + ]; + + accumulator = resource->serial_bus_common.vendor_data_length; + if (accumulator == 0) + break; + + // Copy vendor data + uacpi_memcpy( + dst_base + base_aml_size_with_header, + resource_end, + accumulator + ); + break; + } + + CONVERSION_OPCODES_COMMON(src) + } + } +} + +static uacpi_status native_resources_to_aml( + uacpi_resources *native_resources, void *aml_buffer +) +{ + uacpi_status ret; + struct resource_conversion_ctx ctx = { 0 }; + + ctx.buf = aml_buffer; + + ret = uacpi_for_each_resource( + native_resources, do_native_resource_to_aml, &ctx + ); + if (ret == UACPI_STATUS_NO_RESOURCE_END_TAG) { + // An end tag is always included + uacpi_resource end_tag = { .type = UACPI_RESOURCE_TYPE_END_TAG }; + + do_native_resource_to_aml(&ctx, &end_tag); + ret = UACPI_STATUS_OK; + } + if (uacpi_unlikely_error(ret)) + return ret; + + return ctx.st; +} + +static uacpi_iteration_decision accumulate_aml_buffer_size( + void *opaque, uacpi_resource *resource +) +{ + struct resource_conversion_ctx *ctx = opaque; + const struct uacpi_resource_spec *spec; + uacpi_size size_for_this; + + // resource->type is sanitized to be valid here by the iteration function + spec = resource_spec_from_native(resource); + + size_for_this = aml_size_for_native_resource(resource, spec); + if (size_for_this == 0 || (ctx->size + size_for_this) < ctx->size) { + uacpi_error("invalid aml size for native resource: %zu\n", + size_for_this); + ctx->st = UACPI_STATUS_INVALID_ARGUMENT; + return UACPI_ITERATION_DECISION_BREAK; + } + + ctx->size += size_for_this; + return UACPI_ITERATION_DECISION_CONTINUE; +} + +uacpi_status uacpi_native_resources_to_aml( + uacpi_resources *resources, uacpi_object **out_template +) +{ + uacpi_status ret; + uacpi_object *obj; + void *buffer; + struct resource_conversion_ctx ctx = { 0 }; + + ret = uacpi_for_each_resource( + resources, accumulate_aml_buffer_size, &ctx + ); + if (ret == UACPI_STATUS_NO_RESOURCE_END_TAG) { + // An end tag is always included + uacpi_resource end_tag = { .type = UACPI_RESOURCE_TYPE_END_TAG }; + + accumulate_aml_buffer_size(&ctx, &end_tag); + ret = UACPI_STATUS_OK; + } + if (uacpi_unlikely_error(ret)) + return ret; + if (uacpi_unlikely_error(ctx.st)) + return ctx.st; + + // Same reasoning as native_resource_from_aml + if (uacpi_unlikely(ctx.size > (5 * 1024u * 1024u))) { + uacpi_error("bug: bogus target aml resource buffer size %zu\n", + ctx.size); + return UACPI_STATUS_INTERNAL_ERROR; + } + + buffer = 
uacpi_kernel_alloc_zeroed(ctx.size); + if (uacpi_unlikely(buffer == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + obj = uacpi_create_object(UACPI_OBJECT_BUFFER); + if (uacpi_unlikely(obj == UACPI_NULL)) { + uacpi_free(buffer, ctx.size); + return UACPI_STATUS_OUT_OF_MEMORY; + } + + obj->buffer->data = buffer; + obj->buffer->size = ctx.size; + + ret = native_resources_to_aml(resources, buffer); + if (uacpi_unlikely_error(ret)) + uacpi_object_unref(obj); + + if (ret == UACPI_STATUS_OK) + *out_template = obj; + + return ret; +} + +uacpi_status uacpi_set_resources( + uacpi_namespace_node *device, uacpi_resources *resources +) +{ + uacpi_status ret; + uacpi_object *res_template; + uacpi_object_array args; + + ret = uacpi_native_resources_to_aml(resources, &res_template); + if (uacpi_unlikely_error(ret)) + return ret; + + args.objects = &res_template; + args.count = 1; + ret = uacpi_eval(device, "_SRS", &args, UACPI_NULL); + + uacpi_object_unref(res_template); + return ret; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/shareable.c b/sys/dev/acpi/uacpi/shareable.c new file mode 100644 index 0000000..b42660a --- /dev/null +++ b/sys/dev/acpi/uacpi/shareable.c @@ -0,0 +1,71 @@ +#include <uacpi/internal/shareable.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/platform/atomic.h> + +#ifndef UACPI_BAREBONES_MODE + +#define BUGGED_REFCOUNT 0xFFFFFFFF + +void uacpi_shareable_init(uacpi_handle handle) +{ + struct uacpi_shareable *shareable = handle; + shareable->reference_count = 1; +} + +uacpi_bool uacpi_bugged_shareable(uacpi_handle handle) +{ + struct uacpi_shareable *shareable = handle; + + if (uacpi_unlikely(shareable->reference_count == 0)) + uacpi_make_shareable_bugged(shareable); + + return uacpi_atomic_load32(&shareable->reference_count) == BUGGED_REFCOUNT; +} + +void uacpi_make_shareable_bugged(uacpi_handle handle) +{ + struct uacpi_shareable *shareable = handle; + uacpi_atomic_store32(&shareable->reference_count, BUGGED_REFCOUNT); +} + +uacpi_u32 uacpi_shareable_ref(uacpi_handle handle) +{ + struct uacpi_shareable *shareable = handle; + + if (uacpi_unlikely(uacpi_bugged_shareable(shareable))) + return BUGGED_REFCOUNT; + + return uacpi_atomic_inc32(&shareable->reference_count) - 1; +} + +uacpi_u32 uacpi_shareable_unref(uacpi_handle handle) +{ + struct uacpi_shareable *shareable = handle; + + if (uacpi_unlikely(uacpi_bugged_shareable(shareable))) + return BUGGED_REFCOUNT; + + return uacpi_atomic_dec32(&shareable->reference_count) + 1; +} + +void uacpi_shareable_unref_and_delete_if_last( + uacpi_handle handle, void (*do_free)(uacpi_handle) +) +{ + if (handle == UACPI_NULL) + return; + + if (uacpi_unlikely(uacpi_bugged_shareable(handle))) + return; + + if (uacpi_shareable_unref(handle) == 1) + do_free(handle); +} + +uacpi_u32 uacpi_shareable_refcount(uacpi_handle handle) +{ + struct uacpi_shareable *shareable = handle; + return uacpi_atomic_load32(&shareable->reference_count); +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/sleep.c b/sys/dev/acpi/uacpi/sleep.c new file mode 100644 index 0000000..4736324 --- /dev/null +++ b/sys/dev/acpi/uacpi/sleep.c @@ -0,0 +1,616 @@ +#include <uacpi/sleep.h> +#include <uacpi/internal/context.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/io.h> +#include <uacpi/internal/registers.h> +#include <uacpi/internal/event.h> +#include <uacpi/platform/arch_helpers.h> + +#ifndef UACPI_BAREBONES_MODE + +#ifndef UACPI_REDUCED_HARDWARE +#define CALL_SLEEP_FN(name, state) \ + (uacpi_is_hardware_reduced() 
? \ + name##_hw_reduced(state) : name##_hw_full(state)) +#else +#define CALL_SLEEP_FN(name, state) name##_hw_reduced(state); +#endif + +static uacpi_status eval_wak(uacpi_u8 state); +static uacpi_status eval_sst(uacpi_u8 value); + +#ifndef UACPI_REDUCED_HARDWARE +uacpi_status uacpi_set_waking_vector( + uacpi_phys_addr addr32, uacpi_phys_addr addr64 +) +{ + struct acpi_facs *facs = g_uacpi_rt_ctx.facs; + + if (facs == UACPI_NULL) + return UACPI_STATUS_OK; + + facs->firmware_waking_vector = addr32; + + // The 64-bit wake vector doesn't exist, we're done + if (facs->length < 32) + return UACPI_STATUS_OK; + + // Only allow 64-bit wake vector on 1.0 and above FACS + if (facs->version >= 1) + facs->x_firmware_waking_vector = addr64; + else + facs->x_firmware_waking_vector = 0; + + return UACPI_STATUS_OK; +} + +static uacpi_status enter_sleep_state_hw_full(uacpi_u8 state) +{ + uacpi_status ret; + uacpi_u64 wake_status, pm1a, pm1b; + + ret = uacpi_write_register_field( + UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR + ); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_disable_all_gpes(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_clear_all_events(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_enable_all_wake_gpes(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a); + if (uacpi_unlikely_error(ret)) + return ret; + + pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK)); + pm1b = pm1a; + + pm1a |= g_uacpi_rt_ctx.last_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX; + pm1b |= g_uacpi_rt_ctx.last_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX; + + /* + * Just like ACPICA, split writing SLP_TYP and SLP_EN to work around + * buggy firmware that can't handle both written at the same time. + */ + ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b); + if (uacpi_unlikely_error(ret)) + return ret; + + pm1a |= ACPI_PM1_CNT_SLP_EN_MASK; + pm1b |= ACPI_PM1_CNT_SLP_EN_MASK; + + if (state < UACPI_SLEEP_STATE_S4) + UACPI_ARCH_FLUSH_CPU_CACHE(); + + ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b); + if (uacpi_unlikely_error(ret)) + return ret; + + if (state > UACPI_SLEEP_STATE_S3) { + /* + * We're still here, this is a bug or very slow firmware. + * Just try spinning for a bit. + */ + uacpi_u64 stalled_time = 0; + + // 10 seconds max + while (stalled_time < (10 * 1000 * 1000)) { + uacpi_kernel_stall(100); + stalled_time += 100; + } + + // Try one more time + ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b); + if (uacpi_unlikely_error(ret)) + return ret; + + // Nothing we can do here, give up + return UACPI_STATUS_HARDWARE_TIMEOUT; + } + + do { + ret = uacpi_read_register_field( + UACPI_REGISTER_FIELD_WAK_STS, &wake_status + ); + if (uacpi_unlikely_error(ret)) + return ret; + } while (wake_status != 1); + + return UACPI_STATUS_OK; +} + +static uacpi_status prepare_for_wake_from_sleep_state_hw_full(uacpi_u8 state) +{ + uacpi_status ret; + uacpi_u64 pm1a, pm1b; + UACPI_UNUSED(state); + + /* + * Some hardware apparently relies on S0 values being written to the PM1 + * control register on wake, so do this here. 
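+ * + * (The S0 SLP_TYP values were cached by uacpi_prepare_for_sleep_state(); + * if \_S0 was absent they remain UACPI_SLEEP_TYP_INVALID and this is a no-op.)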
+ */ + + if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID) + goto out; + + ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a); + if (uacpi_unlikely_error(ret)) + goto out; + + pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK)); + pm1b = pm1a; + + pm1a |= g_uacpi_rt_ctx.s0_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX; + pm1b |= g_uacpi_rt_ctx.s0_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX; + + uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b); +out: + // Errors ignored intentionally; we don't want to abort because of this + return UACPI_STATUS_OK; +} + +static uacpi_status wake_from_sleep_state_hw_full(uacpi_u8 state) +{ + uacpi_status ret; + g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID; + g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID; + + // Set the status to 2 (waking) while we execute the wake method. + eval_sst(2); + + ret = uacpi_disable_all_gpes(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_enable_all_runtime_gpes(); + if (uacpi_unlikely_error(ret)) + return ret; + + eval_wak(state); + + // Apparently some BIOSes expect us to clear this, so do it + uacpi_write_register_field( + UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR + ); + + // Now that we're awake set the status to 1 (running) + eval_sst(1); + + return UACPI_STATUS_OK; +} +#endif + +static uacpi_status get_slp_type_for_state( + uacpi_u8 state, uacpi_u8 *a, uacpi_u8 *b +) +{ + uacpi_char path[] = "_S0"; + uacpi_status ret; + uacpi_object *obj0, *obj1, *ret_obj = UACPI_NULL; + + path[2] += state; + + ret = uacpi_eval_typed( + uacpi_namespace_root(), path, UACPI_NULL, + UACPI_OBJECT_PACKAGE_BIT, &ret_obj + ); + if (ret != UACPI_STATUS_OK) { + if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) { + uacpi_warn("error while evaluating %s: %s\n", path, + uacpi_status_to_string(ret)); + } else { + uacpi_trace("sleep state %d is not supported as %s was not found\n", + state, path); + } + goto out; + } + + switch (ret_obj->package->count) { + case 0: + uacpi_error("empty package while evaluating %s!\n", path); + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + goto out; + + case 1: + obj0 = ret_obj->package->objects[0]; + if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER)) { + uacpi_error( + "invalid object type at pkg[0] => %s when evaluating %s\n", + uacpi_object_type_to_string(obj0->type), path + ); + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + goto out; + } + + *a = obj0->integer; + *b = obj0->integer >> 8; + break; + + default: + obj0 = ret_obj->package->objects[0]; + obj1 = ret_obj->package->objects[1]; + + if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER || + obj1->type != UACPI_OBJECT_INTEGER)) { + uacpi_error( + "invalid object type when evaluating %s: " + "pkg[0] => %s, pkg[1] => %s\n", path, + uacpi_object_type_to_string(obj0->type), + uacpi_object_type_to_string(obj1->type) + ); + ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + goto out; + } + + *a = obj0->integer; + *b = obj1->integer; + break; + } + +out: + if (ret != UACPI_STATUS_OK) { + *a = UACPI_SLEEP_TYP_INVALID; + *b = UACPI_SLEEP_TYP_INVALID; + } + + uacpi_object_unref(ret_obj); + return ret; +} + +static uacpi_status eval_sleep_helper( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u8 value +) +{ + uacpi_object *arg; + uacpi_object_array args; + uacpi_status ret; + + arg = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(arg == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + arg->integer = value; + args.objects = &arg; + args.count = 1; + + ret = uacpi_eval(parent, path, 
&args, UACPI_NULL); + switch (ret) { + case UACPI_STATUS_OK: + break; + case UACPI_STATUS_NOT_FOUND: + ret = UACPI_STATUS_OK; + break; + default: + uacpi_error("error while evaluating %s: %s\n", + path, uacpi_status_to_string(ret)); + break; + } + + uacpi_object_unref(arg); + return ret; +} + +static uacpi_status eval_pts(uacpi_u8 state) +{ + return eval_sleep_helper(uacpi_namespace_root(), "_PTS", state); +} + +static uacpi_status eval_wak(uacpi_u8 state) +{ + return eval_sleep_helper(uacpi_namespace_root(), "_WAK", state); +} + +static uacpi_status eval_sst(uacpi_u8 value) +{ + return eval_sleep_helper( + uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SI), + "_SST", value + ); +} + +static uacpi_status eval_sst_for_state(enum uacpi_sleep_state state) +{ + uacpi_u8 arg; + + /* + * This optional object is a control method that OSPM invokes to set the + * system status indicator as desired. + * Arguments:(1) + * Arg0 - An Integer containing the system status indicator identifier: + * 0 - No system state indication. Indicator off + * 1 - Working + * 2 - Waking + * 3 - Sleeping. Used to indicate system state S1, S2, or S3 + * 4 - Sleeping with context saved to non-volatile storage + */ + switch (state) { + case UACPI_SLEEP_STATE_S0: + arg = 1; + break; + case UACPI_SLEEP_STATE_S1: + case UACPI_SLEEP_STATE_S2: + case UACPI_SLEEP_STATE_S3: + arg = 3; + break; + case UACPI_SLEEP_STATE_S4: + arg = 4; + break; + case UACPI_SLEEP_STATE_S5: + arg = 0; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return eval_sst(arg); +} + +uacpi_status uacpi_prepare_for_sleep_state(enum uacpi_sleep_state state_enum) +{ + uacpi_u8 state = state_enum; + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED); + + if (uacpi_unlikely(state > UACPI_SLEEP_STATE_S5)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = get_slp_type_for_state( + state, + &g_uacpi_rt_ctx.last_sleep_typ_a, + &g_uacpi_rt_ctx.last_sleep_typ_b + ); + if (ret != UACPI_STATUS_OK) + return ret; + + ret = get_slp_type_for_state( + 0, + &g_uacpi_rt_ctx.s0_sleep_typ_a, + &g_uacpi_rt_ctx.s0_sleep_typ_b + ); + + ret = eval_pts(state); + if (uacpi_unlikely_error(ret)) + return ret; + + eval_sst_for_state(state); + return UACPI_STATUS_OK; +} + +static uacpi_u8 make_hw_reduced_sleep_control(uacpi_u8 slp_typ) +{ + uacpi_u8 value; + + value = (slp_typ << ACPI_SLP_CNT_SLP_TYP_IDX); + value &= ACPI_SLP_CNT_SLP_TYP_MASK; + value |= ACPI_SLP_CNT_SLP_EN_MASK; + + return value; +} + +static uacpi_status enter_sleep_state_hw_reduced(uacpi_u8 state) +{ + uacpi_status ret; + uacpi_u8 sleep_control; + uacpi_u64 wake_status; + struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt; + + if (!fadt->sleep_control_reg.address || !fadt->sleep_status_reg.address) + return UACPI_STATUS_NOT_FOUND; + + ret = uacpi_write_register_field( + UACPI_REGISTER_FIELD_HWR_WAK_STS, + ACPI_SLP_STS_CLEAR + ); + if (uacpi_unlikely_error(ret)) + return ret; + + sleep_control = make_hw_reduced_sleep_control( + g_uacpi_rt_ctx.last_sleep_typ_a + ); + + if (state < UACPI_SLEEP_STATE_S4) + UACPI_ARCH_FLUSH_CPU_CACHE(); + + /* + * To put the system into a sleep state, software will write the HW-reduced + * Sleep Type value (obtained from the \_Sx object in the DSDT) and the + * SLP_EN bit to the sleep control register. 
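+ * + * E.g. if \_S5 yielded SLP_TYP 0x5, make_hw_reduced_sleep_control() above + * produced (0x5 << ACPI_SLP_CNT_SLP_TYP_IDX) | ACPI_SLP_CNT_SLP_EN_MASK.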
+ */ + ret = uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control); + if (uacpi_unlikely_error(ret)) + return ret; + + /* + * The OSPM then polls the WAK_STS bit of the SLEEP_STATUS_REG waiting for + * it to be one (1), indicating that the system has been transitioned + * back to the Working state. + */ + do { + ret = uacpi_read_register_field( + UACPI_REGISTER_FIELD_HWR_WAK_STS, &wake_status + ); + if (uacpi_unlikely_error(ret)) + return ret; + } while (wake_status != 1); + + return UACPI_STATUS_OK; +} + +static uacpi_status prepare_for_wake_from_sleep_state_hw_reduced(uacpi_u8 state) +{ + uacpi_u8 sleep_control; + UACPI_UNUSED(state); + + if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID) + goto out; + + sleep_control = make_hw_reduced_sleep_control( + g_uacpi_rt_ctx.s0_sleep_typ_a + ); + uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control); + +out: + return UACPI_STATUS_OK; +} + +static uacpi_status wake_from_sleep_state_hw_reduced(uacpi_u8 state) +{ + g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID; + g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID; + + // Set the status to 2 (waking) while we execute the wake method. + eval_sst(2); + + eval_wak(state); + + // Apparently some BIOSes expect us to clear this, so do it + uacpi_write_register_field( + UACPI_REGISTER_FIELD_HWR_WAK_STS, ACPI_SLP_STS_CLEAR + ); + + // Now that we're awake set the status to 1 (running) + eval_sst(1); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_enter_sleep_state(enum uacpi_sleep_state state_enum) +{ + uacpi_u8 state = state_enum; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED); + + if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (uacpi_unlikely(g_uacpi_rt_ctx.last_sleep_typ_a > ACPI_SLP_TYP_MAX || + g_uacpi_rt_ctx.last_sleep_typ_b > ACPI_SLP_TYP_MAX)) { + uacpi_error("invalid SLP_TYP values: 0x%02X:0x%02X\n", + g_uacpi_rt_ctx.last_sleep_typ_a, + g_uacpi_rt_ctx.last_sleep_typ_b); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + return CALL_SLEEP_FN(enter_sleep_state, state); +} + +uacpi_status uacpi_prepare_for_wake_from_sleep_state( + uacpi_sleep_state state_enum +) +{ + uacpi_u8 state = state_enum; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED); + + if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + return CALL_SLEEP_FN(prepare_for_wake_from_sleep_state, state); +} + +uacpi_status uacpi_wake_from_sleep_state( + uacpi_sleep_state state_enum +) +{ + uacpi_u8 state = state_enum; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED); + + if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + return CALL_SLEEP_FN(wake_from_sleep_state, state); +} + +uacpi_status uacpi_reboot(void) +{ + uacpi_status ret; + uacpi_handle pci_dev = UACPI_NULL, io_handle = UACPI_NULL; + struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt; + struct acpi_gas *reset_reg = &fadt->reset_reg; + + /* + * Allow restarting earlier than namespace load so that the kernel can + * use this in case of some initialization error. + */ + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (!(fadt->flags & ACPI_RESET_REG_SUP) || !reset_reg->address) + return UACPI_STATUS_NOT_FOUND; + + switch (reset_reg->address_space_id) { + case UACPI_ADDRESS_SPACE_SYSTEM_IO: + /* + * For SystemIO we don't do any checking, and we ignore bit width + * because that's what NT does. 
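+ * + * (I.e. a single byte, fadt->reset_value, is written to the I/O port at + * reset_reg->address regardless of the GAS register_bit_width.)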
+ */ + ret = uacpi_kernel_io_map(reset_reg->address, 1, &io_handle); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_kernel_io_write8(io_handle, 0, fadt->reset_value); + break; + case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY: + ret = uacpi_write_register(UACPI_REGISTER_RESET, fadt->reset_value); + break; + case UACPI_ADDRESS_SPACE_PCI_CONFIG: { + uacpi_pci_address address = { 0 }; + + // Bus is assumed to be 0 here + address.segment = 0; + address.bus = 0; + address.device = (reset_reg->address >> 32) & 0xFF; + address.function = (reset_reg->address >> 16) & 0xFF; + + ret = uacpi_kernel_pci_device_open(address, &pci_dev); + if (uacpi_unlikely_error(ret)) + break; + + ret = uacpi_kernel_pci_write8( + pci_dev, reset_reg->address & 0xFFFF, fadt->reset_value + ); + break; + } + default: + uacpi_warn( + "unable to perform a reset: unsupported address space '%s' (%d)\n", + uacpi_address_space_to_string(reset_reg->address_space_id), + reset_reg->address_space_id + ); + ret = UACPI_STATUS_UNIMPLEMENTED; + } + + if (ret == UACPI_STATUS_OK) { + /* + * This should've worked but we're still here. + * Spin for a bit then give up. + */ + uacpi_u64 stalled_time = 0; + + while (stalled_time < (1000 * 1000)) { + uacpi_kernel_stall(100); + stalled_time += 100; + } + + uacpi_error("reset timeout\n"); + ret = UACPI_STATUS_HARDWARE_TIMEOUT; + } + + if (pci_dev != UACPI_NULL) + uacpi_kernel_pci_device_close(pci_dev); + if (io_handle != UACPI_NULL) + uacpi_kernel_io_unmap(io_handle); + + return ret; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/stdlib.c b/sys/dev/acpi/uacpi/stdlib.c new file mode 100644 index 0000000..98344f1 --- /dev/null +++ b/sys/dev/acpi/uacpi/stdlib.c @@ -0,0 +1,728 @@ +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/utilities.h> + +#ifdef UACPI_USE_BUILTIN_STRING + +#ifndef uacpi_memcpy +void *uacpi_memcpy(void *dest, const void *src, uacpi_size count) +{ + uacpi_char *cd = dest; + const uacpi_char *cs = src; + + while (count--) + *cd++ = *cs++; + + return dest; +} +#endif + +#ifndef uacpi_memmove +void *uacpi_memmove(void *dest, const void *src, uacpi_size count) +{ + uacpi_char *cd = dest; + const uacpi_char *cs = src; + + if (src < dest) { + cs += count; + cd += count; + + while (count--) + *--cd = *--cs; + } else { + while (count--) + *cd++ = *cs++; + } + + return dest; +} +#endif + +#ifndef uacpi_memset +void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count) +{ + uacpi_u8 fill = ch; + uacpi_u8 *cdest = dest; + + while (count--) + *cdest++ = fill; + + return dest; +} +#endif + +#ifndef uacpi_memcmp +uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count) +{ + const uacpi_u8 *byte_lhs = lhs; + const uacpi_u8 *byte_rhs = rhs; + uacpi_size i; + + for (i = 0; i < count; ++i) { + if (byte_lhs[i] != byte_rhs[i]) + return byte_lhs[i] - byte_rhs[i]; + } + + return 0; +} +#endif + +#endif // UACPI_USE_BUILTIN_STRING + +#ifndef uacpi_strlen +uacpi_size uacpi_strlen(const uacpi_char *str) +{ + const uacpi_char *str1; + + for (str1 = str; *str1; str1++); + + return str1 - str; +} +#endif + +#ifndef UACPI_BAREBONES_MODE + +#ifndef uacpi_strnlen +uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max) +{ + const uacpi_char *str1; + + for (str1 = str; max-- && *str1; str1++); + + return str1 - str; +} +#endif + +#ifndef uacpi_strcmp +uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs) +{ + uacpi_size i = 0; + typedef const uacpi_u8 *cucp; + + while (lhs[i] && rhs[i]) { + if (lhs[i] != rhs[i]) + return 
*(cucp)&lhs[i] - *(cucp)&rhs[i]; + + i++; + } + + return *(cucp)&lhs[i] - *(cucp)&rhs[i]; +} +#endif + +void uacpi_memcpy_zerout(void *dst, const void *src, + uacpi_size dst_size, uacpi_size src_size) +{ + uacpi_size bytes_to_copy = UACPI_MIN(src_size, dst_size); + + if (bytes_to_copy) + uacpi_memcpy(dst, src, bytes_to_copy); + + if (dst_size > bytes_to_copy) + uacpi_memzero((uacpi_u8 *)dst + bytes_to_copy, dst_size - bytes_to_copy); +} + +uacpi_u8 uacpi_bit_scan_forward(uacpi_u64 value) +{ +#if defined(_MSC_VER) && !defined(__clang__) + unsigned char ret; + unsigned long index; + +#ifdef _WIN64 + ret = _BitScanForward64(&index, value); + if (ret == 0) + return 0; + + return (uacpi_u8)index + 1; +#else + ret = _BitScanForward(&index, value); + if (ret == 0) { + ret = _BitScanForward(&index, value >> 32); + if (ret == 0) + return 0; + + return (uacpi_u8)index + 33; + } + + return (uacpi_u8)index + 1; +#endif + +#elif defined(__WATCOMC__) + // TODO: Use compiler intrinsics or inline ASM here + uacpi_u8 index; + uacpi_u64 mask = 1; + + for (index = 1; index <= 64; index++, mask <<= 1) { + if (value & mask) { + return index; + } + } + + return 0; +#else + return __builtin_ffsll(value); +#endif +} + +uacpi_u8 uacpi_bit_scan_backward(uacpi_u64 value) +{ +#if defined(_MSC_VER) && !defined(__clang__) + unsigned char ret; + unsigned long index; + +#ifdef _WIN64 + ret = _BitScanReverse64(&index, value); + if (ret == 0) + return 0; + + return (uacpi_u8)index + 1; +#else + ret = _BitScanReverse(&index, value >> 32); + if (ret == 0) { + ret = _BitScanReverse(&index, value); + if (ret == 0) + return 0; + + return (uacpi_u8)index + 1; + } + + return (uacpi_u8)index + 33; +#endif + +#elif defined(__WATCOMC__) + // TODO: Use compiler intrinsics or inline ASM here + uacpi_u8 index; + uacpi_u64 mask = (1ull << 63); + + for (index = 64; index > 0; index--, mask >>= 1) { + if (value & mask) { + return index; + } + } + + return 0; +#else + if (value == 0) + return 0; + + return 64 - __builtin_clzll(value); +#endif +} + +#ifndef UACPI_NATIVE_ALLOC_ZEROED +void *uacpi_builtin_alloc_zeroed(uacpi_size size) +{ + void *ptr; + + ptr = uacpi_kernel_alloc(size); + if (uacpi_unlikely(ptr == UACPI_NULL)) + return ptr; + + uacpi_memzero(ptr, size); + return ptr; +} +#endif + +#endif // !UACPI_BAREBONES_MODE + +#ifndef uacpi_vsnprintf +struct fmt_buf_state { + uacpi_char *buffer; + uacpi_size capacity; + uacpi_size bytes_written; +}; + +struct fmt_spec { + uacpi_u8 is_signed : 1; + uacpi_u8 prepend : 1; + uacpi_u8 uppercase : 1; + uacpi_u8 left_justify : 1; + uacpi_u8 alternate_form : 1; + uacpi_u8 has_precision : 1; + uacpi_char pad_char; + uacpi_char prepend_char; + uacpi_u64 min_width; + uacpi_u64 precision; + uacpi_u32 base; +}; + +static void write_one(struct fmt_buf_state *fb_state, uacpi_char c) +{ + if (fb_state->bytes_written < fb_state->capacity) + fb_state->buffer[fb_state->bytes_written] = c; + + fb_state->bytes_written++; +} + +static void write_many( + struct fmt_buf_state *fb_state, const uacpi_char *string, uacpi_size count +) +{ + if (fb_state->bytes_written < fb_state->capacity) { + uacpi_size count_to_write; + + count_to_write = UACPI_MIN( + count, fb_state->capacity - fb_state->bytes_written + ); + uacpi_memcpy( + &fb_state->buffer[fb_state->bytes_written], string, count_to_write + ); + } + + fb_state->bytes_written += count; +} + +static uacpi_char hex_char(uacpi_bool upper, uacpi_u64 value) +{ + static const uacpi_char upper_hex[] = "0123456789ABCDEF"; + static const uacpi_char lower_hex[] = 
"0123456789abcdef"; + + return (upper ? upper_hex : lower_hex)[value]; +} + +static void write_padding( + struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_size repr_size +) +{ + uacpi_u64 mw = fm->min_width; + + if (mw <= repr_size) + return; + + mw -= repr_size; + + while (mw--) + write_one(fb_state, fm->left_justify ? ' ' : fm->pad_char); +} + +#define REPR_BUFFER_SIZE 32 + +static void write_integer( + struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_u64 value +) +{ + uacpi_char repr_buffer[REPR_BUFFER_SIZE]; + uacpi_size index = REPR_BUFFER_SIZE; + uacpi_u64 remainder; + uacpi_char repr; + uacpi_bool negative = UACPI_FALSE; + uacpi_size repr_size; + + if (fm->is_signed) { + uacpi_i64 as_ll = value; + + if (as_ll < 0) { + value = -as_ll; + negative = UACPI_TRUE; + } + } + + if (fm->prepend || negative) + write_one(fb_state, negative ? '-' : fm->prepend_char); + + while (value) { + remainder = value % fm->base; + value /= fm->base; + + if (fm->base == 16) { + repr = hex_char(fm->uppercase, remainder); + } else if (fm->base == 8 || fm->base == 10) { + repr = remainder + '0'; + } else { + repr = '?'; + } + + repr_buffer[--index] = repr; + } + repr_size = REPR_BUFFER_SIZE - index; + + if (repr_size == 0) { + repr_buffer[--index] = '0'; + repr_size = 1; + } + + if (fm->alternate_form) { + if (fm->base == 16) { + repr_buffer[--index] = fm->uppercase ? 'X' : 'x'; + repr_buffer[--index] = '0'; + repr_size += 2; + } else if (fm->base == 8) { + repr_buffer[--index] = '0'; + repr_size += 1; + } + } + + if (fm->left_justify) { + write_many(fb_state, &repr_buffer[index], repr_size); + write_padding(fb_state, fm, repr_size); + } else { + write_padding(fb_state, fm, repr_size); + write_many(fb_state, &repr_buffer[index], repr_size); + } +} + +static uacpi_bool string_has_at_least( + const uacpi_char *string, uacpi_size characters +) +{ + while (*string) { + if (--characters == 0) + return UACPI_TRUE; + + string++; + } + + return UACPI_FALSE; +} + +static uacpi_bool consume_digits( + const uacpi_char **string, uacpi_size *out_size +) +{ + uacpi_size size = 0; + + for (;;) { + char c = **string; + if (c < '0' || c > '9') + break; + + size++; + *string += 1; + } + + if (size == 0) + return UACPI_FALSE; + + *out_size = size; + return UACPI_TRUE; +} + +enum parse_number_mode { + PARSE_NUMBER_MODE_MAYBE, + PARSE_NUMBER_MODE_MUST, +}; + +static uacpi_bool parse_number( + const uacpi_char **fmt, enum parse_number_mode mode, uacpi_u64 *out_value +) +{ + uacpi_status ret; + uacpi_size num_digits; + const uacpi_char *digits = *fmt; + + if (!consume_digits(fmt, &num_digits)) + return mode != PARSE_NUMBER_MODE_MUST; + + ret = uacpi_string_to_integer(digits, num_digits, UACPI_BASE_DEC, out_value); + return ret == UACPI_STATUS_OK; +} + +static uacpi_bool consume(const uacpi_char **string, const uacpi_char *token) +{ + uacpi_size token_size; + + token_size = uacpi_strlen(token); + + if (!string_has_at_least(*string, token_size)) + return UACPI_FALSE; + + if (!uacpi_memcmp(*string, token, token_size)) { + *string += token_size; + return UACPI_TRUE; + } + + return UACPI_FALSE; +} + +static uacpi_bool is_one_of(uacpi_char c, const uacpi_char *list) +{ + for (; *list; list++) { + if (c == *list) + return UACPI_TRUE; + } + + return UACPI_FALSE; +} + +static uacpi_bool consume_one_of( + const uacpi_char **string, const uacpi_char *list, uacpi_char *consumed_char +) +{ + uacpi_char c = **string; + if (!c) + return UACPI_FALSE; + + if (is_one_of(c, list)) { + *consumed_char = c; + *string += 1; + return 
UACPI_TRUE; + } + + return UACPI_FALSE; +} + +static uacpi_u32 base_from_specifier(uacpi_char specifier) +{ + switch (specifier) + { + case 'x': + case 'X': + return 16; + case 'o': + return 8; + default: + return 10; + } +} + +static uacpi_bool is_uppercase_specifier(uacpi_char specifier) +{ + return specifier == 'X'; +} + +static const uacpi_char *find_next_conversion( + const uacpi_char *fmt, uacpi_size *offset +) +{ + *offset = 0; + + while (*fmt) { + if (*fmt == '%') + return fmt; + + fmt++; + *offset += 1; + } + + return UACPI_NULL; +} + +uacpi_i32 uacpi_vsnprintf( + uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, + uacpi_va_list vlist +) +{ + struct fmt_buf_state fb_state = { 0 }; + uacpi_u64 value; + const uacpi_char *next_conversion; + uacpi_size next_offset; + uacpi_char flag; + + fb_state.buffer = buffer; + fb_state.capacity = capacity; + fb_state.bytes_written = 0; + + while (*fmt) { + struct fmt_spec fm = { + .pad_char = ' ', + .base = 10, + }; + next_conversion = find_next_conversion(fmt, &next_offset); + + if (next_offset) + write_many(&fb_state, fmt, next_offset); + + if (!next_conversion) + break; + + fmt = next_conversion; + if (consume(&fmt, "%%")) { + write_one(&fb_state, '%'); + continue; + } + + // consume % + fmt++; + + while (consume_one_of(&fmt, "+- 0#", &flag)) { + switch (flag) { + case '+': + case ' ': + fm.prepend = UACPI_TRUE; + fm.prepend_char = flag; + continue; + case '-': + fm.left_justify = UACPI_TRUE; + continue; + case '0': + fm.pad_char = '0'; + continue; + case '#': + fm.alternate_form = UACPI_TRUE; + continue; + default: + return -1; + } + } + + if (consume(&fmt, "*")) { + fm.min_width = uacpi_va_arg(vlist, int); + } else if (!parse_number(&fmt, PARSE_NUMBER_MODE_MAYBE, &fm.min_width)) { + return -1; + } + + if (consume(&fmt, ".")) { + fm.has_precision = UACPI_TRUE; + + if (consume(&fmt, "*")) { + fm.precision = uacpi_va_arg(vlist, int); + } else { + if (!parse_number(&fmt, PARSE_NUMBER_MODE_MUST, &fm.precision)) + return -1; + } + } + + flag = 0; + + if (consume(&fmt, "c")) { + uacpi_char c = uacpi_va_arg(vlist, int); + write_one(&fb_state, c); + continue; + } + + if (consume(&fmt, "s")) { + const uacpi_char *string = uacpi_va_arg(vlist, uacpi_char*); + uacpi_size i; + + if (uacpi_unlikely(string == UACPI_NULL)) + string = "<null>"; + + for (i = 0; (!fm.has_precision || i < fm.precision) && string[i]; ++i) + write_one(&fb_state, string[i]); + while (i++ < fm.min_width) + write_one(&fb_state, ' '); + continue; + } + + if (consume(&fmt, "p")) { + value = (uacpi_uintptr)uacpi_va_arg(vlist, void*); + fm.base = 16; + fm.min_width = UACPI_POINTER_SIZE * 2; + fm.pad_char = '0'; + goto write_int; + } + + if (consume(&fmt, "hh")) { + if (consume(&fmt, "d") || consume(&fmt, "i")) { + value = (signed char)uacpi_va_arg(vlist, int); + fm.is_signed = UACPI_TRUE; + } else if (consume_one_of(&fmt, "oxXu", &flag)) { + value = (unsigned char)uacpi_va_arg(vlist, int); + } else { + return -1; + } + goto write_int; + } + + if (consume(&fmt, "h")) { + if (consume(&fmt, "d") || consume(&fmt, "i")) { + value = (signed short)uacpi_va_arg(vlist, int); + fm.is_signed = UACPI_TRUE; + } else if (consume_one_of(&fmt, "oxXu", &flag)) { + value = (unsigned short)uacpi_va_arg(vlist, int); + } else { + return -1; + } + goto write_int; + } + + if (consume(&fmt, "ll") || + (sizeof(uacpi_size) == sizeof(long long) && consume(&fmt, "z"))) { + if (consume(&fmt, "d") || consume(&fmt, "i")) { + value = uacpi_va_arg(vlist, long long); + fm.is_signed = UACPI_TRUE; + } else if 
(consume_one_of(&fmt, "oxXu", &flag)) { + value = uacpi_va_arg(vlist, unsigned long long); + } else { + return -1; + } + goto write_int; + } + + if (consume(&fmt, "l") || + (sizeof(uacpi_size) == sizeof(long) && consume(&fmt, "z"))) { + if (consume(&fmt, "d") || consume(&fmt, "i")) { + value = uacpi_va_arg(vlist, long); + fm.is_signed = UACPI_TRUE; + } else if (consume_one_of(&fmt, "oxXu", &flag)) { + value = uacpi_va_arg(vlist, unsigned long); + } else { + return -1; + } + goto write_int; + } + + if (consume(&fmt, "d") || consume(&fmt, "i")) { + value = uacpi_va_arg(vlist, uacpi_i32); + fm.is_signed = UACPI_TRUE; + } else if (consume_one_of(&fmt, "oxXu", &flag)) { + value = uacpi_va_arg(vlist, uacpi_u32); + } else { + return -1; + } + + write_int: + if (flag != 0) { + fm.base = base_from_specifier(flag); + fm.uppercase = is_uppercase_specifier(flag); + } + + write_integer(&fb_state, &fm, value); + } + + if (fb_state.capacity) { + uacpi_size last_char; + + last_char = UACPI_MIN(fb_state.bytes_written, fb_state.capacity - 1); + fb_state.buffer[last_char] = '\0'; + } + + return fb_state.bytes_written; +} +#endif + +#ifndef uacpi_snprintf +uacpi_i32 uacpi_snprintf( + uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ... +) +{ + uacpi_va_list vlist; + uacpi_i32 ret; + + uacpi_va_start(vlist, fmt); + ret = uacpi_vsnprintf(buffer, capacity, fmt, vlist); + uacpi_va_end(vlist); + + return ret; +} +#endif + +#ifndef UACPI_FORMATTED_LOGGING +void uacpi_log(uacpi_log_level lvl, const uacpi_char *str, ...) +{ + uacpi_char buf[UACPI_PLAIN_LOG_BUFFER_SIZE]; + int ret; + + uacpi_va_list vlist; + uacpi_va_start(vlist, str); + + ret = uacpi_vsnprintf(buf, sizeof(buf), str, vlist); + if (uacpi_unlikely(ret < 0)) + return; + + /* + * If this log message is too large for the configured buffer size, cut off + * the end and transform into "...\n" to indicate that it didn't fit and + * prevent the newline from being truncated. 
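+ * + * E.g. with a 128-byte buffer, bytes 123..126 become "...\n" and byte 127 + * keeps the NUL terminator that uacpi_vsnprintf already wrote.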
+ */ + if (uacpi_unlikely(ret >= UACPI_PLAIN_LOG_BUFFER_SIZE)) { + buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 5] = '.'; + buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 4] = '.'; + buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 3] = '.'; + buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 2] = '\n'; + } + + uacpi_kernel_log(lvl, buf); + + uacpi_va_end(vlist); +} +#endif diff --git a/sys/dev/acpi/uacpi/tables.c b/sys/dev/acpi/uacpi/tables.c new file mode 100644 index 0000000..df7d7b9 --- /dev/null +++ b/sys/dev/acpi/uacpi/tables.c @@ -0,0 +1,1399 @@ +#include <uacpi/internal/tables.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/interpreter.h> +#include <uacpi/internal/mutex.h> + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE( + table_array, struct uacpi_installed_table, UACPI_STATIC_TABLE_ARRAY_LEN +) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + table_array, struct uacpi_installed_table, static +) + +static struct table_array tables; +static uacpi_bool early_table_access; +static uacpi_table_installation_handler installation_handler; + +#ifndef UACPI_BAREBONES_MODE + +static uacpi_handle table_mutex; + +#define ENSURE_TABLES_ONLINE() \ + do { \ + if (!early_table_access) \ + UACPI_ENSURE_INIT_LEVEL_AT_LEAST( \ + UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED \ + ); \ + } while (0) + +#else + +/* + * Use a dummy function instead of a macro to prevent the following error: + * error: statement with no effect [-Werror=unused-value] + */ +static inline uacpi_status dummy_mutex_acquire_release(uacpi_handle mtx) +{ + UACPI_UNUSED(mtx); + return UACPI_STATUS_OK; +} + +#define table_mutex UACPI_NULL +#define uacpi_acquire_native_mutex_may_be_null dummy_mutex_acquire_release +#define uacpi_release_native_mutex_may_be_null dummy_mutex_acquire_release + +#define ENSURE_TABLES_ONLINE() \ + do { \ + if (!early_table_access) \ + return UACPI_STATUS_INIT_LEVEL_MISMATCH; \ + } while (0) + +#endif // !UACPI_BAREBONES_MODE + +static uacpi_status table_install_physical_with_origin_unlocked( + uacpi_phys_addr phys, enum uacpi_table_origin origin, + const uacpi_char *expected_signature, uacpi_table *out_table +); +static uacpi_status table_install_with_origin_unlocked( + void *virt, enum uacpi_table_origin origin, uacpi_table *out_table +); + +UACPI_PACKED(struct uacpi_rxsdt { + struct acpi_sdt_hdr hdr; + uacpi_u8 ptr_bytes[]; +}) + +static void dump_table_header( + uacpi_phys_addr phys_addr, void *hdr +) +{ + struct acpi_sdt_hdr *sdt = hdr; + + if (uacpi_signatures_match(hdr, ACPI_FACS_SIGNATURE)) { + uacpi_info( + "FACS 0x%016"UACPI_PRIX64" %08X\n", UACPI_FMT64(phys_addr), + sdt->length + ); + return; + } + + if (!uacpi_memcmp(hdr, ACPI_RSDP_SIGNATURE, sizeof(ACPI_RSDP_SIGNATURE) - 1)) { + struct acpi_rsdp *rsdp = hdr; + + uacpi_info( + "RSDP 0x%016"UACPI_PRIX64" %08X v%02X (%6.6s)\n", + UACPI_FMT64(phys_addr), rsdp->revision >= 2 ? 
rsdp->length : 20, + rsdp->revision, rsdp->oemid + ); + return; + } + + uacpi_info( + "%.4s 0x%016"UACPI_PRIX64" %08X v%02X (%6.6s %8.8s)\n", + sdt->signature, UACPI_FMT64(phys_addr), sdt->length, sdt->revision, + sdt->oemid, sdt->oem_table_id + ); +} + +static uacpi_status initialize_from_rxsdt(uacpi_phys_addr rxsdt_addr, + uacpi_size entry_size) +{ + struct uacpi_rxsdt *rxsdt; + uacpi_size i, entry_bytes, map_len = sizeof(*rxsdt); + uacpi_phys_addr entry_addr; + uacpi_status ret; + + rxsdt = uacpi_kernel_map(rxsdt_addr, map_len); + if (rxsdt == UACPI_NULL) + return UACPI_STATUS_MAPPING_FAILED; + + dump_table_header(rxsdt_addr, rxsdt); + + ret = uacpi_check_table_signature(rxsdt, + entry_size == 8 ? ACPI_XSDT_SIGNATURE : ACPI_RSDT_SIGNATURE); + if (uacpi_unlikely_error(ret)) + goto error_out; + + map_len = rxsdt->hdr.length; + uacpi_kernel_unmap(rxsdt, sizeof(*rxsdt)); + + if (uacpi_unlikely(map_len < (sizeof(*rxsdt) + entry_size))) + return UACPI_STATUS_INVALID_TABLE_LENGTH; + + // Make sure length is aligned to entry size so we don't OOB + entry_bytes = map_len - sizeof(*rxsdt); + entry_bytes &= ~(entry_size - 1); + + rxsdt = uacpi_kernel_map(rxsdt_addr, map_len); + if (uacpi_unlikely(rxsdt == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + ret = uacpi_verify_table_checksum(rxsdt, map_len); + if (uacpi_unlikely_error(ret)) + goto error_out; + + for (i = 0; i < entry_bytes; i += entry_size) { + uacpi_u64 entry_phys_addr_large = 0; + uacpi_memcpy(&entry_phys_addr_large, &rxsdt->ptr_bytes[i], entry_size); + + if (!entry_phys_addr_large) + continue; + + entry_addr = uacpi_truncate_phys_addr_with_warn(entry_phys_addr_large); + ret = uacpi_table_install_physical_with_origin( + entry_addr, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL, UACPI_NULL + ); + if (uacpi_unlikely(ret != UACPI_STATUS_OK && + ret != UACPI_STATUS_OVERRIDDEN)) + goto error_out; + } + + ret = UACPI_STATUS_OK; + +error_out: + uacpi_kernel_unmap(rxsdt, map_len); + return ret; +} + +static uacpi_status initialize_from_rsdp(void) +{ + uacpi_status ret; + uacpi_phys_addr rsdp_phys; + struct acpi_rsdp *rsdp; + uacpi_phys_addr rxsdt; + uacpi_size rxsdt_entry_size; + + g_uacpi_rt_ctx.is_rev1 = UACPI_TRUE; + + ret = uacpi_kernel_get_rsdp(&rsdp_phys); + if (uacpi_unlikely_error(ret)) + return ret; + + rsdp = uacpi_kernel_map(rsdp_phys, sizeof(struct acpi_rsdp)); + if (rsdp == UACPI_NULL) + return UACPI_STATUS_MAPPING_FAILED; + + dump_table_header(rsdp_phys, rsdp); + + if (rsdp->revision > 1 && rsdp->xsdt_addr && + !uacpi_check_flag(UACPI_FLAG_BAD_XSDT)) + { + rxsdt = uacpi_truncate_phys_addr_with_warn(rsdp->xsdt_addr); + rxsdt_entry_size = 8; + } else { + rxsdt = (uacpi_phys_addr)rsdp->rsdt_addr; + rxsdt_entry_size = 4; + } + + uacpi_kernel_unmap(rsdp, sizeof(struct acpi_rsdp)); + + if (!rxsdt) { + uacpi_error("both RSDT & XSDT tables are NULL!\n"); + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return initialize_from_rxsdt(rxsdt, rxsdt_entry_size); +} + +uacpi_status uacpi_setup_early_table_access( + void *temporary_buffer, uacpi_size buffer_size +) +{ + uacpi_status ret; + +#ifndef UACPI_BAREBONES_MODE + UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY); +#endif + if (uacpi_unlikely(early_table_access)) + return UACPI_STATUS_INIT_LEVEL_MISMATCH; + + if (uacpi_unlikely(buffer_size < sizeof(struct uacpi_installed_table))) + return UACPI_STATUS_INVALID_ARGUMENT; + + uacpi_logger_initialize(); + + tables.dynamic_storage = temporary_buffer; + tables.dynamic_capacity = buffer_size / sizeof(struct uacpi_installed_table); + 
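+    /*
+     * The caller's buffer is adopted in place, so it must stay valid until
+     * uacpi_initialize_tables() migrates any entries living in it into a
+     * regular heap allocation. A host would typically do something along
+     * these lines (hypothetical early-boot code):
+     *
+     *     static uacpi_u8 early_buf[4096];
+     *     uacpi_table tbl;
+     *
+     *     uacpi_setup_early_table_access(early_buf, sizeof(early_buf));
+     *     uacpi_table_find_by_signature("HPET", &tbl);
+     */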
early_table_access = UACPI_TRUE; + + ret = initialize_from_rsdp(); + if (uacpi_unlikely_error(ret)) + uacpi_deinitialize_tables(); + + return ret; +} + +#ifndef UACPI_BAREBONES_MODE +static uacpi_iteration_decision warn_if_early_referenced( + void *user, struct uacpi_installed_table *tbl, uacpi_size idx +) +{ + UACPI_UNUSED(user); + + if (uacpi_unlikely(tbl->reference_count != 0)) { + uacpi_warn( + "table "UACPI_PRI_TBL_HDR" (%zu) still has %d early reference(s)!\n", + UACPI_FMT_TBL_HDR(&tbl->hdr), idx, tbl->reference_count + ); + } + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +uacpi_status uacpi_initialize_tables(void) +{ + if (early_table_access) { + uacpi_size num_tables; + + uacpi_for_each_table(0, warn_if_early_referenced, UACPI_NULL); + + // Reallocate the user buffer into a normal heap array + num_tables = table_array_size(&tables); + if (num_tables > table_array_inline_capacity(&tables)) { + void *new_buf; + + /* + * Allocate a new buffer with size equal to exactly the number of + * dynamic tables (that live in the user provided temporary buffer). + */ + num_tables -= table_array_inline_capacity(&tables); + new_buf = uacpi_kernel_alloc( + sizeof(struct uacpi_installed_table) * num_tables + ); + if (uacpi_unlikely(new_buf == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy(new_buf, tables.dynamic_storage, + sizeof(struct uacpi_installed_table) * num_tables); + tables.dynamic_storage = new_buf; + tables.dynamic_capacity = num_tables; + } else { + /* + * User-provided temporary buffer was not used at all, just remove + * any references to it. + */ + tables.dynamic_storage = UACPI_NULL; + tables.dynamic_capacity = 0; + } + + early_table_access = UACPI_FALSE; + } else { + uacpi_status ret; + + ret = initialize_from_rsdp(); + if (uacpi_unlikely_error(ret)) + return ret; + } + + if (!uacpi_is_hardware_reduced()) { + struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt; + uacpi_table tbl; + + if (fadt->x_firmware_ctrl) { + uacpi_status ret; + + ret = table_install_physical_with_origin_unlocked( + fadt->x_firmware_ctrl, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL, + ACPI_FACS_SIGNATURE, &tbl + ); + if (uacpi_unlikely(ret != UACPI_STATUS_OK && + ret != UACPI_STATUS_OVERRIDDEN)) + return ret; + + g_uacpi_rt_ctx.facs = tbl.ptr; + } + } + + table_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(table_mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} +#endif // !UACPI_BAREBONES_MODE + +void uacpi_deinitialize_tables(void) +{ + uacpi_size i; + + for (i = 0; i < table_array_size(&tables); ++i) { + struct uacpi_installed_table *tbl = table_array_at(&tables, i); + + switch (tbl->origin) { +#ifndef UACPI_BAREBONES_MODE + case UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL: + uacpi_free(tbl->ptr, tbl->hdr.length); + break; +#endif + case UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL: + case UACPI_TABLE_ORIGIN_HOST_PHYSICAL: + if (tbl->reference_count != 0) + uacpi_kernel_unmap(tbl->ptr, tbl->hdr.length); + break; + default: + break; + } + } + + if (early_table_access) { + uacpi_memzero(&tables, sizeof(tables)); + early_table_access = UACPI_FALSE; + } else { + table_array_clear(&tables); + } + + installation_handler = UACPI_NULL; + +#ifndef UACPI_BAREBONES_MODE + if (table_mutex) + uacpi_kernel_free_mutex(table_mutex); + + table_mutex = UACPI_NULL; +#endif +} + +uacpi_status uacpi_set_table_installation_handler( + uacpi_table_installation_handler handler +) +{ + uacpi_status ret; + + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if 
(uacpi_unlikely_error(ret)) + return ret; + + if (installation_handler != UACPI_NULL && handler != UACPI_NULL) + goto out; + + installation_handler = handler; + +out: + uacpi_release_native_mutex_may_be_null(table_mutex); + return ret; +} + +static uacpi_status initialize_fadt(const void*); + +static uacpi_u8 table_checksum(void *table, uacpi_size size) +{ + uacpi_u8 *bytes = table; + uacpi_u8 csum = 0; + uacpi_size i; + + for (i = 0; i < size; ++i) + csum += bytes[i]; + + return csum; +} + +uacpi_status uacpi_verify_table_checksum(void *table, uacpi_size size) +{ + uacpi_status ret = UACPI_STATUS_OK; + uacpi_u8 csum; + + csum = table_checksum(table, size); + + if (uacpi_unlikely(csum != 0)) { + enum uacpi_log_level lvl = UACPI_LOG_WARN; + struct acpi_sdt_hdr *hdr = table; + + if (uacpi_check_flag(UACPI_FLAG_BAD_CSUM_FATAL)) { + ret = UACPI_STATUS_BAD_CHECKSUM; + lvl = UACPI_LOG_ERROR; + } + + uacpi_log_lvl( + lvl, "invalid table "UACPI_PRI_TBL_HDR" checksum %d!\n", + UACPI_FMT_TBL_HDR(hdr), csum + ); + } + + return ret; +} + +uacpi_bool uacpi_signatures_match(const void *const lhs, const void *const rhs) +{ + return uacpi_memcmp(lhs, rhs, sizeof(uacpi_object_name)) == 0; +} + +uacpi_status uacpi_check_table_signature(void *table, const uacpi_char *expect) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (!uacpi_signatures_match(table, expect)) { + enum uacpi_log_level lvl = UACPI_LOG_WARN; + struct acpi_sdt_hdr *hdr = table; + + if (uacpi_check_flag(UACPI_FLAG_BAD_TBL_SIGNATURE_FATAL)) { + ret = UACPI_STATUS_INVALID_SIGNATURE; + lvl = UACPI_LOG_ERROR; + } + + uacpi_log_lvl( + lvl, + "invalid table "UACPI_PRI_TBL_HDR" signature (expected '%.4s')\n", + UACPI_FMT_TBL_HDR(hdr), expect + ); + } + + return ret; +} + +static uacpi_status table_alloc( + struct uacpi_installed_table **out_tbl, uacpi_size *out_idx +) +{ + struct uacpi_installed_table *tbl; + + if (early_table_access && + table_array_size(&tables) == table_array_capacity(&tables)) { + uacpi_warn("early table access buffer capacity exhausted!\n"); + return UACPI_STATUS_OUT_OF_MEMORY; + } + + tbl = table_array_alloc(&tables); + if (uacpi_unlikely(tbl == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + *out_tbl = tbl; + *out_idx = table_array_size(&tables) - 1; + return UACPI_STATUS_OK; +} + +static uacpi_status get_external_table_header( + uacpi_phys_addr phys_addr, struct acpi_sdt_hdr *out_hdr +) +{ + void *virt; + + virt = uacpi_kernel_map(phys_addr, sizeof(*out_hdr)); + if (uacpi_unlikely(virt == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + uacpi_memcpy(out_hdr, virt, sizeof(*out_hdr)); + + uacpi_kernel_unmap(virt, sizeof(*out_hdr)); + return UACPI_STATUS_OK; +} + +static uacpi_status table_ref_unlocked(struct uacpi_installed_table *tbl) +{ + switch (tbl->reference_count) { + case 0: { + uacpi_status ret; + + if (tbl->flags & UACPI_TABLE_INVALID) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (tbl->origin != UACPI_TABLE_ORIGIN_HOST_PHYSICAL && + tbl->origin != UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL) + break; + + tbl->ptr = uacpi_kernel_map(tbl->phys_addr, tbl->hdr.length); + if (uacpi_unlikely(tbl->ptr == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + if (!(tbl->flags & UACPI_TABLE_CSUM_VERIFIED)) { + ret = uacpi_verify_table_checksum(tbl->ptr, tbl->hdr.length); + if (uacpi_unlikely_error(ret)) { + uacpi_kernel_unmap(tbl->ptr, tbl->hdr.length); + tbl->flags |= UACPI_TABLE_INVALID; + tbl->ptr = UACPI_NULL; + return ret; + } + + tbl->flags |= UACPI_TABLE_CSUM_VERIFIED; + } + break; + } + case 0xFFFF - 1: + 
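+        /*
+         * Taking this reference saturates the count at 0xFFFF; past that
+         * point the real number of users is unknown, so the table is kept
+         * mapped forever (see the 0xFFFF case in table_unref_unlocked).
+         */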
uacpi_warn( + "too many references for "UACPI_PRI_TBL_HDR + ", mapping permanently\n", UACPI_FMT_TBL_HDR(&tbl->hdr) + ); + break; + default: + break; + } + + if (uacpi_likely(tbl->reference_count != 0xFFFF)) + tbl->reference_count++; + return UACPI_STATUS_OK; +} + +static uacpi_status table_unref_unlocked(struct uacpi_installed_table *tbl) +{ + switch (tbl->reference_count) { + case 0: + uacpi_warn( + "tried to unref table "UACPI_PRI_TBL_HDR" with no references\n", + UACPI_FMT_TBL_HDR(&tbl->hdr) + ); + return UACPI_STATUS_INVALID_ARGUMENT; + case 1: + if (tbl->origin != UACPI_TABLE_ORIGIN_HOST_PHYSICAL && + tbl->origin != UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL) + break; + + uacpi_kernel_unmap(tbl->ptr, tbl->hdr.length); + tbl->ptr = UACPI_NULL; + break; + case 0xFFFF: + /* + * Consider the reference count (overflow) of 0xFFFF to be a permanently + * mapped table as we don't know the actual number of references. + */ + return UACPI_STATUS_OK; + default: + break; + } + + tbl->reference_count--; + return UACPI_STATUS_OK; +} + +static uacpi_status verify_and_install_table( + struct acpi_sdt_hdr *hdr, uacpi_phys_addr phys_addr, void *virt_addr, + enum uacpi_table_origin origin, uacpi_table *out_table +) +{ + uacpi_status ret; + struct uacpi_installed_table *table; + uacpi_bool is_fadt; + uacpi_size idx; + uacpi_u8 flags = 0; + + is_fadt = uacpi_signatures_match(hdr->signature, ACPI_FADT_SIGNATURE); + + /* + * FACS is the only(?) table without a checksum because it has OSPM + * writable fields. Don't try to validate it here. + */ + if (uacpi_signatures_match(hdr->signature, ACPI_FACS_SIGNATURE)) { + flags |= UACPI_TABLE_CSUM_VERIFIED; + } else if (uacpi_check_flag(UACPI_FLAG_PROACTIVE_TBL_CSUM) || is_fadt || + out_table != UACPI_NULL) { + void *mapping = virt_addr; + + // We may already have a valid mapping, reuse it if we do + if (mapping == UACPI_NULL) + mapping = uacpi_kernel_map(phys_addr, hdr->length); + if (uacpi_unlikely(mapping == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + ret = uacpi_verify_table_checksum(mapping, hdr->length); + if (uacpi_likely_success(ret)) { + if (is_fadt) + ret = initialize_fadt(mapping); + flags |= UACPI_TABLE_CSUM_VERIFIED; + } + + if (virt_addr == UACPI_NULL) + uacpi_kernel_unmap(mapping, hdr->length); + + if (uacpi_unlikely_error(ret)) + return ret; + } + + if (uacpi_signatures_match(hdr->signature, ACPI_DSDT_SIGNATURE)) + g_uacpi_rt_ctx.is_rev1 = hdr->revision < 2; + + ret = table_alloc(&table, &idx); + if (uacpi_unlikely_error(ret)) + return ret; + + dump_table_header(phys_addr, hdr); + + uacpi_memcpy(&table->hdr, hdr, sizeof(*hdr)); + table->reference_count = 0; + table->phys_addr = phys_addr; + table->ptr = virt_addr; + table->flags = flags; + table->origin = origin; + + if (out_table == UACPI_NULL) + return UACPI_STATUS_OK; + + table->reference_count++; + out_table->ptr = virt_addr; + out_table->index = idx; + return UACPI_STATUS_OK; +} + +static uacpi_status handle_table_override( + uacpi_table_installation_disposition disposition, uacpi_u64 address, + uacpi_table *out_table +) +{ + uacpi_status ret; + + switch (disposition) { + case UACPI_TABLE_INSTALLATION_DISPOSITON_VIRTUAL_OVERRIDE: + ret = table_install_with_origin_unlocked( + UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)address), + UACPI_TABLE_ORIGIN_HOST_VIRTUAL, + out_table + ); + return ret; + case UACPI_TABLE_INSTALLATION_DISPOSITON_PHYSICAL_OVERRIDE: + return table_install_physical_with_origin_unlocked( + (uacpi_phys_addr)address, + UACPI_TABLE_ORIGIN_HOST_PHYSICAL, + UACPI_NULL, + 
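+            /* the UACPI_NULL skips the expected-signature check for overrides */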
out_table
+        );
+    default:
+        uacpi_error("invalid table installation disposition %d\n", disposition);
+        return UACPI_STATUS_INTERNAL_ERROR;
+    }
+}
+
+static uacpi_status table_install_physical_with_origin_unlocked(
+    uacpi_phys_addr phys, enum uacpi_table_origin origin,
+    const uacpi_char *expected_signature, uacpi_table *out_table
+)
+{
+    struct acpi_sdt_hdr hdr;
+    void *virt = UACPI_NULL;
+    uacpi_status ret;
+
+    ret = get_external_table_header(phys, &hdr);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    if (uacpi_unlikely(hdr.length < sizeof(struct acpi_sdt_hdr))) {
+        uacpi_error("invalid table '%.4s' (0x%016"UACPI_PRIX64") size: %u\n",
+                    hdr.signature, UACPI_FMT64(phys), hdr.length);
+        return UACPI_STATUS_INVALID_TABLE_LENGTH;
+    }
+
+    if (expected_signature != UACPI_NULL) {
+        ret = uacpi_check_table_signature(&hdr, expected_signature);
+        if (uacpi_unlikely_error(ret))
+            return ret;
+    }
+
+    if (installation_handler != UACPI_NULL || out_table != UACPI_NULL) {
+        virt = uacpi_kernel_map(phys, hdr.length);
+        if (uacpi_unlikely(!virt))
+            return UACPI_STATUS_MAPPING_FAILED;
+    }
+
+    if (origin == UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL &&
+        installation_handler != UACPI_NULL) {
+        uacpi_u64 override;
+        uacpi_table_installation_disposition disposition;
+
+        disposition = installation_handler(virt, &override);
+
+        switch (disposition) {
+        case UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW:
+            break;
+        case UACPI_TABLE_INSTALLATION_DISPOSITON_DENY:
+            uacpi_info(
+                "table '%.4s' (0x%016"UACPI_PRIX64") installation denied "
+                "by host\n", hdr.signature, UACPI_FMT64(phys)
+            );
+            ret = UACPI_STATUS_DENIED;
+            goto out;
+
+        default:
+            uacpi_info(
+                "table '%.4s' (0x%016"UACPI_PRIX64") installation "
+                "overridden by host\n", hdr.signature, UACPI_FMT64(phys)
+            );
+
+            ret = handle_table_override(disposition, override, out_table);
+            if (uacpi_likely_success(ret))
+                ret = UACPI_STATUS_OVERRIDDEN;
+
+            goto out;
+        }
+    }
+
+    ret = verify_and_install_table(&hdr, phys, virt, origin, out_table);
+out:
+    // The only case in which we keep the mapping: it is handed to the caller
+    if (ret == UACPI_STATUS_OK && out_table != UACPI_NULL)
+        return ret;
+
+    if (virt != UACPI_NULL)
+        uacpi_kernel_unmap(virt, hdr.length);
+    return ret;
+}
+
+uacpi_status uacpi_table_install_physical_with_origin(
+    uacpi_phys_addr phys, enum uacpi_table_origin origin, uacpi_table *out_table
+)
+{
+    uacpi_status ret;
+
+    ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    ret = table_install_physical_with_origin_unlocked(
+        phys, origin, UACPI_NULL, out_table
+    );
+    uacpi_release_native_mutex_may_be_null(table_mutex);
+
+    return ret;
+}
+
+static uacpi_status table_install_with_origin_unlocked(
+    void *virt, enum uacpi_table_origin origin, uacpi_table *out_table
+)
+{
+    struct acpi_sdt_hdr *hdr = virt;
+
+    if (uacpi_unlikely(hdr->length < sizeof(struct acpi_sdt_hdr))) {
+        uacpi_error("invalid table '%.4s' (%p) size: %u\n",
+                    hdr->signature, virt, hdr->length);
+        return UACPI_STATUS_INVALID_TABLE_LENGTH;
+    }
+
+#ifndef UACPI_BAREBONES_MODE
+    if (origin == UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL &&
+        installation_handler != UACPI_NULL) {
+        uacpi_u64 override;
+        uacpi_table_installation_disposition disposition;
+
+        disposition = installation_handler(virt, &override);
+
+        switch (disposition) {
+        case UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW:
+            break;
+        case UACPI_TABLE_INSTALLATION_DISPOSITON_DENY:
+            uacpi_info(
+                "table "UACPI_PRI_TBL_HDR" installation denied by host\n",
+                UACPI_FMT_TBL_HDR(hdr)
+            );
+            return
UACPI_STATUS_DENIED; + + default: { + uacpi_status ret; + uacpi_info( + "table "UACPI_PRI_TBL_HDR" installation overridden by host\n", + UACPI_FMT_TBL_HDR(hdr) + ); + + ret = handle_table_override(disposition, override, out_table); + if (uacpi_likely_success(ret)) + ret = UACPI_STATUS_OVERRIDDEN; + + return ret; + } + } + } +#endif + + return verify_and_install_table( + hdr, 0, virt, origin, out_table + ); +} + +uacpi_status uacpi_table_install_with_origin( + void *virt, enum uacpi_table_origin origin, uacpi_table *out_table +) +{ + uacpi_status ret; + + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = table_install_with_origin_unlocked(virt, origin, out_table); + + uacpi_release_native_mutex_may_be_null(table_mutex); + return ret; +} + +uacpi_status uacpi_table_install(void *virt, uacpi_table *out_table) +{ + ENSURE_TABLES_ONLINE(); + + return uacpi_table_install_with_origin( + virt, UACPI_TABLE_ORIGIN_HOST_VIRTUAL, out_table + ); +} + +uacpi_status uacpi_table_install_physical( + uacpi_phys_addr addr, uacpi_table *out_table +) +{ + ENSURE_TABLES_ONLINE(); + + return uacpi_table_install_physical_with_origin( + addr, UACPI_TABLE_ORIGIN_HOST_PHYSICAL, out_table + ); +} + +uacpi_status uacpi_for_each_table( + uacpi_size base_idx, uacpi_table_iteration_callback cb, void *user +) +{ + uacpi_status ret; + uacpi_size idx; + struct uacpi_installed_table *tbl; + uacpi_iteration_decision dec; + + ENSURE_TABLES_ONLINE(); + + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + for (idx = base_idx; idx < table_array_size(&tables); ++idx) { + tbl = table_array_at(&tables, idx); + + if (tbl->flags & UACPI_TABLE_INVALID) + continue; + + dec = cb(user, tbl, idx); + if (dec == UACPI_ITERATION_DECISION_BREAK) + break; + } + + uacpi_release_native_mutex_may_be_null(table_mutex); + return ret; +} + +enum search_type { + SEARCH_TYPE_BY_ID, + SEARCH_TYPE_MATCH, +}; + +struct table_search_ctx { + union { + const uacpi_table_identifiers *id; + uacpi_table_match_callback match_cb; + }; + + uacpi_table *out_table; + uacpi_u8 search_type; + uacpi_status status; +}; + +static uacpi_iteration_decision do_search_tables( + void *user, struct uacpi_installed_table *tbl, uacpi_size idx +) +{ + struct table_search_ctx *ctx = user; + uacpi_table *out_table; + uacpi_status ret; + + switch (ctx->search_type) { + case SEARCH_TYPE_BY_ID: { + const uacpi_table_identifiers *id = ctx->id; + + if (!uacpi_signatures_match(&id->signature, tbl->hdr.signature)) + return UACPI_ITERATION_DECISION_CONTINUE; + if (id->oemid[0] != '\0' && + uacpi_memcmp(id->oemid, tbl->hdr.oemid, sizeof(id->oemid)) != 0) + return UACPI_ITERATION_DECISION_CONTINUE; + + if (id->oem_table_id[0] != '\0' && + uacpi_memcmp(id->oem_table_id, tbl->hdr.oem_table_id, + sizeof(id->oem_table_id)) != 0) + return UACPI_ITERATION_DECISION_CONTINUE; + + break; + } + + case SEARCH_TYPE_MATCH: + if (!ctx->match_cb(tbl)) + return UACPI_ITERATION_DECISION_CONTINUE; + break; + + default: + ctx->status = UACPI_STATUS_INVALID_ARGUMENT; + return UACPI_ITERATION_DECISION_BREAK; + } + + ret = table_ref_unlocked(tbl); + if (uacpi_likely_success(ret)) { + out_table = ctx->out_table; + out_table->ptr = tbl->ptr; + out_table->index = idx; + ctx->status = ret; + return UACPI_ITERATION_DECISION_BREAK; + } + + /* + * Don't abort nor propagate bad checksums, just pretend this table never + * existed and go on with the search. 
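+     * For example, one corrupted SSDT among several should not prevent
+     * uacpi_table_find_next_with_same_signature() from reaching the
+     * remaining valid instances.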
+     */
+    if (ret == UACPI_STATUS_BAD_CHECKSUM)
+        return UACPI_ITERATION_DECISION_CONTINUE;
+
+    ctx->status = ret;
+    return UACPI_ITERATION_DECISION_BREAK;
+}
+
+#ifndef UACPI_BAREBONES_MODE
+uacpi_status uacpi_table_match(
+    uacpi_size base_idx, uacpi_table_match_callback cb, uacpi_table *out_table
+)
+{
+    uacpi_status ret;
+    struct table_search_ctx ctx = { 0 };
+
+    ctx.match_cb = cb;
+    ctx.search_type = SEARCH_TYPE_MATCH;
+    ctx.out_table = out_table;
+    ctx.status = UACPI_STATUS_NOT_FOUND;
+
+    ret = uacpi_for_each_table(base_idx, do_search_tables, &ctx);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    return ctx.status;
+}
+#endif
+
+static uacpi_status find_table(
+    uacpi_size base_idx, const uacpi_table_identifiers *id,
+    uacpi_table *out_table
+)
+{
+    uacpi_status ret;
+    struct table_search_ctx ctx = { 0 };
+
+    ctx.id = id;
+    ctx.out_table = out_table;
+    ctx.search_type = SEARCH_TYPE_BY_ID;
+    ctx.status = UACPI_STATUS_NOT_FOUND;
+
+    ret = uacpi_for_each_table(base_idx, do_search_tables, &ctx);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    return ctx.status;
+}
+
+uacpi_status uacpi_table_find_by_signature(
+    const uacpi_char *signature_string, struct uacpi_table *out_table
+)
+{
+    struct uacpi_table_identifiers id = { 0 };
+
+    id.signature.text[0] = signature_string[0];
+    id.signature.text[1] = signature_string[1];
+    id.signature.text[2] = signature_string[2];
+    id.signature.text[3] = signature_string[3];
+
+    ENSURE_TABLES_ONLINE();
+
+    return find_table(0, &id, out_table);
+}
+
+uacpi_status uacpi_table_find_next_with_same_signature(
+    uacpi_table *in_out_table
+)
+{
+    struct uacpi_table_identifiers id = { 0 };
+
+    ENSURE_TABLES_ONLINE();
+
+    if (uacpi_unlikely(in_out_table->ptr == UACPI_NULL))
+        return UACPI_STATUS_INVALID_ARGUMENT;
+
+    uacpi_memcpy(&id.signature, in_out_table->hdr->signature,
+                 sizeof(id.signature));
+    uacpi_table_unref(in_out_table);
+
+    return find_table(in_out_table->index + 1, &id, in_out_table);
+}
+
+uacpi_status uacpi_table_find(
+    const uacpi_table_identifiers *id, uacpi_table *out_table
+)
+{
+    ENSURE_TABLES_ONLINE();
+
+    return find_table(0, id, out_table);
+}
+
+#define TABLE_CTL_SET_FLAGS (1 << 0)
+#define TABLE_CTL_CLEAR_FLAGS (1 << 1)
+#define TABLE_CTL_VALIDATE_SET_FLAGS (1 << 2)
+#define TABLE_CTL_VALIDATE_CLEAR_FLAGS (1 << 3)
+#define TABLE_CTL_GET (1 << 4)
+#define TABLE_CTL_PUT (1 << 5)
+
+struct table_ctl_request {
+    uacpi_u8 type;
+
+    uacpi_u8 expect_set;
+    uacpi_u8 expect_clear;
+    uacpi_u8 set;
+    uacpi_u8 clear;
+
+    void *out_tbl;
+};
+
+static uacpi_status table_ctl(uacpi_size idx, struct table_ctl_request *req)
+{
+    uacpi_status ret;
+    struct uacpi_installed_table *tbl;
+
+    ENSURE_TABLES_ONLINE();
+
+    ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    if (uacpi_unlikely(table_array_size(&tables) <= idx)) {
+        uacpi_error(
+            "requested invalid table index %zu (%zu tables installed)\n",
+            idx, table_array_size(&tables)
+        );
+        ret = UACPI_STATUS_INVALID_ARGUMENT;
+        goto out;
+    }
+
+    tbl = table_array_at(&tables, idx);
+    if (uacpi_unlikely(tbl->flags & UACPI_TABLE_INVALID)) {
+        // Must not return directly here: table_mutex is still held
+        ret = UACPI_STATUS_INVALID_ARGUMENT;
+        goto out;
+    }
+
+    if (req->type & TABLE_CTL_VALIDATE_SET_FLAGS) {
+        uacpi_u8 mask = req->expect_set;
+
+        if (uacpi_unlikely((tbl->flags & mask) != mask)) {
+            uacpi_error(
+                "unexpected table '%.4s' flags %02X, expected %02X to be set\n",
+                tbl->hdr.signature, tbl->flags, mask
+            );
+            ret = UACPI_STATUS_INVALID_ARGUMENT;
+            goto out;
+        }
+    }
+
+    if (req->type & TABLE_CTL_VALIDATE_CLEAR_FLAGS)
{ + uacpi_u8 mask = req->expect_clear; + + if (uacpi_unlikely((tbl->flags & mask) != 0)) { + uacpi_error( + "unexpected table '%.4s' flags %02X, expected %02X " + "to be clear\n", tbl->hdr.signature, tbl->flags, mask + ); + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + } + + if (req->type & TABLE_CTL_GET) { + ret = table_ref_unlocked(tbl); + if (uacpi_unlikely_error(ret)) + goto out; + + req->out_tbl = tbl->ptr; + } + + if (req->type & TABLE_CTL_PUT) { + ret = table_unref_unlocked(tbl); + if (uacpi_unlikely_error(ret)) + goto out; + } + + if (req->type & TABLE_CTL_SET_FLAGS) + tbl->flags |= req->set; + if (req->type & TABLE_CTL_CLEAR_FLAGS) + tbl->flags &= ~req->clear; + +out: + uacpi_release_native_mutex_may_be_null(table_mutex); + return ret; +} + +#ifndef UACPI_BAREBONES_MODE +uacpi_status uacpi_table_load_with_cause( + uacpi_size idx, enum uacpi_table_load_cause cause +) +{ + uacpi_status ret; + struct table_ctl_request req = { + .type = TABLE_CTL_SET_FLAGS | TABLE_CTL_VALIDATE_CLEAR_FLAGS | + TABLE_CTL_GET, + .set = UACPI_TABLE_LOADED, + .expect_clear = UACPI_TABLE_LOADED, + }; + + ret = table_ctl(idx, &req); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_execute_table(req.out_tbl, cause); + + req.type = TABLE_CTL_PUT; + table_ctl(idx, &req); + return ret; +} + +uacpi_status uacpi_table_load(uacpi_size idx) +{ + return uacpi_table_load_with_cause(idx, UACPI_TABLE_LOAD_CAUSE_HOST); +} + +void uacpi_table_mark_as_loaded(uacpi_size idx) +{ + struct table_ctl_request req = { + .type = TABLE_CTL_SET_FLAGS, .set = UACPI_TABLE_LOADED + }; + + table_ctl(idx, &req); +} +#endif // !UACPI_BAREBONES_MODE + +uacpi_status uacpi_table_ref(uacpi_table *tbl) +{ + struct table_ctl_request req = { + .type = TABLE_CTL_GET + }; + + return table_ctl(tbl->index, &req); +} + +uacpi_status uacpi_table_unref(uacpi_table *tbl) +{ + struct table_ctl_request req = { + .type = TABLE_CTL_PUT + }; + + return table_ctl(tbl->index, &req); +} + +uacpi_u16 fadt_version_sizes[] = { + 116, 132, 244, 244, 268, 276 +}; + +static void fadt_ensure_correct_revision(struct acpi_fadt *fadt) +{ + uacpi_size current_rev, rev; + + current_rev = fadt->hdr.revision; + + for (rev = 0; rev < UACPI_ARRAY_SIZE(fadt_version_sizes); ++rev) { + if (fadt->hdr.length <= fadt_version_sizes[rev]) + break; + } + + if (rev == UACPI_ARRAY_SIZE(fadt_version_sizes)) { + uacpi_trace( + "FADT revision (%zu) is likely greater than the last " + "supported, reducing to %zu\n", current_rev, rev + ); + fadt->hdr.revision = rev; + return; + } + + rev++; + + if (current_rev != rev && !(rev == 3 && current_rev == 4)) { + uacpi_warn( + "FADT length %u doesn't match expected for revision %zu, " + "assuming version %zu\n", fadt->hdr.length, current_rev, + rev + ); + fadt->hdr.revision = rev; + } +} + +static void gas_init_system_io( + struct acpi_gas *gas, uacpi_u64 address, uacpi_u8 byte_size +) +{ + gas->address = address; + gas->address_space_id = UACPI_ADDRESS_SPACE_SYSTEM_IO; + gas->register_bit_width = UACPI_MIN(255, byte_size * 8); + gas->register_bit_offset = 0; + gas->access_size = 0; +} + + +struct register_description { + uacpi_size offset, xoffset; + uacpi_size length_offset; +}; + +#define fadt_offset(field) uacpi_offsetof(struct acpi_fadt, field) + +/* + * We convert all the legacy registers into GAS format and write them into + * the x_* fields for convenience and faster access at runtime. 
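+ * For example, a FADT that only provides the legacy pm_tmr_blk/pm_tmr_len
+ * pair gets an equivalent SystemIO GAS written into x_pm_tmr_blk by
+ * convert_registers_to_gas() below.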
+ */ +static struct register_description fadt_registers[] = { + { + .offset = fadt_offset(pm1a_evt_blk), + .xoffset = fadt_offset(x_pm1a_evt_blk), + .length_offset = fadt_offset(pm1_evt_len), + }, + { + .offset = fadt_offset(pm1b_evt_blk), + .xoffset = fadt_offset(x_pm1b_evt_blk), + .length_offset = fadt_offset(pm1_evt_len), + }, + { + .offset = fadt_offset(pm1a_cnt_blk), + .xoffset = fadt_offset(x_pm1a_cnt_blk), + .length_offset = fadt_offset(pm1_cnt_len), + }, + { + .offset = fadt_offset(pm1b_cnt_blk), + .xoffset = fadt_offset(x_pm1b_cnt_blk), + .length_offset = fadt_offset(pm1_cnt_len), + }, + { + .offset = fadt_offset(pm2_cnt_blk), + .xoffset = fadt_offset(x_pm2_cnt_blk), + .length_offset = fadt_offset(pm2_cnt_len), + }, + { + .offset = fadt_offset(pm_tmr_blk), + .xoffset = fadt_offset(x_pm_tmr_blk), + .length_offset = fadt_offset(pm_tmr_len), + }, + { + .offset = fadt_offset(gpe0_blk), + .xoffset = fadt_offset(x_gpe0_blk), + .length_offset = fadt_offset(gpe0_blk_len), + }, + { + .offset = fadt_offset(gpe1_blk), + .xoffset = fadt_offset(x_gpe1_blk), + .length_offset = fadt_offset(gpe1_blk_len), + }, +}; + +static void *fadt_relative(uacpi_size offset) +{ + return ((uacpi_u8*)&g_uacpi_rt_ctx.fadt) + offset; +} + +static void convert_registers_to_gas(void) +{ + uacpi_size i; + struct register_description *desc; + struct acpi_gas *gas; + uacpi_u32 legacy_addr; + uacpi_u8 length; + + for (i = 0; i < UACPI_ARRAY_SIZE(fadt_registers); ++i) { + desc = &fadt_registers[i]; + + legacy_addr = *(uacpi_u32*)fadt_relative(desc->offset); + length = *(uacpi_u8*)fadt_relative(desc->length_offset); + gas = fadt_relative(desc->xoffset); + + if (gas->address) + continue; + + gas_init_system_io(gas, legacy_addr, length); + } +} + +#ifndef UACPI_BAREBONES_MODE +static void split_one_block( + struct acpi_gas *src, struct acpi_gas *dst0, struct acpi_gas *dst1 +) +{ + uacpi_size byte_length; + + if (src->address == 0) + return; + + byte_length = src->register_bit_width / 8; + byte_length /= 2; + + gas_init_system_io(dst0, src->address, byte_length); + gas_init_system_io(dst1, src->address + byte_length, byte_length); +} + +static void split_event_blocks(void) +{ + split_one_block( + &g_uacpi_rt_ctx.fadt.x_pm1a_evt_blk, + &g_uacpi_rt_ctx.pm1a_status_blk, + &g_uacpi_rt_ctx.pm1a_enable_blk + ); + split_one_block( + &g_uacpi_rt_ctx.fadt.x_pm1b_evt_blk, + &g_uacpi_rt_ctx.pm1b_status_blk, + &g_uacpi_rt_ctx.pm1b_enable_blk + ); +} +#endif // !UACPI_BAREBONES_MODE + +static uacpi_status initialize_fadt(const void *virt) +{ + uacpi_status ret; + struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt; + const struct acpi_sdt_hdr *hdr = virt; + + /* + * Here we (roughly) follow ACPICA initialization sequence to make sure we + * handle potential BIOS quirks with garbage inside FADT correctly. + */ + + uacpi_memcpy(fadt, hdr, UACPI_MIN(sizeof(*fadt), hdr->length)); + +#if !defined(UACPI_REDUCED_HARDWARE) && !defined(UACPI_BAREBONES_MODE) + g_uacpi_rt_ctx.is_hardware_reduced = fadt->flags & ACPI_HW_REDUCED_ACPI; +#endif + + fadt_ensure_correct_revision(fadt); + + /* + * These are reserved prior to version 3, so zero them out to work around + * BIOS implementations that might dirty these. 
+ */ + if (fadt->hdr.revision <= 2) { + fadt->preferred_pm_profile = 0; + fadt->pstate_cnt = 0; + fadt->cst_cnt = 0; + fadt->iapc_boot_arch = 0; + } + + if (!fadt->x_dsdt) + fadt->x_dsdt = fadt->dsdt; + + if (fadt->x_dsdt) { + ret = table_install_physical_with_origin_unlocked( + fadt->x_dsdt, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL, + ACPI_DSDT_SIGNATURE, UACPI_NULL + ); + if (uacpi_unlikely(ret != UACPI_STATUS_OK && + ret != UACPI_STATUS_OVERRIDDEN)) + return ret; + } + + /* + * Unconditionally use 32 bit FACS if it exists, as 64 bit FACS is known + * to cause issues on some firmware: + * https://bugzilla.kernel.org/show_bug.cgi?id=74021 + * + * Note that we don't install it here as FACS needs permanent mapping, which + * we might not be able to obtain at this point in case of early table + * access. + */ + if (fadt->firmware_ctrl) + fadt->x_firmware_ctrl = fadt->firmware_ctrl; + + if (!uacpi_is_hardware_reduced()) { + convert_registers_to_gas(); +#ifndef UACPI_BAREBONES_MODE + split_event_blocks(); +#endif + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_table_fadt(struct acpi_fadt **out_fadt) +{ + ENSURE_TABLES_ONLINE(); + + *out_fadt = &g_uacpi_rt_ctx.fadt; + return UACPI_STATUS_OK; +} diff --git a/sys/dev/acpi/uacpi/types.c b/sys/dev/acpi/uacpi/types.c new file mode 100644 index 0000000..840d3ef --- /dev/null +++ b/sys/dev/acpi/uacpi/types.c @@ -0,0 +1,1489 @@ +#include <uacpi/types.h> +#include <uacpi/internal/types.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/internal/shareable.h> +#include <uacpi/internal/dynamic_array.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/tables.h> +#include <uacpi/kernel_api.h> + +const uacpi_char *uacpi_address_space_to_string( + enum uacpi_address_space space +) +{ + switch (space) { + case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY: + return "SystemMemory"; + case UACPI_ADDRESS_SPACE_SYSTEM_IO: + return "SystemIO"; + case UACPI_ADDRESS_SPACE_PCI_CONFIG: + return "PCI_Config"; + case UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER: + return "EmbeddedControl"; + case UACPI_ADDRESS_SPACE_SMBUS: + return "SMBus"; + case UACPI_ADDRESS_SPACE_SYSTEM_CMOS: + return "SystemCMOS"; + case UACPI_ADDRESS_SPACE_PCI_BAR_TARGET: + return "PciBarTarget"; + case UACPI_ADDRESS_SPACE_IPMI: + return "IPMI"; + case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO: + return "GeneralPurposeIO"; + case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS: + return "GenericSerialBus"; + case UACPI_ADDRESS_SPACE_PCC: + return "PCC"; + case UACPI_ADDRESS_SPACE_PRM: + return "PlatformRtMechanism"; + case UACPI_ADDRESS_SPACE_FFIXEDHW: + return "FFixedHW"; + case UACPI_ADDRESS_SPACE_TABLE_DATA: + return "TableData"; + default: + return "<vendor specific>"; + } +} + +#ifndef UACPI_BAREBONES_MODE + +const uacpi_char *uacpi_object_type_to_string(uacpi_object_type type) +{ + switch (type) { + case UACPI_OBJECT_UNINITIALIZED: + return "Uninitialized"; + case UACPI_OBJECT_INTEGER: + return "Integer"; + case UACPI_OBJECT_STRING: + return "String"; + case UACPI_OBJECT_BUFFER: + return "Buffer"; + case UACPI_OBJECT_PACKAGE: + return "Package"; + case UACPI_OBJECT_FIELD_UNIT: + return "Field Unit"; + case UACPI_OBJECT_DEVICE: + return "Device"; + case UACPI_OBJECT_EVENT: + return "Event"; + case UACPI_OBJECT_REFERENCE: + return "Reference"; + case UACPI_OBJECT_BUFFER_INDEX: + return "Buffer Index"; + case UACPI_OBJECT_METHOD: + return "Method"; + case UACPI_OBJECT_MUTEX: + return "Mutex"; + case UACPI_OBJECT_OPERATION_REGION: + return "Operation Region"; + case 
UACPI_OBJECT_POWER_RESOURCE: + return "Power Resource"; + case UACPI_OBJECT_PROCESSOR: + return "Processor"; + case UACPI_OBJECT_THERMAL_ZONE: + return "Thermal Zone"; + case UACPI_OBJECT_BUFFER_FIELD: + return "Buffer Field"; + case UACPI_OBJECT_DEBUG: + return "Debug"; + default: + return "<Invalid type>"; + } +} + +static uacpi_bool buffer_alloc(uacpi_object *obj, uacpi_size initial_size) +{ + uacpi_buffer *buf; + + buf = uacpi_kernel_alloc_zeroed(sizeof(uacpi_buffer)); + if (uacpi_unlikely(buf == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(buf); + + if (initial_size) { + buf->data = uacpi_kernel_alloc(initial_size); + if (uacpi_unlikely(buf->data == UACPI_NULL)) { + uacpi_free(buf, sizeof(*buf)); + return UACPI_FALSE; + } + + buf->size = initial_size; + } + + obj->buffer = buf; + return UACPI_TRUE; +} + +static uacpi_bool empty_buffer_or_string_alloc(uacpi_object *object) +{ + return buffer_alloc(object, 0); +} + +uacpi_bool uacpi_package_fill( + uacpi_package *pkg, uacpi_size num_elements, + enum uacpi_prealloc_objects prealloc_objects +) +{ + uacpi_size i; + + if (uacpi_unlikely(num_elements == 0)) + return UACPI_TRUE; + + pkg->objects = uacpi_kernel_alloc_zeroed( + num_elements * sizeof(uacpi_handle) + ); + if (uacpi_unlikely(pkg->objects == UACPI_NULL)) + return UACPI_FALSE; + + pkg->count = num_elements; + + if (prealloc_objects == UACPI_PREALLOC_OBJECTS_YES) { + for (i = 0; i < num_elements; ++i) { + pkg->objects[i] = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + + if (uacpi_unlikely(pkg->objects[i] == UACPI_NULL)) + return UACPI_FALSE; + } + } + + return UACPI_TRUE; +} + +static uacpi_bool package_alloc( + uacpi_object *obj, uacpi_size initial_size, + enum uacpi_prealloc_objects prealloc +) +{ + uacpi_package *pkg; + + pkg = uacpi_kernel_alloc_zeroed(sizeof(uacpi_package)); + if (uacpi_unlikely(pkg == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(pkg); + + if (uacpi_unlikely(!uacpi_package_fill(pkg, initial_size, prealloc))) { + uacpi_free(pkg, sizeof(*pkg)); + return UACPI_FALSE; + } + + obj->package = pkg; + return UACPI_TRUE; +} + +static uacpi_bool empty_package_alloc(uacpi_object *object) +{ + return package_alloc(object, 0, UACPI_PREALLOC_OBJECTS_NO); +} + +uacpi_mutex *uacpi_create_mutex(void) +{ + uacpi_mutex *mutex; + + mutex = uacpi_kernel_alloc_zeroed(sizeof(uacpi_mutex)); + if (uacpi_unlikely(mutex == UACPI_NULL)) + return UACPI_NULL; + + mutex->owner = UACPI_THREAD_ID_NONE; + + mutex->handle = uacpi_kernel_create_mutex(); + if (mutex->handle == UACPI_NULL) { + uacpi_free(mutex, sizeof(*mutex)); + return UACPI_NULL; + } + + uacpi_shareable_init(mutex); + return mutex; +} + +static uacpi_bool mutex_alloc(uacpi_object *obj) +{ + obj->mutex = uacpi_create_mutex(); + return obj->mutex != UACPI_NULL; +} + +static uacpi_bool event_alloc(uacpi_object *obj) +{ + uacpi_event *event; + + event = uacpi_kernel_alloc_zeroed(sizeof(uacpi_event)); + if (uacpi_unlikely(event == UACPI_NULL)) + return UACPI_FALSE; + + event->handle = uacpi_kernel_create_event(); + if (event->handle == UACPI_NULL) { + uacpi_free(event, sizeof(*event)); + return UACPI_FALSE; + } + + uacpi_shareable_init(event); + obj->event = event; + + return UACPI_TRUE; +} + +static uacpi_bool method_alloc(uacpi_object *obj) +{ + uacpi_control_method *method; + + method = uacpi_kernel_alloc_zeroed(sizeof(*method)); + if (uacpi_unlikely(method == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(method); + obj->method = method; + + return UACPI_TRUE; +} + +static uacpi_bool 
op_region_alloc(uacpi_object *obj) +{ + uacpi_operation_region *op_region; + + op_region = uacpi_kernel_alloc_zeroed(sizeof(*op_region)); + if (uacpi_unlikely(op_region == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(op_region); + obj->op_region = op_region; + + return UACPI_TRUE; +} + +static uacpi_bool field_unit_alloc(uacpi_object *obj) +{ + uacpi_field_unit *field_unit; + + field_unit = uacpi_kernel_alloc_zeroed(sizeof(*field_unit)); + if (uacpi_unlikely(field_unit == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(field_unit); + obj->field_unit = field_unit; + + return UACPI_TRUE; +} + +static uacpi_bool processor_alloc(uacpi_object *obj) +{ + uacpi_processor *processor; + + processor = uacpi_kernel_alloc_zeroed(sizeof(*processor)); + if (uacpi_unlikely(processor == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(processor); + obj->processor = processor; + + return UACPI_TRUE; +} + +static uacpi_bool device_alloc(uacpi_object *obj) +{ + uacpi_device *device; + + device = uacpi_kernel_alloc_zeroed(sizeof(*device)); + if (uacpi_unlikely(device == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(device); + obj->device = device; + + return UACPI_TRUE; +} + +static uacpi_bool thermal_zone_alloc(uacpi_object *obj) +{ + uacpi_thermal_zone *thermal_zone; + + thermal_zone = uacpi_kernel_alloc_zeroed(sizeof(*thermal_zone)); + if (uacpi_unlikely(thermal_zone == UACPI_NULL)) + return UACPI_FALSE; + + uacpi_shareable_init(thermal_zone); + obj->thermal_zone = thermal_zone; + + return UACPI_TRUE; +} + +typedef uacpi_bool (*object_ctor)(uacpi_object *obj); + +static object_ctor object_constructor_table[UACPI_OBJECT_MAX_TYPE_VALUE + 1] = { + [UACPI_OBJECT_STRING] = empty_buffer_or_string_alloc, + [UACPI_OBJECT_BUFFER] = empty_buffer_or_string_alloc, + [UACPI_OBJECT_PACKAGE] = empty_package_alloc, + [UACPI_OBJECT_FIELD_UNIT] = field_unit_alloc, + [UACPI_OBJECT_MUTEX] = mutex_alloc, + [UACPI_OBJECT_EVENT] = event_alloc, + [UACPI_OBJECT_OPERATION_REGION] = op_region_alloc, + [UACPI_OBJECT_METHOD] = method_alloc, + [UACPI_OBJECT_PROCESSOR] = processor_alloc, + [UACPI_OBJECT_DEVICE] = device_alloc, + [UACPI_OBJECT_THERMAL_ZONE] = thermal_zone_alloc, +}; + +uacpi_object *uacpi_create_object(uacpi_object_type type) +{ + uacpi_object *ret; + object_ctor ctor; + + ret = uacpi_kernel_alloc_zeroed(sizeof(*ret)); + if (uacpi_unlikely(ret == UACPI_NULL)) + return ret; + + uacpi_shareable_init(ret); + ret->type = type; + + ctor = object_constructor_table[type]; + if (ctor == UACPI_NULL) + return ret; + + if (uacpi_unlikely(!ctor(ret))) { + uacpi_free(ret, sizeof(*ret)); + return UACPI_NULL; + } + + return ret; +} + +static void free_buffer(uacpi_handle handle) +{ + uacpi_buffer *buf = handle; + + if (buf->data != UACPI_NULL) + /* + * If buffer has a size of 0 but a valid data pointer it's probably an + * "empty" buffer allocated by the interpreter in make_null_buffer + * and its real size is actually 1. 
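+         * Hence the UACPI_MAX(buf->size, 1) below: freeing with a size of 0
+         * would misreport the size of the original allocation.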
+ */ + uacpi_free(buf->data, UACPI_MAX(buf->size, 1)); + + uacpi_free(buf, sizeof(*buf)); +} + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(free_queue, uacpi_package*, 4) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(free_queue, uacpi_package*, static) + +static uacpi_bool free_queue_push(struct free_queue *queue, uacpi_package *pkg) +{ + uacpi_package **slot; + + slot = free_queue_alloc(queue); + if (uacpi_unlikely(slot == UACPI_NULL)) + return UACPI_FALSE; + + *slot = pkg; + return UACPI_TRUE; +} + +static void free_object(uacpi_object *obj); + +// No references allowed here, only plain objects +static void free_plain_no_recurse(uacpi_object *obj, struct free_queue *queue) +{ + switch (obj->type) { + case UACPI_OBJECT_PACKAGE: + if (uacpi_shareable_unref(obj->package) > 1) + break; + + if (uacpi_unlikely(!free_queue_push(queue, + obj->package))) { + uacpi_warn( + "unable to free nested package @%p: not enough memory\n", + obj->package + ); + } + + // Don't call free_object here as that will recurse + uacpi_free(obj, sizeof(*obj)); + break; + default: + /* + * This call is guaranteed to not recurse further as we handle + * recursive cases elsewhere explicitly. + */ + free_object(obj); + } +} + +static void unref_plain_no_recurse(uacpi_object *obj, struct free_queue *queue) +{ + if (uacpi_shareable_unref(obj) > 1) + return; + + free_plain_no_recurse(obj, queue); +} + +static void unref_chain_no_recurse(uacpi_object *obj, struct free_queue *queue) +{ + uacpi_object *next_obj = UACPI_NULL; + + while (obj) { + if (obj->type == UACPI_OBJECT_REFERENCE) + next_obj = obj->inner_object; + + if (uacpi_shareable_unref(obj) > 1) + goto do_next; + + if (obj->type == UACPI_OBJECT_REFERENCE) { + uacpi_free(obj, sizeof(*obj)); + } else { + free_plain_no_recurse(obj, queue); + } + + do_next: + obj = next_obj; + next_obj = UACPI_NULL; + } +} + +static void unref_object_no_recurse(uacpi_object *obj, struct free_queue *queue) +{ + if (obj->type == UACPI_OBJECT_REFERENCE) { + unref_chain_no_recurse(obj, queue); + return; + } + + unref_plain_no_recurse(obj, queue); +} + +static void free_package(uacpi_handle handle) +{ + struct free_queue queue = { 0 }; + uacpi_package *pkg = handle; + uacpi_object *obj; + uacpi_size i; + + free_queue_push(&queue, pkg); + + while (free_queue_size(&queue) != 0) { + pkg = *free_queue_last(&queue); + free_queue_pop(&queue); + + /* + * 1. Unref/free every object in the package. Note that this might add + * even more packages into the free queue. + */ + for (i = 0; i < pkg->count; ++i) { + obj = pkg->objects[i]; + unref_object_no_recurse(obj, &queue); + } + + // 2. Release the object array + uacpi_free(pkg->objects, sizeof(*pkg->objects) * pkg->count); + + // 3. 
Release the package itself + uacpi_free(pkg, sizeof(*pkg)); + } + + free_queue_clear(&queue); +} + +static void free_mutex(uacpi_handle handle) +{ + uacpi_mutex *mutex = handle; + + uacpi_kernel_free_mutex(mutex->handle); + uacpi_free(mutex, sizeof(*mutex)); +} + +void uacpi_mutex_unref(uacpi_mutex *mutex) +{ + if (mutex == UACPI_NULL) + return; + + uacpi_shareable_unref_and_delete_if_last(mutex, free_mutex); +} + +static void free_event(uacpi_handle handle) +{ + uacpi_event *event = handle; + + uacpi_kernel_free_event(event->handle); + uacpi_free(event, sizeof(*event)); +} + +static void free_address_space_handler(uacpi_handle handle) +{ + uacpi_address_space_handler *handler = handle; + uacpi_free(handler, sizeof(*handler)); +} + +static void free_address_space_handlers( + uacpi_address_space_handler *handler +) +{ + uacpi_address_space_handler *next_handler; + + while (handler) { + next_handler = handler->next; + uacpi_shareable_unref_and_delete_if_last( + handler, free_address_space_handler + ); + handler = next_handler; + } +} + +static void free_device_notify_handlers(uacpi_device_notify_handler *handler) +{ + uacpi_device_notify_handler *next_handler; + + while (handler) { + next_handler = handler->next; + uacpi_free(handler, sizeof(*handler)); + handler = next_handler; + } +} + +static void free_handlers(uacpi_handle handle) +{ + uacpi_handlers *handlers = handle; + + free_address_space_handlers(handlers->address_space_head); + free_device_notify_handlers(handlers->notify_head); +} + +void uacpi_address_space_handler_unref(uacpi_address_space_handler *handler) +{ + uacpi_shareable_unref_and_delete_if_last( + handler, free_address_space_handler + ); +} + +static void free_op_region(uacpi_handle handle) +{ + uacpi_operation_region *op_region = handle; + + if (uacpi_unlikely(op_region->handler != UACPI_NULL)) { + uacpi_warn( + "BUG: attempting to free an opregion@%p with a handler attached\n", + op_region + ); + } + + switch (op_region->space) { + case UACPI_ADDRESS_SPACE_PCC: + uacpi_free(op_region->internal_buffer, op_region->length); + break; + case UACPI_ADDRESS_SPACE_TABLE_DATA: { + struct uacpi_table table = { 0 }; + + table.index = op_region->table_idx; + uacpi_table_unref( + &table + ); + break; + } + default: + break; + } + + uacpi_free(op_region, sizeof(*op_region)); +} + +static void free_device(uacpi_handle handle) +{ + uacpi_device *device = handle; + free_handlers(device); + uacpi_free(device, sizeof(*device)); +} + +static void free_processor(uacpi_handle handle) +{ + uacpi_processor *processor = handle; + free_handlers(processor); + uacpi_free(processor, sizeof(*processor)); +} + +static void free_thermal_zone(uacpi_handle handle) +{ + uacpi_thermal_zone *thermal_zone = handle; + free_handlers(thermal_zone); + uacpi_free(thermal_zone, sizeof(*thermal_zone)); +} + +static void free_field_unit(uacpi_handle handle) +{ + uacpi_field_unit *field_unit = handle; + + if (field_unit->connection) + uacpi_object_unref(field_unit->connection); + + switch (field_unit->kind) { + case UACPI_FIELD_UNIT_KIND_NORMAL: + uacpi_namespace_node_unref(field_unit->region); + break; + case UACPI_FIELD_UNIT_KIND_BANK: + uacpi_namespace_node_unref(field_unit->bank_region); + uacpi_shareable_unref_and_delete_if_last( + field_unit->bank_selection, free_field_unit + ); + break; + case UACPI_FIELD_UNIT_KIND_INDEX: + uacpi_shareable_unref_and_delete_if_last( + field_unit->index, free_field_unit + ); + uacpi_shareable_unref_and_delete_if_last( + field_unit->data, free_field_unit + ); + break; + 
default: + break; + } + + uacpi_free(field_unit, sizeof(*field_unit)); +} + +static void free_method(uacpi_handle handle) +{ + uacpi_control_method *method = handle; + + uacpi_shareable_unref_and_delete_if_last( + method->mutex, free_mutex + ); + + if (!method->native_call && method->owns_code) + uacpi_free(method->code, method->size); + uacpi_free(method, sizeof(*method)); +} + +void uacpi_method_unref(uacpi_control_method *method) +{ + uacpi_shareable_unref_and_delete_if_last(method, free_method); +} + +static void free_object_storage(uacpi_object *obj) +{ + switch (obj->type) { + case UACPI_OBJECT_STRING: + case UACPI_OBJECT_BUFFER: + uacpi_shareable_unref_and_delete_if_last(obj->buffer, free_buffer); + break; + case UACPI_OBJECT_BUFFER_FIELD: + uacpi_shareable_unref_and_delete_if_last(obj->buffer_field.backing, + free_buffer); + break; + case UACPI_OBJECT_BUFFER_INDEX: + uacpi_shareable_unref_and_delete_if_last(obj->buffer_index.buffer, + free_buffer); + break; + case UACPI_OBJECT_METHOD: + uacpi_method_unref(obj->method); + break; + case UACPI_OBJECT_PACKAGE: + uacpi_shareable_unref_and_delete_if_last(obj->package, + free_package); + break; + case UACPI_OBJECT_FIELD_UNIT: + uacpi_shareable_unref_and_delete_if_last(obj->field_unit, + free_field_unit); + break; + case UACPI_OBJECT_MUTEX: + uacpi_mutex_unref(obj->mutex); + break; + case UACPI_OBJECT_EVENT: + uacpi_shareable_unref_and_delete_if_last(obj->event, + free_event); + break; + case UACPI_OBJECT_OPERATION_REGION: + uacpi_shareable_unref_and_delete_if_last(obj->op_region, + free_op_region); + break; + case UACPI_OBJECT_PROCESSOR: + uacpi_shareable_unref_and_delete_if_last(obj->processor, + free_processor); + break; + case UACPI_OBJECT_DEVICE: + uacpi_shareable_unref_and_delete_if_last(obj->device, + free_device); + break; + case UACPI_OBJECT_THERMAL_ZONE: + uacpi_shareable_unref_and_delete_if_last(obj->thermal_zone, + free_thermal_zone); + break; + default: + break; + } +} + +static void free_object(uacpi_object *obj) +{ + free_object_storage(obj); + uacpi_free(obj, sizeof(*obj)); +} + +static void make_chain_bugged(uacpi_object *obj) +{ + uacpi_warn("object refcount bug, marking chain @%p as bugged\n", obj); + + while (obj) { + uacpi_make_shareable_bugged(obj); + + if (obj->type == UACPI_OBJECT_REFERENCE) + obj = obj->inner_object; + else + obj = UACPI_NULL; + } +} + +void uacpi_object_ref(uacpi_object *obj) +{ + while (obj) { + uacpi_shareable_ref(obj); + + if (obj->type == UACPI_OBJECT_REFERENCE) + obj = obj->inner_object; + else + obj = UACPI_NULL; + } +} + +static void free_chain(uacpi_object *obj) +{ + uacpi_object *next_obj = UACPI_NULL; + + while (obj) { + if (obj->type == UACPI_OBJECT_REFERENCE) + next_obj = obj->inner_object; + + if (uacpi_shareable_refcount(obj) == 0) + free_object(obj); + + obj = next_obj; + next_obj = UACPI_NULL; + } +} + +void uacpi_object_unref(uacpi_object *obj) +{ + uacpi_object *this_obj = obj; + + if (!obj) + return; + + while (obj) { + if (uacpi_unlikely(uacpi_bugged_shareable(obj))) + return; + + uacpi_shareable_unref(obj); + + if (obj->type == UACPI_OBJECT_REFERENCE) { + obj = obj->inner_object; + } else { + obj = UACPI_NULL; + } + } + + if (uacpi_shareable_refcount(this_obj) == 0) + free_chain(this_obj); +} + +static uacpi_status buffer_alloc_and_store( + uacpi_object *obj, uacpi_size buf_size, + const void *src, uacpi_size src_size +) +{ + if (uacpi_unlikely(!buffer_alloc(obj, buf_size))) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy_zerout(obj->buffer->data, src, buf_size, 
src_size); + return UACPI_STATUS_OK; +} + +static uacpi_status assign_buffer(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior behavior) +{ + if (behavior == UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY) { + dst->buffer = src->buffer; + uacpi_shareable_ref(dst->buffer); + return UACPI_STATUS_OK; + } + + return buffer_alloc_and_store(dst, src->buffer->size, + src->buffer->data, src->buffer->size); +} + +struct pkg_copy_req { + uacpi_object *dst; + uacpi_package *src; +}; + +DYNAMIC_ARRAY_WITH_INLINE_STORAGE(pkg_copy_reqs, struct pkg_copy_req, 2) +DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( + pkg_copy_reqs, struct pkg_copy_req, static +) + +static uacpi_bool pkg_copy_reqs_push( + struct pkg_copy_reqs *reqs, + uacpi_object *dst, uacpi_package *pkg +) +{ + struct pkg_copy_req *req; + + req = pkg_copy_reqs_alloc(reqs); + if (uacpi_unlikely(req == UACPI_NULL)) + return UACPI_FALSE; + + req->dst = dst; + req->src = pkg; + + return UACPI_TRUE; +} + +static uacpi_status deep_copy_package_no_recurse( + uacpi_object *dst, uacpi_package *src, + struct pkg_copy_reqs *reqs +) +{ + uacpi_size i; + uacpi_package *dst_package; + + if (uacpi_unlikely(!package_alloc(dst, src->count, + UACPI_PREALLOC_OBJECTS_YES))) + return UACPI_STATUS_OUT_OF_MEMORY; + + dst->type = UACPI_OBJECT_PACKAGE; + dst_package = dst->package; + + for (i = 0; i < src->count; ++i) { + uacpi_status st; + uacpi_object *src_obj = src->objects[i]; + uacpi_object *dst_obj = dst_package->objects[i]; + + // Don't copy the internal package index reference + if (src_obj->type == UACPI_OBJECT_REFERENCE && + src_obj->flags == UACPI_REFERENCE_KIND_PKG_INDEX) + src_obj = src_obj->inner_object; + + if (src_obj->type == UACPI_OBJECT_PACKAGE) { + uacpi_bool ret; + + ret = pkg_copy_reqs_push(reqs, dst_obj, src_obj->package); + if (uacpi_unlikely(!ret)) + return UACPI_STATUS_OUT_OF_MEMORY; + + continue; + } + + st = uacpi_object_assign(dst_obj, src_obj, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); + if (uacpi_unlikely_error(st)) + return st; + } + + return UACPI_STATUS_OK; +} + +static uacpi_status deep_copy_package(uacpi_object *dst, uacpi_object *src) +{ + uacpi_status ret = UACPI_STATUS_OK; + struct pkg_copy_reqs reqs = { 0 }; + + pkg_copy_reqs_push(&reqs, dst, src->package); + + while (pkg_copy_reqs_size(&reqs) != 0) { + struct pkg_copy_req req; + + req = *pkg_copy_reqs_last(&reqs); + pkg_copy_reqs_pop(&reqs); + + ret = deep_copy_package_no_recurse(req.dst, req.src, &reqs); + if (uacpi_unlikely_error(ret)) + break; + } + + pkg_copy_reqs_clear(&reqs); + return ret; +} + +static uacpi_status assign_mutex(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior behavior) +{ + if (behavior == UACPI_ASSIGN_BEHAVIOR_DEEP_COPY) { + if (uacpi_likely(mutex_alloc(dst))) { + dst->mutex->sync_level = src->mutex->sync_level; + return UACPI_STATUS_OK; + } + + return UACPI_STATUS_OUT_OF_MEMORY; + } + + dst->mutex = src->mutex; + uacpi_shareable_ref(dst->mutex); + + return UACPI_STATUS_OK; +} + +static uacpi_status assign_event(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior behavior) +{ + if (behavior == UACPI_ASSIGN_BEHAVIOR_DEEP_COPY) { + if (uacpi_likely(event_alloc(dst))) + return UACPI_STATUS_OK; + + return UACPI_STATUS_OUT_OF_MEMORY; + } + + dst->event = src->event; + uacpi_shareable_ref(dst->event); + + return UACPI_STATUS_OK; +} + +static uacpi_status assign_package(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior behavior) +{ + if (behavior == UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY) { + dst->package = src->package; + 
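+        /* shallow copy: both objects now share one reference-counted package */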
uacpi_shareable_ref(dst->package); + return UACPI_STATUS_OK; + } + + return deep_copy_package(dst, src); +} + +void uacpi_object_attach_child(uacpi_object *parent, uacpi_object *child) +{ + uacpi_u32 refs_to_add; + + parent->inner_object = child; + + if (uacpi_unlikely(uacpi_bugged_shareable(parent))) { + make_chain_bugged(child); + return; + } + + refs_to_add = uacpi_shareable_refcount(parent); + while (refs_to_add--) + uacpi_object_ref(child); +} + +void uacpi_object_detach_child(uacpi_object *parent) +{ + uacpi_u32 refs_to_remove; + uacpi_object *child; + + child = parent->inner_object; + parent->inner_object = UACPI_NULL; + + if (uacpi_unlikely(uacpi_bugged_shareable(parent))) + return; + + refs_to_remove = uacpi_shareable_refcount(parent); + while (refs_to_remove--) + uacpi_object_unref(child); +} + +uacpi_object_type uacpi_object_get_type(uacpi_object *obj) +{ + return obj->type; +} + +uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object *obj) +{ + return (1u << obj->type); +} + +uacpi_bool uacpi_object_is(uacpi_object *obj, uacpi_object_type type) +{ + return obj->type == type; +} + +uacpi_bool uacpi_object_is_one_of( + uacpi_object *obj, uacpi_object_type_bits type_mask +) +{ + return (uacpi_object_get_type_bit(obj) & type_mask) != 0; +} + +#define TYPE_CHECK_USER_OBJ_RET(obj, type_bits, ret) \ + do { \ + if (uacpi_unlikely(obj == UACPI_NULL || \ + !uacpi_object_is_one_of(obj, type_bits))) \ + return ret; \ + } while (0) + +#define TYPE_CHECK_USER_OBJ(obj, type_bits) \ + TYPE_CHECK_USER_OBJ_RET(obj, type_bits, UACPI_STATUS_INVALID_ARGUMENT) + +#define ENSURE_VALID_USER_OBJ_RET(obj, ret) \ + do { \ + if (uacpi_unlikely(obj == UACPI_NULL)) \ + return ret; \ + } while (0) + +#define ENSURE_VALID_USER_OBJ(obj) \ + ENSURE_VALID_USER_OBJ_RET(obj, UACPI_STATUS_INVALID_ARGUMENT) + +uacpi_status uacpi_object_get_integer(uacpi_object *obj, uacpi_u64 *out) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_INTEGER_BIT); + + *out = obj->integer; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_assign_integer(uacpi_object *obj, uacpi_u64 value) +{ + uacpi_object object = { 0 }; + + ENSURE_VALID_USER_OBJ(obj); + + object.type = UACPI_OBJECT_INTEGER; + object.integer = value; + + return uacpi_object_assign(obj, &object, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); +} + +void uacpi_buffer_to_view(uacpi_buffer *buf, uacpi_data_view *out_view) +{ + out_view->bytes = buf->byte_data; + out_view->length = buf->size; +} + +static uacpi_status uacpi_object_do_get_string_or_buffer( + uacpi_object *obj, uacpi_data_view *out, uacpi_u32 mask +) +{ + TYPE_CHECK_USER_OBJ(obj, mask); + + uacpi_buffer_to_view(obj->buffer, out); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_string_or_buffer( + uacpi_object *obj, uacpi_data_view *out +) +{ + return uacpi_object_do_get_string_or_buffer( + obj, out, UACPI_OBJECT_STRING_BIT | UACPI_OBJECT_BUFFER_BIT + ); +} + +uacpi_status uacpi_object_get_string(uacpi_object *obj, uacpi_data_view *out) +{ + return uacpi_object_do_get_string_or_buffer( + obj, out, UACPI_OBJECT_STRING_BIT + ); +} + +uacpi_status uacpi_object_get_buffer(uacpi_object *obj, uacpi_data_view *out) +{ + return uacpi_object_do_get_string_or_buffer( + obj, out, UACPI_OBJECT_BUFFER_BIT + ); +} + +uacpi_bool uacpi_object_is_aml_namepath(uacpi_object *obj) +{ + TYPE_CHECK_USER_OBJ_RET(obj, UACPI_OBJECT_STRING_BIT, UACPI_FALSE); + return obj->flags == UACPI_STRING_KIND_PATH; +} + +uacpi_status uacpi_object_resolve_as_aml_namepath( + uacpi_object *obj, uacpi_namespace_node *scope, + 
uacpi_namespace_node **out_node +) +{ + uacpi_status ret; + uacpi_namespace_node *node; + + if (!uacpi_object_is_aml_namepath(obj)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_namespace_node_resolve_from_aml_namepath( + scope, obj->buffer->text, &node + ); + if (uacpi_likely_success(ret)) + *out_node = node; + return ret; +} + +static uacpi_status uacpi_object_do_assign_buffer( + uacpi_object *obj, uacpi_data_view in, uacpi_object_type type +) +{ + uacpi_status ret; + uacpi_object tmp_obj = { 0 }; + uacpi_size dst_buf_size = in.length; + + tmp_obj.type = type; + + ENSURE_VALID_USER_OBJ(obj); + + if (type == UACPI_OBJECT_STRING && (in.length == 0 || + in.const_bytes[in.length - 1] != 0x00)) + dst_buf_size++; + + ret = buffer_alloc_and_store( + &tmp_obj, dst_buf_size, in.const_bytes, in.length + ); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_object_assign( + obj, &tmp_obj, UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY + ); + uacpi_shareable_unref_and_delete_if_last(tmp_obj.buffer, free_buffer); + + return ret; +} + +uacpi_status uacpi_object_assign_string(uacpi_object *obj, uacpi_data_view in) +{ + return uacpi_object_do_assign_buffer(obj, in, UACPI_OBJECT_STRING); +} + +uacpi_status uacpi_object_assign_buffer(uacpi_object *obj, uacpi_data_view in) +{ + return uacpi_object_do_assign_buffer(obj, in, UACPI_OBJECT_BUFFER); +} + +uacpi_object *uacpi_object_create_uninitialized(void) +{ + return uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); +} + +uacpi_status uacpi_object_create_integer_safe( + uacpi_u64 value, uacpi_overflow_behavior behavior, uacpi_object **out_obj +) +{ + uacpi_status ret; + uacpi_u8 bitness; + uacpi_object *obj; + + ret = uacpi_get_aml_bitness(&bitness); + if (uacpi_unlikely_error(ret)) + return ret; + + switch (behavior) { + case UACPI_OVERFLOW_TRUNCATE: + case UACPI_OVERFLOW_DISALLOW: + if (bitness == 32 && value > 0xFFFFFFFF) { + if (behavior == UACPI_OVERFLOW_DISALLOW) + return UACPI_STATUS_INVALID_ARGUMENT; + + value &= 0xFFFFFFFF; + } + UACPI_FALLTHROUGH; + case UACPI_OVERFLOW_ALLOW: + obj = uacpi_object_create_integer(value); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + *out_obj = obj; + return ret; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } +} + +uacpi_object *uacpi_object_create_integer(uacpi_u64 value) +{ + uacpi_object *obj; + + obj = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + obj->integer = value; + return obj; +} + +static uacpi_object *uacpi_object_do_create_string_or_buffer( + uacpi_data_view view, uacpi_object_type type +) +{ + uacpi_status ret; + uacpi_object *obj; + + obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_NULL; + + ret = uacpi_object_do_assign_buffer(obj, view, type); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(obj); + return UACPI_NULL; + } + + return obj; +} + +uacpi_object *uacpi_object_create_string(uacpi_data_view view) +{ + return uacpi_object_do_create_string_or_buffer(view, UACPI_OBJECT_STRING); +} + +uacpi_object *uacpi_object_create_buffer(uacpi_data_view view) +{ + return uacpi_object_do_create_string_or_buffer(view, UACPI_OBJECT_BUFFER); +} + +uacpi_object *uacpi_object_create_cstring(const uacpi_char *str) +{ + uacpi_data_view data_view = { 0 }; + + data_view.const_text = str; + data_view.length = uacpi_strlen(str) + 1; + return uacpi_object_create_string(data_view); +} + +uacpi_status uacpi_object_get_package( + uacpi_object *obj, 
uacpi_object_array *out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_PACKAGE_BIT); + + out->objects = obj->package->objects; + out->count = obj->package->count; + return UACPI_STATUS_OK; +} + +uacpi_object *uacpi_object_create_reference(uacpi_object *child) +{ + uacpi_object *obj; + + ENSURE_VALID_USER_OBJ_RET(child, UACPI_NULL); + + obj = uacpi_create_object(UACPI_OBJECT_REFERENCE); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_NULL; + + uacpi_object_attach_child(obj, child); + obj->flags = UACPI_REFERENCE_KIND_ARG; + + return obj; +} + +uacpi_status uacpi_object_assign_reference( + uacpi_object *obj, uacpi_object *child +) +{ + uacpi_status ret; + uacpi_object object = { 0 }; + + ENSURE_VALID_USER_OBJ(obj); + ENSURE_VALID_USER_OBJ(child); + + // First clear out the object + object.type = UACPI_OBJECT_UNINITIALIZED; + ret = uacpi_object_assign( + obj, &object, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY + ); + if (uacpi_unlikely_error(ret)) + return ret; + + obj->type = UACPI_OBJECT_REFERENCE; + uacpi_object_attach_child(obj, child); + obj->flags = UACPI_REFERENCE_KIND_ARG; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_dereferenced( + uacpi_object *obj, uacpi_object **out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_REFERENCE_BIT); + + *out = obj->inner_object; + uacpi_shareable_ref(*out); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_processor_info( + uacpi_object *obj, uacpi_processor_info *out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_PROCESSOR_BIT); + + out->id = obj->processor->id; + out->block_address = obj->processor->block_address; + out->block_length = obj->processor->block_length; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_power_resource_info( + uacpi_object *obj, uacpi_power_resource_info *out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_POWER_RESOURCE_BIT); + + out->system_level = obj->power_resource.system_level; + out->resource_order = obj->power_resource.resource_order; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_assign_package( + uacpi_object *obj, uacpi_object_array in +) +{ + uacpi_status ret; + uacpi_size i; + uacpi_object tmp_obj = { + .type = UACPI_OBJECT_PACKAGE, + }; + + ENSURE_VALID_USER_OBJ(obj); + + if (uacpi_unlikely(!package_alloc(&tmp_obj, in.count, + UACPI_PREALLOC_OBJECTS_NO))) + return UACPI_STATUS_OUT_OF_MEMORY; + + obj->type = UACPI_OBJECT_PACKAGE; + + for (i = 0; i < in.count; ++i) { + tmp_obj.package->objects[i] = in.objects[i]; + uacpi_object_ref(tmp_obj.package->objects[i]); + } + + ret = uacpi_object_assign(obj, &tmp_obj, UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY); + uacpi_shareable_unref_and_delete_if_last(tmp_obj.package, free_package); + + return ret; +} + +uacpi_object *uacpi_object_create_package(uacpi_object_array in) +{ + uacpi_status ret; + uacpi_object *obj; + + obj = uacpi_object_create_uninitialized(); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + ret = uacpi_object_assign_package(obj, in); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(obj); + return UACPI_NULL; + } + + return obj; +} + +uacpi_status uacpi_object_assign(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior behavior) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (src == dst) + return ret; + + switch (dst->type) { + case UACPI_OBJECT_REFERENCE: + uacpi_object_detach_child(dst); + break; + case UACPI_OBJECT_STRING: + case UACPI_OBJECT_BUFFER: + case UACPI_OBJECT_METHOD: + case UACPI_OBJECT_PACKAGE: + case UACPI_OBJECT_MUTEX: + case UACPI_OBJECT_EVENT: + case 
UACPI_OBJECT_PROCESSOR: + case UACPI_OBJECT_DEVICE: + case UACPI_OBJECT_THERMAL_ZONE: + free_object_storage(dst); + break; + default: + break; + } + + switch (src->type) { + case UACPI_OBJECT_UNINITIALIZED: + case UACPI_OBJECT_DEBUG: + break; + case UACPI_OBJECT_BUFFER: + case UACPI_OBJECT_STRING: + dst->flags = src->flags; + ret = assign_buffer(dst, src, behavior); + break; + case UACPI_OBJECT_BUFFER_FIELD: + dst->buffer_field = src->buffer_field; + uacpi_shareable_ref(dst->buffer_field.backing); + break; + case UACPI_OBJECT_BUFFER_INDEX: + dst->buffer_index = src->buffer_index; + uacpi_shareable_ref(dst->buffer_index.buffer); + break; + case UACPI_OBJECT_INTEGER: + dst->integer = src->integer; + break; + case UACPI_OBJECT_METHOD: + dst->method = src->method; + uacpi_shareable_ref(dst->method); + break; + case UACPI_OBJECT_MUTEX: + ret = assign_mutex(dst, src, behavior); + break; + case UACPI_OBJECT_EVENT: + ret = assign_event(dst, src, behavior); + break; + case UACPI_OBJECT_OPERATION_REGION: + dst->op_region = src->op_region; + uacpi_shareable_ref(dst->op_region); + break; + case UACPI_OBJECT_PACKAGE: + ret = assign_package(dst, src, behavior); + break; + case UACPI_OBJECT_FIELD_UNIT: + dst->field_unit = src->field_unit; + uacpi_shareable_ref(dst->field_unit); + break; + case UACPI_OBJECT_REFERENCE: + uacpi_object_attach_child(dst, src->inner_object); + break; + case UACPI_OBJECT_PROCESSOR: + dst->processor = src->processor; + uacpi_shareable_ref(dst->processor); + break; + case UACPI_OBJECT_DEVICE: + dst->device = src->device; + uacpi_shareable_ref(dst->device); + break; + case UACPI_OBJECT_THERMAL_ZONE: + dst->thermal_zone = src->thermal_zone; + uacpi_shareable_ref(dst->thermal_zone); + break; + default: + ret = UACPI_STATUS_UNIMPLEMENTED; + } + + if (ret == UACPI_STATUS_OK) + dst->type = src->type; + + return ret; +} + +struct uacpi_object *uacpi_create_internal_reference( + enum uacpi_reference_kind kind, uacpi_object *child +) +{ + uacpi_object *ret; + + ret = uacpi_create_object(UACPI_OBJECT_REFERENCE); + if (uacpi_unlikely(ret == UACPI_NULL)) + return ret; + + ret->flags = kind; + uacpi_object_attach_child(ret, child); + return ret; +} + +uacpi_object *uacpi_unwrap_internal_reference(uacpi_object *object) +{ + for (;;) { + if (object->type != UACPI_OBJECT_REFERENCE || + (object->flags == UACPI_REFERENCE_KIND_REFOF || + object->flags == UACPI_REFERENCE_KIND_PKG_INDEX)) + return object; + + object = object->inner_object; + } +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/uacpi.c b/sys/dev/acpi/uacpi/uacpi.c new file mode 100644 index 0000000..c6c569f --- /dev/null +++ b/sys/dev/acpi/uacpi/uacpi.c @@ -0,0 +1,998 @@ +#include <uacpi/uacpi.h> +#include <uacpi/acpi.h> + +#include <uacpi/internal/log.h> +#include <uacpi/internal/context.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/tables.h> +#include <uacpi/internal/interpreter.h> +#include <uacpi/internal/namespace.h> +#include <uacpi/internal/opregion.h> +#include <uacpi/internal/registers.h> +#include <uacpi/internal/event.h> +#include <uacpi/internal/notify.h> +#include <uacpi/internal/osi.h> +#include <uacpi/internal/registers.h> + +struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 }; + +void uacpi_context_set_log_level(uacpi_log_level lvl) +{ + if (lvl == 0) + lvl = UACPI_DEFAULT_LOG_LEVEL; + + g_uacpi_rt_ctx.log_level = lvl; +} + +void uacpi_logger_initialize(void) +{ + static uacpi_bool version_printed = UACPI_FALSE; + + if (g_uacpi_rt_ctx.log_level == 0) + 
uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL); + + if (!version_printed) { + version_printed = UACPI_TRUE; + uacpi_info( + "starting uACPI, version %d.%d.%d\n", + UACPI_MAJOR, UACPI_MINOR, UACPI_PATCH + ); + } +} + +void uacpi_context_set_proactive_table_checksum(uacpi_bool setting) +{ + if (setting) + g_uacpi_rt_ctx.flags |= UACPI_FLAG_PROACTIVE_TBL_CSUM; + else + g_uacpi_rt_ctx.flags &= ~UACPI_FLAG_PROACTIVE_TBL_CSUM; +} + +const uacpi_char *uacpi_status_to_string(uacpi_status st) +{ + switch (st) { + case UACPI_STATUS_OK: + return "no error"; + case UACPI_STATUS_MAPPING_FAILED: + return "failed to map memory"; + case UACPI_STATUS_OUT_OF_MEMORY: + return "out of memory"; + case UACPI_STATUS_BAD_CHECKSUM: + return "bad table checksum"; + case UACPI_STATUS_INVALID_SIGNATURE: + return "invalid table signature"; + case UACPI_STATUS_INVALID_TABLE_LENGTH: + return "invalid table length"; + case UACPI_STATUS_NOT_FOUND: + return "not found"; + case UACPI_STATUS_INVALID_ARGUMENT: + return "invalid argument"; + case UACPI_STATUS_UNIMPLEMENTED: + return "unimplemented"; + case UACPI_STATUS_ALREADY_EXISTS: + return "already exists"; + case UACPI_STATUS_INTERNAL_ERROR: + return "internal error"; + case UACPI_STATUS_TYPE_MISMATCH: + return "object type mismatch"; + case UACPI_STATUS_INIT_LEVEL_MISMATCH: + return "init level too low/high for this action"; + case UACPI_STATUS_NAMESPACE_NODE_DANGLING: + return "attempting to use a dangling namespace node"; + case UACPI_STATUS_NO_HANDLER: + return "no handler found"; + case UACPI_STATUS_NO_RESOURCE_END_TAG: + return "resource template without an end tag"; + case UACPI_STATUS_COMPILED_OUT: + return "this functionality has been compiled out of this build"; + case UACPI_STATUS_HARDWARE_TIMEOUT: + return "timed out waiting for hardware response"; + case UACPI_STATUS_TIMEOUT: + return "wait timed out"; + case UACPI_STATUS_OVERRIDDEN: + return "the requested action has been overridden"; + case UACPI_STATUS_DENIED: + return "the requested action has been denied"; + + case UACPI_STATUS_AML_UNDEFINED_REFERENCE: + return "AML referenced an undefined object"; + case UACPI_STATUS_AML_INVALID_NAMESTRING: + return "invalid AML name string"; + case UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS: + return "object already exists"; + case UACPI_STATUS_AML_INVALID_OPCODE: + return "invalid AML opcode"; + case UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE: + return "incompatible AML object type"; + case UACPI_STATUS_AML_BAD_ENCODING: + return "bad AML instruction encoding"; + case UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX: + return "out of bounds AML index"; + case UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH: + return "AML attempted to acquire a mutex with a lower sync level"; + case UACPI_STATUS_AML_INVALID_RESOURCE: + return "invalid resource template encoding or type"; + case UACPI_STATUS_AML_LOOP_TIMEOUT: + return "hanging AML while loop"; + case UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT: + return "reached maximum AML call stack depth"; + default: + return "<invalid status>"; + } +} + +void uacpi_state_reset(void) +{ +#ifndef UACPI_BAREBONES_MODE + uacpi_deinitialize_namespace(); + uacpi_deinitialize_interfaces(); + uacpi_deinitialize_events(); + uacpi_deinitialize_notify(); + uacpi_deinitialize_opregion(); +#endif + + uacpi_deinitialize_tables(); + +#ifndef UACPI_BAREBONES_MODE + +#ifndef UACPI_REDUCED_HARDWARE + if (g_uacpi_rt_ctx.was_in_legacy_mode) + uacpi_leave_acpi_mode(); +#endif + + uacpi_deinitialize_registers(); + +#ifndef UACPI_REDUCED_HARDWARE + if 
(g_uacpi_rt_ctx.global_lock_event)
+        uacpi_kernel_free_event(g_uacpi_rt_ctx.global_lock_event);
+    if (g_uacpi_rt_ctx.global_lock_spinlock)
+        uacpi_kernel_free_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
+#endif
+
+#endif // !UACPI_BAREBONES_MODE
+
+    uacpi_memzero(&g_uacpi_rt_ctx, sizeof(g_uacpi_rt_ctx));
+
+#if defined(UACPI_KERNEL_INITIALIZATION) && !defined(UACPI_BAREBONES_MODE)
+    uacpi_kernel_deinitialize();
+#endif
+}
+
+#ifndef UACPI_BAREBONES_MODE
+
+void uacpi_context_set_loop_timeout(uacpi_u32 seconds)
+{
+    if (seconds == 0)
+        seconds = UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS;
+
+    g_uacpi_rt_ctx.loop_timeout_seconds = seconds;
+}
+
+void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth)
+{
+    if (depth == 0)
+        depth = UACPI_DEFAULT_MAX_CALL_STACK_DEPTH;
+
+    g_uacpi_rt_ctx.max_call_stack_depth = depth;
+}
+
+uacpi_u32 uacpi_context_get_loop_timeout(void)
+{
+    return g_uacpi_rt_ctx.loop_timeout_seconds;
+}
+
+#ifndef UACPI_REDUCED_HARDWARE
+enum hw_mode {
+    HW_MODE_ACPI = 0,
+    HW_MODE_LEGACY = 1,
+};
+
+static enum hw_mode read_mode(void)
+{
+    uacpi_status ret;
+    uacpi_u64 raw_value;
+    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+
+    if (!fadt->smi_cmd)
+        return HW_MODE_ACPI;
+
+    ret = uacpi_read_register_field(UACPI_REGISTER_FIELD_SCI_EN, &raw_value);
+    if (uacpi_unlikely_error(ret))
+        return HW_MODE_LEGACY;
+
+    return raw_value ? HW_MODE_ACPI : HW_MODE_LEGACY;
+}
+
+static uacpi_status set_mode(enum hw_mode mode)
+{
+    uacpi_status ret;
+    uacpi_u64 raw_value, stalled_time = 0;
+    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+
+    if (uacpi_unlikely(!fadt->smi_cmd)) {
+        uacpi_error("SMI_CMD is not implemented by the firmware\n");
+        return UACPI_STATUS_NOT_FOUND;
+    }
+
+    if (uacpi_unlikely(!fadt->acpi_enable && !fadt->acpi_disable)) {
+        uacpi_error("mode transition is not implemented by the hardware\n");
+        return UACPI_STATUS_NOT_FOUND;
+    }
+
+    switch (mode) {
+    case HW_MODE_ACPI:
+        raw_value = fadt->acpi_enable;
+        break;
+    case HW_MODE_LEGACY:
+        raw_value = fadt->acpi_disable;
+        break;
+    default:
+        return UACPI_STATUS_INVALID_ARGUMENT;
+    }
+
+    ret = uacpi_write_register(UACPI_REGISTER_SMI_CMD, raw_value);
+    if (uacpi_unlikely_error(ret))
+        return ret;
+
+    // Allow up to 5 seconds for the hardware to enter the desired mode
+    while (stalled_time < (5 * 1000 * 1000)) {
+        if (read_mode() == mode)
+            return UACPI_STATUS_OK;
+
+        uacpi_kernel_stall(100);
+        stalled_time += 100;
+    }
+
+    uacpi_error("hardware timed out while changing modes\n");
+    return UACPI_STATUS_HARDWARE_TIMEOUT;
+}
+
+static uacpi_status enter_mode(enum hw_mode mode, uacpi_bool *did_change)
+{
+    uacpi_status ret;
+    const uacpi_char *mode_str;
+
+    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+    if (uacpi_is_hardware_reduced())
+        return UACPI_STATUS_OK;
+
+    mode_str = mode == HW_MODE_LEGACY ? 
"legacy" : "acpi"; + + if (read_mode() == mode) { + uacpi_trace("%s mode already enabled\n", mode_str); + return UACPI_STATUS_OK; + } + + ret = set_mode(mode); + if (uacpi_unlikely_error(ret)) { + uacpi_warn( + "unable to enter %s mode: %s\n", + mode_str, uacpi_status_to_string(ret) + ); + return ret; + } + + uacpi_trace("entered %s mode\n", mode_str); + if (did_change != UACPI_NULL) + *did_change = UACPI_TRUE; + + return ret; +} + +uacpi_status uacpi_enter_acpi_mode(void) +{ + return enter_mode(HW_MODE_ACPI, UACPI_NULL); +} + +uacpi_status uacpi_leave_acpi_mode(void) +{ + return enter_mode(HW_MODE_LEGACY, UACPI_NULL); +} + +static void enter_acpi_mode_initial(void) +{ + enter_mode(HW_MODE_ACPI, &g_uacpi_rt_ctx.was_in_legacy_mode); +} +#else +static void enter_acpi_mode_initial(void) { } +#endif + +uacpi_init_level uacpi_get_current_init_level(void) +{ + return g_uacpi_rt_ctx.init_level; +} + +uacpi_status uacpi_initialize(uacpi_u64 flags) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY); + +#ifdef UACPI_KERNEL_INITIALIZATION + ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_EARLY); + if (uacpi_unlikely_error(ret)) + return ret; +#endif + + g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED; + g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID; + g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID; + g_uacpi_rt_ctx.s0_sleep_typ_a = UACPI_SLEEP_TYP_INVALID; + g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID; + g_uacpi_rt_ctx.flags = flags; + + uacpi_logger_initialize(); + + if (g_uacpi_rt_ctx.loop_timeout_seconds == 0) + uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS); + if (g_uacpi_rt_ctx.max_call_stack_depth == 0) + uacpi_context_set_max_call_stack_depth(UACPI_DEFAULT_MAX_CALL_STACK_DEPTH); + + ret = uacpi_initialize_tables(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = uacpi_initialize_registers(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = uacpi_initialize_events_early(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = uacpi_initialize_opregion(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = uacpi_initialize_interfaces(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = uacpi_initialize_namespace(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = uacpi_initialize_notify(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + uacpi_install_default_address_space_handlers(); + + if (!uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE)) + enter_acpi_mode_initial(); + + return UACPI_STATUS_OK; + +out_fatal_error: + uacpi_state_reset(); + return ret; +} + +struct table_load_stats { + uacpi_u32 load_counter; + uacpi_u32 failure_counter; +}; + +static void trace_table_load_failure( + struct acpi_sdt_hdr *tbl, uacpi_log_level lvl, uacpi_status ret +) +{ + uacpi_log_lvl( + lvl, + "failed to load "UACPI_PRI_TBL_HDR": %s\n", + UACPI_FMT_TBL_HDR(tbl), uacpi_status_to_string(ret) + ); +} + +static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl) +{ + if (tbl->flags & UACPI_TABLE_LOADED) + return UACPI_FALSE; + + return uacpi_signatures_match(tbl->hdr.signature, ACPI_SSDT_SIGNATURE) || + uacpi_signatures_match(tbl->hdr.signature, ACPI_PSDT_SIGNATURE); +} + +static uacpi_u64 elapsed_ms(uacpi_u64 begin_ns, uacpi_u64 end_ns) +{ + return (end_ns - begin_ns) / (1000ull * 1000ull); +} + +static uacpi_bool warn_on_bad_timesource(uacpi_u64 begin_ts, uacpi_u64 end_ts) +{ + const uacpi_char 
*reason; + + if (uacpi_unlikely(begin_ts == 0 && end_ts == 0)) { + reason = "uacpi_kernel_get_nanoseconds_since_boot() appears to be a stub"; + goto out_bad_timesource; + } + + if (uacpi_unlikely(begin_ts == end_ts)) { + reason = "poor time source precision detected"; + goto out_bad_timesource; + } + + if (uacpi_unlikely(end_ts < begin_ts)) { + reason = "time source backwards drift detected"; + goto out_bad_timesource; + } + + return UACPI_FALSE; + +out_bad_timesource: + uacpi_warn("%s, this may cause problems\n", reason); + return UACPI_TRUE; +} + +uacpi_status uacpi_namespace_load(void) +{ + struct uacpi_table tbl; + uacpi_status ret; + uacpi_u64 begin_ts, end_ts; + struct table_load_stats st = { 0 }; + uacpi_size cur_index; + + UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + +#ifdef UACPI_KERNEL_INITIALIZATION + ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; +#endif + + begin_ts = uacpi_kernel_get_nanoseconds_since_boot(); + + ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret)); + goto out_fatal_error; + } + + ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT); + if (uacpi_unlikely_error(ret)) { + trace_table_load_failure(tbl.hdr, UACPI_LOG_ERROR, ret); + st.failure_counter++; + } + st.load_counter++; + uacpi_table_unref(&tbl); + + for (cur_index = 0;; cur_index = tbl.index + 1) { + ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl); + if (ret != UACPI_STATUS_OK) { + if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) + goto out_fatal_error; + + break; + } + + ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT); + if (uacpi_unlikely_error(ret)) { + trace_table_load_failure(tbl.hdr, UACPI_LOG_WARN, ret); + st.failure_counter++; + } + st.load_counter++; + uacpi_table_unref(&tbl); + } + + end_ts = uacpi_kernel_get_nanoseconds_since_boot(); + g_uacpi_rt_ctx.bad_timesource = warn_on_bad_timesource(begin_ts, end_ts); + + if (uacpi_unlikely(st.failure_counter != 0 || g_uacpi_rt_ctx.bad_timesource)) { + uacpi_info( + "loaded %u AML blob%s (%u error%s)\n", + st.load_counter, st.load_counter > 1 ? "s" : "", st.failure_counter, + st.failure_counter == 1 ? "" : "s" + ); + } else { + uacpi_u64 ops = g_uacpi_rt_ctx.opcodes_executed; + uacpi_u64 ops_per_sec = ops * UACPI_NANOSECONDS_PER_SEC; + + ops_per_sec /= end_ts - begin_ts; + + uacpi_info( + "successfully loaded %u AML blob%s, %"UACPI_PRIu64" ops in " + "%"UACPI_PRIu64"ms (avg %"UACPI_PRIu64"/s)\n", + st.load_counter, st.load_counter > 1 ? 
"s" : "", + UACPI_FMT64(ops), UACPI_FMT64(elapsed_ms(begin_ts, end_ts)), + UACPI_FMT64(ops_per_sec) + ); + } + + ret = uacpi_initialize_events(); + if (uacpi_unlikely_error(ret)) { + uacpi_error("event initialization failed: %s\n", + uacpi_status_to_string(ret)); + goto out_fatal_error; + } + + g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_LOADED; + return UACPI_STATUS_OK; + +out_fatal_error: + uacpi_state_reset(); + return ret; +} + +struct ns_init_context { + uacpi_size ini_executed; + uacpi_size ini_errors; + uacpi_size sta_executed; + uacpi_size sta_errors; + uacpi_size devices; + uacpi_size thermal_zones; +}; + +static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node) +{ + uacpi_status ret; + + ret = uacpi_eval(node, "_INI", UACPI_NULL, UACPI_NULL); + if (ret == UACPI_STATUS_NOT_FOUND) + return; + + ctx->ini_executed++; + if (uacpi_unlikely_error(ret)) + ctx->ini_errors++; +} + +static uacpi_status sta_eval( + struct ns_init_context *ctx, uacpi_namespace_node *node, + uacpi_u32 *value +) +{ + uacpi_status ret; + + ret = uacpi_eval_sta(node, value); + if (*value == 0xFFFFFFFF) + return ret; + + ctx->sta_executed++; + if (uacpi_unlikely_error(ret)) + ctx->sta_errors++; + + return ret; +} + +static uacpi_iteration_decision do_sta_ini( + void *opaque, uacpi_namespace_node *node, uacpi_u32 depth +) +{ + struct ns_init_context *ctx = opaque; + uacpi_status ret; + uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED; + uacpi_u32 sta_ret; + + UACPI_UNUSED(depth); + + // We don't care about aliases + if (uacpi_namespace_node_is_alias(node)) + return UACPI_ITERATION_DECISION_NEXT_PEER; + + ret = uacpi_namespace_node_type(node, &type); + switch (type) { + case UACPI_OBJECT_DEVICE: + case UACPI_OBJECT_PROCESSOR: + ctx->devices++; + break; + case UACPI_OBJECT_THERMAL_ZONE: + ctx->thermal_zones++; + break; + default: + if (node != uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_TZ)) + return UACPI_ITERATION_DECISION_CONTINUE; + } + + ret = sta_eval(ctx, node, &sta_ret); + if (uacpi_unlikely_error(ret)) + return UACPI_ITERATION_DECISION_CONTINUE; + + if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) { + if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING)) + return UACPI_ITERATION_DECISION_NEXT_PEER; + + /* + * ACPI 6.5 specification: + * _STA may return bit 0 clear (not present) with bit [3] set (device + * is functional). This case is used to indicate a valid device for + * which no device driver should be loaded (for example, a bridge + * device.) Children of this device may be present and valid. OSPM + * should continue enumeration below a device whose _STA returns this + * bit combination. + */ + return UACPI_ITERATION_DECISION_CONTINUE; + } + + ini_eval(ctx, node); + + return UACPI_ITERATION_DECISION_CONTINUE; +} + +uacpi_status uacpi_namespace_initialize(void) +{ + struct ns_init_context ctx = { 0 }; + uacpi_namespace_node *root; + uacpi_u64 begin_ts, end_ts; + uacpi_address_space_handlers *handlers; + uacpi_address_space_handler *handler; + uacpi_status ret = UACPI_STATUS_OK; + + UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + +#ifdef UACPI_KERNEL_INITIALIZATION + ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + if (uacpi_unlikely_error(ret)) + goto out; +#endif + + /* + * Initialization order here is identical to ACPICA because ACPI + * specification doesn't really have any detailed steps that explain + * how to do it. 
+ */ + + root = uacpi_namespace_root(); + + begin_ts = uacpi_kernel_get_nanoseconds_since_boot(); + + // Step 1 - Execute \_INI + ini_eval(&ctx, root); + + // Step 2 - Execute \_SB._INI + ini_eval( + &ctx, uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SB) + ); + + /* + * Step 3 - Run _REG methods for all globally installed + * address space handlers. + */ + handlers = uacpi_node_get_address_space_handlers(root); + if (handlers) { + handler = handlers->head; + + while (handler) { + if (uacpi_address_space_handler_is_default(handler)) + uacpi_reg_all_opregions(root, handler->space); + + handler = handler->next; + } + } + + // Step 4 - Run all other _STA and _INI methods + uacpi_namespace_for_each_child( + root, do_sta_ini, UACPI_NULL, + UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, &ctx + ); + + end_ts = uacpi_kernel_get_nanoseconds_since_boot(); + + if (uacpi_likely(!g_uacpi_rt_ctx.bad_timesource)) { + uacpi_info( + "namespace initialization done in %"UACPI_PRIu64"ms: " + "%zu devices, %zu thermal zones\n", + UACPI_FMT64(elapsed_ms(begin_ts, end_ts)), + ctx.devices, ctx.thermal_zones + ); + } else { + uacpi_info( + "namespace initialization done: %zu devices, %zu thermal zones\n", + ctx.devices, ctx.thermal_zones + ); + } + + uacpi_trace( + "_STA calls: %zu (%zu errors), _INI calls: %zu (%zu errors)\n", + ctx.sta_executed, ctx.sta_errors, ctx.ini_executed, + ctx.ini_errors + ); + + g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED; +#ifdef UACPI_KERNEL_INITIALIZATION + ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED); +out: + if (uacpi_unlikely_error(ret)) + uacpi_state_reset(); +#endif + return ret; +} + +uacpi_status uacpi_eval( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **out_obj +) +{ + struct uacpi_namespace_node *node; + uacpi_control_method *method; + uacpi_object *obj; + uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT; + + if (uacpi_unlikely(parent == UACPI_NULL && path == UACPI_NULL)) + return ret; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + if (path != UACPI_NULL) { + ret = uacpi_namespace_node_resolve( + parent, path, UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_YES, + &node + ); + if (uacpi_unlikely_error(ret)) + goto out_read_unlock; + } else { + node = parent; + } + + obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out_read_unlock; + } + + if (obj->type != UACPI_OBJECT_METHOD) { + uacpi_object *new_obj; + + if (uacpi_unlikely(out_obj == UACPI_NULL)) + goto out_read_unlock; + + new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(new_obj == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out_read_unlock; + } + + ret = uacpi_object_assign( + new_obj, obj, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY + ); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(new_obj); + goto out_read_unlock; + } + *out_obj = new_obj; + + out_read_unlock: + uacpi_namespace_read_unlock(); + return ret; + } + + method = obj->method; + uacpi_shareable_ref(method); + uacpi_namespace_read_unlock(); + + // Upgrade to a write-lock since we're about to run a method + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) + goto out_no_write_lock; + + ret = uacpi_execute_control_method(node, method, args, out_obj); + uacpi_namespace_write_unlock(); + +out_no_write_lock: + 
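    /* drop the method reference we took while holding the read lock */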
uacpi_method_unref(method); + return ret; +} + +uacpi_status uacpi_eval_simple( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval(parent, path, UACPI_NULL, ret); +} + +uacpi_status uacpi_execute( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args +) +{ + return uacpi_eval(parent, path, args, UACPI_NULL); +} + +uacpi_status uacpi_execute_simple( + uacpi_namespace_node *parent, const uacpi_char *path +) +{ + return uacpi_eval(parent, path, UACPI_NULL, UACPI_NULL); +} + +#define TRACE_BAD_RET(path_fmt, type, ...) \ + uacpi_warn( \ + "unexpected '%s' object returned by method "path_fmt \ + ", expected type mask: %08X\n", uacpi_object_type_to_string(type), \ + __VA_ARGS__ \ + ) + +#define TRACE_NO_RET(path_fmt, ...) \ + uacpi_warn( \ + "no value returned from method "path_fmt", expected type mask: " \ + "%08X\n", __VA_ARGS__ \ + ) + +static void trace_invalid_return_type( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_object_type_bits expected_mask, uacpi_object_type actual_type +) +{ + const uacpi_char *abs_path; + uacpi_bool dynamic_abs_path = UACPI_FALSE; + + if (parent == UACPI_NULL || (path != UACPI_NULL && path[0] == '\\')) { + abs_path = path; + } else { + abs_path = uacpi_namespace_node_generate_absolute_path(parent); + dynamic_abs_path = UACPI_TRUE; + } + + if (dynamic_abs_path && path != UACPI_NULL) { + if (actual_type == UACPI_OBJECT_UNINITIALIZED) + TRACE_NO_RET("%s.%s", abs_path, path, expected_mask); + else + TRACE_BAD_RET("%s.%s", actual_type, abs_path, path, expected_mask); + } else { + if (actual_type == UACPI_OBJECT_UNINITIALIZED) { + TRACE_NO_RET("%s", abs_path, expected_mask); + } else { + TRACE_BAD_RET("%s", actual_type, abs_path, expected_mask); + } + } + + if (dynamic_abs_path) + uacpi_free_dynamic_string(abs_path); +} + +uacpi_status uacpi_eval_typed( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object_type_bits ret_mask, + uacpi_object **out_obj +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_object_type returned_type = UACPI_OBJECT_UNINITIALIZED; + + if (uacpi_unlikely(out_obj == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_eval(parent, path, args, &obj); + if (uacpi_unlikely_error(ret)) + return ret; + + if (obj != UACPI_NULL) + returned_type = obj->type; + + if (ret_mask && (ret_mask & (1 << returned_type)) == 0) { + trace_invalid_return_type(parent, path, ret_mask, returned_type); + uacpi_object_unref(obj); + return UACPI_STATUS_TYPE_MISMATCH; + } + + *out_obj = obj; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_eval_simple_typed( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_object_type_bits ret_mask, uacpi_object **ret +) +{ + return uacpi_eval_typed(parent, path, UACPI_NULL, ret_mask, ret); +} + +uacpi_status uacpi_eval_integer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_u64 *out_value +) +{ + uacpi_object *int_obj; + uacpi_status ret; + + ret = uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_INTEGER_BIT, &int_obj + ); + if (uacpi_unlikely_error(ret)) + return ret; + + *out_value = int_obj->integer; + uacpi_object_unref(int_obj); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_eval_simple_integer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value +) +{ + return uacpi_eval_integer(parent, path, UACPI_NULL, out_value); +} + +uacpi_status uacpi_eval_buffer_or_string( + 
uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, + UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT, + ret + ); +} + +uacpi_status uacpi_eval_simple_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, + UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT, + ret + ); +} + +uacpi_status uacpi_eval_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_STRING_BIT, ret + ); +} + +uacpi_status uacpi_eval_simple_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, UACPI_OBJECT_STRING_BIT, ret + ); +} + +uacpi_status uacpi_eval_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_BUFFER_BIT, ret + ); +} + +uacpi_status uacpi_eval_simple_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, UACPI_OBJECT_BUFFER_BIT, ret + ); +} + +uacpi_status uacpi_eval_package( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_PACKAGE_BIT, ret + ); +} + +uacpi_status uacpi_eval_simple_package( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, ret + ); +} + +uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness) +{ + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + *out_bitness = g_uacpi_rt_ctx.is_rev1 ? 
32 : 64; + return UACPI_STATUS_OK; +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/acpi/uacpi/utilities.c b/sys/dev/acpi/uacpi/utilities.c new file mode 100644 index 0000000..c7ca20a --- /dev/null +++ b/sys/dev/acpi/uacpi/utilities.c @@ -0,0 +1,1156 @@ +#include <uacpi/types.h> +#include <uacpi/status.h> +#include <uacpi/uacpi.h> + +#include <uacpi/internal/context.h> +#include <uacpi/internal/utilities.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/namespace.h> + +enum char_type { + CHAR_TYPE_CONTROL = 1 << 0, + CHAR_TYPE_SPACE = 1 << 1, + CHAR_TYPE_BLANK = 1 << 2, + CHAR_TYPE_PUNCTUATION = 1 << 3, + CHAR_TYPE_LOWER = 1 << 4, + CHAR_TYPE_UPPER = 1 << 5, + CHAR_TYPE_DIGIT = 1 << 6, + CHAR_TYPE_HEX_DIGIT = 1 << 7, + CHAR_TYPE_ALPHA = CHAR_TYPE_LOWER | CHAR_TYPE_UPPER, + CHAR_TYPE_ALHEX = CHAR_TYPE_ALPHA | CHAR_TYPE_HEX_DIGIT, + CHAR_TYPE_ALNUM = CHAR_TYPE_ALPHA | CHAR_TYPE_DIGIT, +}; + +static const uacpi_u8 ascii_map[256] = { + CHAR_TYPE_CONTROL, // 0 + CHAR_TYPE_CONTROL, // 1 + CHAR_TYPE_CONTROL, // 2 + CHAR_TYPE_CONTROL, // 3 + CHAR_TYPE_CONTROL, // 4 + CHAR_TYPE_CONTROL, // 5 + CHAR_TYPE_CONTROL, // 6 + CHAR_TYPE_CONTROL, // 7 + CHAR_TYPE_CONTROL, // -> 8 control codes + + CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE | CHAR_TYPE_BLANK, // 9 tab + + CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // 10 + CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // 11 + CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // 12 + CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // -> 13 whitespaces + + CHAR_TYPE_CONTROL, // 14 + CHAR_TYPE_CONTROL, // 15 + CHAR_TYPE_CONTROL, // 16 + CHAR_TYPE_CONTROL, // 17 + CHAR_TYPE_CONTROL, // 18 + CHAR_TYPE_CONTROL, // 19 + CHAR_TYPE_CONTROL, // 20 + CHAR_TYPE_CONTROL, // 21 + CHAR_TYPE_CONTROL, // 22 + CHAR_TYPE_CONTROL, // 23 + CHAR_TYPE_CONTROL, // 24 + CHAR_TYPE_CONTROL, // 25 + CHAR_TYPE_CONTROL, // 26 + CHAR_TYPE_CONTROL, // 27 + CHAR_TYPE_CONTROL, // 28 + CHAR_TYPE_CONTROL, // 29 + CHAR_TYPE_CONTROL, // 30 + CHAR_TYPE_CONTROL, // -> 31 control codes + + CHAR_TYPE_SPACE | CHAR_TYPE_BLANK, // 32 space + + CHAR_TYPE_PUNCTUATION, // 33 + CHAR_TYPE_PUNCTUATION, // 34 + CHAR_TYPE_PUNCTUATION, // 35 + CHAR_TYPE_PUNCTUATION, // 36 + CHAR_TYPE_PUNCTUATION, // 37 + CHAR_TYPE_PUNCTUATION, // 38 + CHAR_TYPE_PUNCTUATION, // 39 + CHAR_TYPE_PUNCTUATION, // 40 + CHAR_TYPE_PUNCTUATION, // 41 + CHAR_TYPE_PUNCTUATION, // 42 + CHAR_TYPE_PUNCTUATION, // 43 + CHAR_TYPE_PUNCTUATION, // 44 + CHAR_TYPE_PUNCTUATION, // 45 + CHAR_TYPE_PUNCTUATION, // 46 + CHAR_TYPE_PUNCTUATION, // -> 47 punctuation + + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 48 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 49 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 50 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 51 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 52 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 53 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 54 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 55 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 56 + CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // -> 57 digits + + CHAR_TYPE_PUNCTUATION, // 58 + CHAR_TYPE_PUNCTUATION, // 59 + CHAR_TYPE_PUNCTUATION, // 60 + CHAR_TYPE_PUNCTUATION, // 61 + CHAR_TYPE_PUNCTUATION, // 62 + CHAR_TYPE_PUNCTUATION, // 63 + CHAR_TYPE_PUNCTUATION, // -> 64 punctuation + + CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 65 + CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 66 + CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 67 + CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 68 + CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 69 + CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // -> 70 ABCDEF + + CHAR_TYPE_UPPER, // 
71
+    CHAR_TYPE_UPPER, // 72
+    CHAR_TYPE_UPPER, // 73
+    CHAR_TYPE_UPPER, // 74
+    CHAR_TYPE_UPPER, // 75
+    CHAR_TYPE_UPPER, // 76
+    CHAR_TYPE_UPPER, // 77
+    CHAR_TYPE_UPPER, // 78
+    CHAR_TYPE_UPPER, // 79
+    CHAR_TYPE_UPPER, // 80
+    CHAR_TYPE_UPPER, // 81
+    CHAR_TYPE_UPPER, // 82
+    CHAR_TYPE_UPPER, // 83
+    CHAR_TYPE_UPPER, // 84
+    CHAR_TYPE_UPPER, // 85
+    CHAR_TYPE_UPPER, // 86
+    CHAR_TYPE_UPPER, // 87
+    CHAR_TYPE_UPPER, // 88
+    CHAR_TYPE_UPPER, // 89
+    CHAR_TYPE_UPPER, // -> 90 the rest of UPPERCASE alphabet
+
+    CHAR_TYPE_PUNCTUATION, // 91
+    CHAR_TYPE_PUNCTUATION, // 92
+    CHAR_TYPE_PUNCTUATION, // 93
+    CHAR_TYPE_PUNCTUATION, // 94
+    CHAR_TYPE_PUNCTUATION, // 95
+    CHAR_TYPE_PUNCTUATION, // -> 96 punctuation
+
+    CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 97
+    CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 98
+    CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 99
+    CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 100
+    CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 101
+    CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // -> 102 abcdef
+
+    CHAR_TYPE_LOWER, // 103
+    CHAR_TYPE_LOWER, // 104
+    CHAR_TYPE_LOWER, // 105
+    CHAR_TYPE_LOWER, // 106
+    CHAR_TYPE_LOWER, // 107
+    CHAR_TYPE_LOWER, // 108
+    CHAR_TYPE_LOWER, // 109
+    CHAR_TYPE_LOWER, // 110
+    CHAR_TYPE_LOWER, // 111
+    CHAR_TYPE_LOWER, // 112
+    CHAR_TYPE_LOWER, // 113
+    CHAR_TYPE_LOWER, // 114
+    CHAR_TYPE_LOWER, // 115
+    CHAR_TYPE_LOWER, // 116
+    CHAR_TYPE_LOWER, // 117
+    CHAR_TYPE_LOWER, // 118
+    CHAR_TYPE_LOWER, // 119
+    CHAR_TYPE_LOWER, // 120
+    CHAR_TYPE_LOWER, // 121
+    CHAR_TYPE_LOWER, // -> 122 the rest of lowercase alphabet
+
+    CHAR_TYPE_PUNCTUATION, // 123
+    CHAR_TYPE_PUNCTUATION, // 124
+    CHAR_TYPE_PUNCTUATION, // 125
+    CHAR_TYPE_PUNCTUATION, // -> 126 punctuation
+
+    CHAR_TYPE_CONTROL // 127 DEL
+};
+
+static uacpi_bool is_char(uacpi_char c, enum char_type type)
+{
+    return (ascii_map[(uacpi_u8)c] & type) == type;
+}
+
+static uacpi_char to_lower(uacpi_char c)
+{
+    if (is_char(c, CHAR_TYPE_UPPER))
+        return c + ('a' - 'A');
+
+    return c;
+}
+
+static uacpi_bool peek_one(
+    const uacpi_char **str, const uacpi_size *size, uacpi_char *out_char
+)
+{
+    if (*size == 0)
+        return UACPI_FALSE;
+
+    *out_char = **str;
+    return UACPI_TRUE;
+}
+
+static uacpi_bool consume_one(
+    const uacpi_char **str, uacpi_size *size, uacpi_char *out_char
+)
+{
+    if (!peek_one(str, size, out_char))
+        return UACPI_FALSE;
+
+    *str += 1;
+    *size -= 1;
+    return UACPI_TRUE;
+}
+
+static uacpi_bool consume_if(
+    const uacpi_char **str, uacpi_size *size, enum char_type type
+)
+{
+    uacpi_char c;
+
+    if (!peek_one(str, size, &c) || !is_char(c, type))
+        return UACPI_FALSE;
+
+    *str += 1;
+    *size -= 1;
+    return UACPI_TRUE;
+}
+
+static uacpi_bool consume_if_equals(
+    const uacpi_char **str, uacpi_size *size, uacpi_char c
+)
+{
+    uacpi_char c1;
+
+    if (!peek_one(str, size, &c1) || to_lower(c1) != c)
+        return UACPI_FALSE;
+
+    *str += 1;
+    *size -= 1;
+    return UACPI_TRUE;
+}
+
+uacpi_status uacpi_string_to_integer(
+    const uacpi_char *str, uacpi_size max_chars, enum uacpi_base base,
+    uacpi_u64 *out_value
+)
+{
+    uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;
+    uacpi_bool negative = UACPI_FALSE;
+    uacpi_u64 next, value = 0;
+    uacpi_char c = '\0';
+
+    while (consume_if(&str, &max_chars, CHAR_TYPE_SPACE));
+
+    if (consume_if_equals(&str, &max_chars, '-'))
+        negative = UACPI_TRUE;
+    else
+        consume_if_equals(&str, &max_chars, '+');
+
+    if (base == UACPI_BASE_AUTO) {
+        base = UACPI_BASE_DEC;
+
+        if (consume_if_equals(&str, &max_chars, '0')) {
+            base = UACPI_BASE_OCT;
+            if (consume_if_equals(&str, 
&max_chars, 'x')) + base = UACPI_BASE_HEX; + } + } + + while (consume_one(&str, &max_chars, &c)) { + switch (ascii_map[(uacpi_u8)c] & (CHAR_TYPE_DIGIT | CHAR_TYPE_ALHEX)) { + case CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT: + next = c - '0'; + if (base == UACPI_BASE_OCT && next > 7) + goto out; + break; + case CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT: + case CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT: + if (base != UACPI_BASE_HEX) + goto out; + next = 10 + (to_lower(c) - 'a'); + break; + default: + goto out; + } + + next = (value * base) + next; + if ((next / base) != value) { + value = 0xFFFFFFFFFFFFFFFF; + goto out; + } + + value = next; + } + +out: + if (negative) + value = -((uacpi_i64)value); + + *out_value = value; + if (max_chars == 0 || c == '\0') + ret = UACPI_STATUS_OK; + + return ret; +} + +#ifndef UACPI_BAREBONES_MODE + +static inline uacpi_bool is_valid_name_byte(uacpi_u8 c) +{ + // ‘_’ := 0x5F + if (c == 0x5F) + return UACPI_TRUE; + + /* + * LeadNameChar := ‘A’-‘Z’ | ‘_’ + * DigitChar := ‘0’ - ‘9’ + * NameChar := DigitChar | LeadNameChar + * ‘A’-‘Z’ := 0x41 - 0x5A + * ‘0’-‘9’ := 0x30 - 0x39 + */ + return (ascii_map[c] & (CHAR_TYPE_DIGIT | CHAR_TYPE_UPPER)) != 0; +} + +uacpi_bool uacpi_is_valid_nameseg(uacpi_u8 *nameseg) +{ + return is_valid_name_byte(nameseg[0]) && + is_valid_name_byte(nameseg[1]) && + is_valid_name_byte(nameseg[2]) && + is_valid_name_byte(nameseg[3]); +} + +void uacpi_eisa_id_to_string(uacpi_u32 id, uacpi_char *out_string) +{ + static uacpi_char hex_to_ascii[16] = { + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'A', 'B', 'C', 'D', 'E', 'F' + }; + + /* + * For whatever reason bits are encoded upper to lower here, swap + * them around so that we don't have to do ridiculous bit shifts + * everywhere. + */ + union { + uacpi_u8 bytes[4]; + uacpi_u32 dword; + } orig, swapped; + + orig.dword = id; + swapped.bytes[0] = orig.bytes[3]; + swapped.bytes[1] = orig.bytes[2]; + swapped.bytes[2] = orig.bytes[1]; + swapped.bytes[3] = orig.bytes[0]; + + /* + * Bit 16 - 20: 3rd character (- 0x40) of mfg code + * Bit 21 - 25: 2nd character (- 0x40) of mfg code + * Bit 26 - 30: 1st character (- 0x40) of mfg code + */ + out_string[0] = (uacpi_char)(0x40 + ((swapped.dword >> 26) & 0x1F)); + out_string[1] = (uacpi_char)(0x40 + ((swapped.dword >> 21) & 0x1F)); + out_string[2] = (uacpi_char)(0x40 + ((swapped.dword >> 16) & 0x1F)); + + /* + * Bit 0 - 3 : 4th hex digit of product number + * Bit 4 - 7 : 3rd hex digit of product number + * Bit 8 - 11: 2nd hex digit of product number + * Bit 12 - 15: 1st hex digit of product number + */ + out_string[3] = hex_to_ascii[(swapped.dword >> 12) & 0x0F]; + out_string[4] = hex_to_ascii[(swapped.dword >> 8 ) & 0x0F]; + out_string[5] = hex_to_ascii[(swapped.dword >> 4 ) & 0x0F]; + out_string[6] = hex_to_ascii[(swapped.dword >> 0 ) & 0x0F]; + + out_string[7] = '\0'; +} + +#define PNP_ID_LENGTH 8 + +uacpi_status uacpi_eval_hid(uacpi_namespace_node *node, uacpi_id_string **out_id) +{ + uacpi_status ret; + uacpi_object *hid_ret; + uacpi_id_string *id = UACPI_NULL; + uacpi_u32 size; + + ret = uacpi_eval_typed( + node, "_HID", UACPI_NULL, + UACPI_OBJECT_INTEGER_BIT | UACPI_OBJECT_STRING_BIT, + &hid_ret + ); + if (ret != UACPI_STATUS_OK) + return ret; + + size = sizeof(uacpi_id_string); + + switch (hid_ret->type) { + case UACPI_OBJECT_STRING: { + uacpi_buffer *buf = hid_ret->buffer; + + size += buf->size; + if (uacpi_unlikely(buf->size == 0 || size < buf->size)) { + uacpi_object_name name = uacpi_namespace_node_name(node); + + uacpi_error( + "%.4s._HID: 
empty/invalid EISA ID string (%zu bytes)\n", + name.text, buf->size + ); + ret = UACPI_STATUS_AML_BAD_ENCODING; + break; + } + + id = uacpi_kernel_alloc(size); + if (uacpi_unlikely(id == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + break; + } + id->size = buf->size; + id->value = UACPI_PTR_ADD(id, sizeof(uacpi_id_string)); + + uacpi_memcpy(id->value, buf->text, buf->size); + id->value[buf->size - 1] = '\0'; + break; + } + + case UACPI_OBJECT_INTEGER: + size += PNP_ID_LENGTH; + + id = uacpi_kernel_alloc(size); + if (uacpi_unlikely(id == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + break; + } + id->size = PNP_ID_LENGTH; + id->value = UACPI_PTR_ADD(id, sizeof(uacpi_id_string)); + + uacpi_eisa_id_to_string(hid_ret->integer, id->value); + break; + } + + uacpi_object_unref(hid_ret); + if (uacpi_likely_success(ret)) + *out_id = id; + return ret; +} + +void uacpi_free_id_string(uacpi_id_string *id) +{ + if (id == UACPI_NULL) + return; + + uacpi_free(id, sizeof(uacpi_id_string) + id->size); +} + +uacpi_status uacpi_eval_cid( + uacpi_namespace_node *node, uacpi_pnp_id_list **out_list +) +{ + uacpi_status ret; + uacpi_object *object, *cid_ret; + uacpi_object **objects; + uacpi_size num_ids, i; + uacpi_u32 size; + uacpi_id_string *id; + uacpi_char *id_buffer; + uacpi_pnp_id_list *list; + + ret = uacpi_eval_typed( + node, "_CID", UACPI_NULL, + UACPI_OBJECT_INTEGER_BIT | UACPI_OBJECT_STRING_BIT | + UACPI_OBJECT_PACKAGE_BIT, + &cid_ret + ); + if (ret != UACPI_STATUS_OK) + return ret; + + switch (cid_ret->type) { + case UACPI_OBJECT_PACKAGE: + objects = cid_ret->package->objects; + num_ids = cid_ret->package->count; + break; + default: + objects = &cid_ret; + num_ids = 1; + break; + } + + size = sizeof(uacpi_pnp_id_list); + size += num_ids * sizeof(uacpi_id_string); + + for (i = 0; i < num_ids; ++i) { + object = objects[i]; + + switch (object->type) { + case UACPI_OBJECT_STRING: { + uacpi_size buf_size = object->buffer->size; + + if (uacpi_unlikely(buf_size == 0)) { + uacpi_object_name name = uacpi_namespace_node_name(node); + + uacpi_error( + "%.4s._CID: empty EISA ID string (sub-object %zu)\n", + name.text, i + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + + size += buf_size; + if (uacpi_unlikely(size < buf_size)) { + uacpi_object_name name = uacpi_namespace_node_name(node); + + uacpi_error( + "%.4s._CID: buffer size overflow (+ %zu)\n", + name.text, buf_size + ); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + break; + } + + case UACPI_OBJECT_INTEGER: + size += PNP_ID_LENGTH; + break; + default: { + uacpi_object_name name = uacpi_namespace_node_name(node); + + uacpi_error( + "%.4s._CID: invalid package sub-object %zu type: %s\n", + name.text, i, + uacpi_object_type_to_string(object->type) + ); + return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; + } + } + } + + list = uacpi_kernel_alloc(size); + if (uacpi_unlikely(list == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + list->num_ids = num_ids; + list->size = size - sizeof(uacpi_pnp_id_list); + + id_buffer = UACPI_PTR_ADD(list, sizeof(uacpi_pnp_id_list)); + id_buffer += num_ids * sizeof(uacpi_id_string); + + for (i = 0; i < num_ids; ++i) { + object = objects[i]; + id = &list->ids[i]; + + switch (object->type) { + case UACPI_OBJECT_STRING: { + uacpi_buffer *buf = object->buffer; + + id->size = buf->size; + id->value = id_buffer; + + uacpi_memcpy(id->value, buf->text, id->size); + id->value[id->size - 1] = '\0'; + break; + } + + case UACPI_OBJECT_INTEGER: + id->size = PNP_ID_LENGTH; + id->value = id_buffer; + 
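            /* an integer _CID is a compressed EISA ID; expand it to its 7-character text form */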
uacpi_eisa_id_to_string(object->integer, id_buffer); + break; + } + + id_buffer += id->size; + } + + uacpi_object_unref(cid_ret); + *out_list = list; + return ret; +} + +void uacpi_free_pnp_id_list(uacpi_pnp_id_list *list) +{ + if (list == UACPI_NULL) + return; + + uacpi_free(list, sizeof(uacpi_pnp_id_list) + list->size); +} + +uacpi_status uacpi_eval_sta(uacpi_namespace_node *node, uacpi_u32 *flags) +{ + uacpi_status ret; + uacpi_u64 value = 0; + + ret = uacpi_eval_integer(node, "_STA", UACPI_NULL, &value); + + /* + * ACPI 6.5 specification: + * If a device object (including the processor object) does not have + * an _STA object, then OSPM assumes that all of the above bits are + * set (i.e., the device is present, enabled, shown in the UI, + * and functioning). + */ + if (ret == UACPI_STATUS_NOT_FOUND) { + value = 0xFFFFFFFF; + ret = UACPI_STATUS_OK; + } + + *flags = value; + return ret; +} + +uacpi_status uacpi_eval_adr(uacpi_namespace_node *node, uacpi_u64 *out) +{ + return uacpi_eval_integer(node, "_ADR", UACPI_NULL, out); +} + +#define CLS_REPR_SIZE 7 + +static uacpi_u8 extract_package_byte_or_zero(uacpi_package *pkg, uacpi_size i) +{ + uacpi_object *obj; + + if (uacpi_unlikely(pkg->count <= i)) + return 0; + + obj = pkg->objects[i]; + if (uacpi_unlikely(obj->type != UACPI_OBJECT_INTEGER)) + return 0; + + return obj->integer; +} + +uacpi_status uacpi_eval_cls( + uacpi_namespace_node *node, uacpi_id_string **out_id +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_package *pkg; + uacpi_u8 class_codes[3]; + uacpi_id_string *id_string; + + ret = uacpi_eval_typed( + node, "_CLS", UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, &obj + ); + if (ret != UACPI_STATUS_OK) + return ret; + + pkg = obj->package; + class_codes[0] = extract_package_byte_or_zero(pkg, 0); + class_codes[1] = extract_package_byte_or_zero(pkg, 1); + class_codes[2] = extract_package_byte_or_zero(pkg, 2); + + id_string = uacpi_kernel_alloc(sizeof(uacpi_id_string) + CLS_REPR_SIZE); + if (uacpi_unlikely(id_string == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + id_string->size = CLS_REPR_SIZE; + id_string->value = UACPI_PTR_ADD(id_string, sizeof(uacpi_id_string)); + + uacpi_snprintf( + id_string->value, CLS_REPR_SIZE, "%02X%02X%02X", + class_codes[0], class_codes[1], class_codes[2] + ); + +out: + if (uacpi_likely_success(ret)) + *out_id = id_string; + + uacpi_object_unref(obj); + return ret; +} + +uacpi_status uacpi_eval_uid( + uacpi_namespace_node *node, uacpi_id_string **out_uid +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_id_string *id_string; + uacpi_u32 size; + + ret = uacpi_eval_typed( + node, "_UID", UACPI_NULL, + UACPI_OBJECT_INTEGER_BIT | UACPI_OBJECT_STRING_BIT, + &obj + ); + if (ret != UACPI_STATUS_OK) + return ret; + + if (obj->type == UACPI_OBJECT_STRING) { + size = obj->buffer->size; + if (uacpi_unlikely(size == 0 || size > 0xE0000000)) { + uacpi_object_name name = uacpi_namespace_node_name(node); + + uacpi_error( + "invalid %.4s._UID string size: %u\n", + name.text, size + ); + ret = UACPI_STATUS_AML_BAD_ENCODING; + goto out; + } + } else { + size = uacpi_snprintf( + UACPI_NULL, 0, "%"UACPI_PRIu64, UACPI_FMT64(obj->integer) + ) + 1; + } + + id_string = uacpi_kernel_alloc(sizeof(uacpi_id_string) + size); + if (uacpi_unlikely(id_string == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + id_string->value = UACPI_PTR_ADD(id_string, sizeof(uacpi_id_string)); + id_string->size = size; + + if (obj->type == UACPI_OBJECT_STRING) { + uacpi_memcpy(id_string->value, 
obj->buffer->text, size); + id_string->value[size - 1] = '\0'; + } else { + uacpi_snprintf( + id_string->value, id_string->size, "%"UACPI_PRIu64, + UACPI_FMT64(obj->integer) + ); + } + +out: + if (uacpi_likely_success(ret)) + *out_uid = id_string; + + uacpi_object_unref(obj); + return ret; +} + +static uacpi_bool matches_any( + uacpi_id_string *id, const uacpi_char *const *ids +) +{ + uacpi_size i; + + for (i = 0; ids[i]; ++i) { + if (uacpi_strcmp(id->value, ids[i]) == 0) + return UACPI_TRUE; + } + + return UACPI_FALSE; +} + +static uacpi_status uacpi_eval_dstate_method_template( + uacpi_namespace_node *parent, uacpi_char *template, uacpi_u8 num_methods, + uacpi_u8 *out_values +) +{ + uacpi_u8 i; + uacpi_status ret = UACPI_STATUS_NOT_FOUND, eval_ret; + uacpi_object *obj; + + // We expect either _SxD or _SxW, so increment template[2] + for (i = 0; i < num_methods; ++i, template[2]++) { + eval_ret = uacpi_eval_typed( + parent, template, UACPI_NULL, UACPI_OBJECT_INTEGER_BIT, &obj + ); + if (eval_ret == UACPI_STATUS_OK) { + ret = UACPI_STATUS_OK; + out_values[i] = obj->integer; + uacpi_object_unref(obj); + continue; + } + + out_values[i] = 0xFF; + if (uacpi_unlikely(eval_ret != UACPI_STATUS_NOT_FOUND)) { + const char *path; + + path = uacpi_namespace_node_generate_absolute_path(parent); + uacpi_warn( + "failed to evaluate %s.%s: %s\n", + path, template, uacpi_status_to_string(eval_ret) + ); + uacpi_free_dynamic_string(path); + } + } + + return ret; +} + +#define NODE_INFO_EVAL_ADD_ID(name) \ + if (uacpi_eval_##name(node, &name) == UACPI_STATUS_OK) { \ + size += name->size; \ + if (uacpi_unlikely(size < name->size)) { \ + ret = UACPI_STATUS_AML_BAD_ENCODING; \ + goto out; \ + } \ + } + +#define NODE_INFO_COPY_ID(name, flag) \ + if (name != UACPI_NULL) { \ + flags |= UACPI_NS_NODE_INFO_HAS_##flag; \ + info->name.value = cursor; \ + info->name.size = name->size; \ + uacpi_memcpy(cursor, name->value, name->size); \ + cursor += name->size; \ + } else { \ + uacpi_memzero(&info->name, sizeof(*name)); \ + } \ + +uacpi_status uacpi_get_namespace_node_info( + uacpi_namespace_node *node, uacpi_namespace_node_info **out_info +) +{ + uacpi_status ret = UACPI_STATUS_OK; + uacpi_u32 size = sizeof(uacpi_namespace_node_info); + uacpi_object *obj; + uacpi_namespace_node_info *info; + uacpi_id_string *hid = UACPI_NULL, *uid = UACPI_NULL, *cls = UACPI_NULL; + uacpi_pnp_id_list *cid = UACPI_NULL; + uacpi_char *cursor; + uacpi_u64 adr = 0; + uacpi_u8 flags = 0; + uacpi_u8 sxd[4], sxw[5]; + + obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (obj->type == UACPI_OBJECT_DEVICE || + obj->type == UACPI_OBJECT_PROCESSOR) { + char dstate_method_template[5] = { '_', 'S', '1', 'D', '\0' }; + + NODE_INFO_EVAL_ADD_ID(hid) + NODE_INFO_EVAL_ADD_ID(uid) + NODE_INFO_EVAL_ADD_ID(cls) + NODE_INFO_EVAL_ADD_ID(cid) + + if (uacpi_eval_adr(node, &adr) == UACPI_STATUS_OK) + flags |= UACPI_NS_NODE_INFO_HAS_ADR; + + if (uacpi_eval_dstate_method_template( + node, dstate_method_template, sizeof(sxd), sxd + ) == UACPI_STATUS_OK) + flags |= UACPI_NS_NODE_INFO_HAS_SXD; + + dstate_method_template[2] = '0'; + dstate_method_template[3] = 'W'; + + if (uacpi_eval_dstate_method_template( + node, dstate_method_template, sizeof(sxw), sxw + ) == UACPI_STATUS_OK) + flags |= UACPI_NS_NODE_INFO_HAS_SXW; + } + + info = uacpi_kernel_alloc(size); + if (uacpi_unlikely(info == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + info->size = size; + cursor = 
UACPI_PTR_ADD(info, sizeof(uacpi_namespace_node_info)); + info->name = uacpi_namespace_node_name(node); + info->type = obj->type; + info->num_params = info->type == UACPI_OBJECT_METHOD ? obj->method->args : 0; + + info->adr = adr; + if (flags & UACPI_NS_NODE_INFO_HAS_SXD) + uacpi_memcpy(info->sxd, sxd, sizeof(sxd)); + else + uacpi_memzero(info->sxd, sizeof(info->sxd)); + + if (flags & UACPI_NS_NODE_INFO_HAS_SXW) + uacpi_memcpy(info->sxw, sxw, sizeof(sxw)); + else + uacpi_memzero(info->sxw, sizeof(info->sxw)); + + if (cid != UACPI_NULL) { + uacpi_u32 i; + + uacpi_memcpy(&info->cid, cid, cid->size + sizeof(*cid)); + cursor += cid->num_ids * sizeof(uacpi_id_string); + + for (i = 0; i < cid->num_ids; ++i) { + info->cid.ids[i].value = cursor; + cursor += info->cid.ids[i].size; + } + + flags |= UACPI_NS_NODE_INFO_HAS_CID; + } else { + uacpi_memzero(&info->cid, sizeof(*cid)); + } + + NODE_INFO_COPY_ID(hid, HID) + NODE_INFO_COPY_ID(uid, UID) + NODE_INFO_COPY_ID(cls, CLS) + +out: + if (uacpi_likely_success(ret)) { + info->flags = flags; + *out_info = info; + } + + uacpi_free_id_string(hid); + uacpi_free_id_string(uid); + uacpi_free_id_string(cls); + uacpi_free_pnp_id_list(cid); + return ret; +} + +void uacpi_free_namespace_node_info(uacpi_namespace_node_info *info) +{ + if (info == UACPI_NULL) + return; + + uacpi_free(info, info->size); +} + +uacpi_bool uacpi_device_matches_pnp_id( + uacpi_namespace_node *node, const uacpi_char *const *ids +) +{ + uacpi_status st; + uacpi_bool ret = UACPI_FALSE; + uacpi_id_string *id = UACPI_NULL; + uacpi_pnp_id_list *id_list = UACPI_NULL; + + st = uacpi_eval_hid(node, &id); + if (st == UACPI_STATUS_OK && matches_any(id, ids)) { + ret = UACPI_TRUE; + goto out; + } + + st = uacpi_eval_cid(node, &id_list); + if (st == UACPI_STATUS_OK) { + uacpi_size i; + + for (i = 0; i < id_list->num_ids; ++i) { + if (matches_any(&id_list->ids[i], ids)) { + ret = UACPI_TRUE; + goto out; + } + } + } + +out: + uacpi_free_id_string(id); + uacpi_free_pnp_id_list(id_list); + return ret; +} + +struct device_find_ctx { + const uacpi_char *const *target_hids; + void *user; + uacpi_iteration_callback cb; +}; + +static uacpi_iteration_decision find_one_device( + void *opaque, uacpi_namespace_node *node, uacpi_u32 depth +) +{ + struct device_find_ctx *ctx = opaque; + uacpi_status ret; + uacpi_u32 flags; + + if (!uacpi_device_matches_pnp_id(node, ctx->target_hids)) + return UACPI_ITERATION_DECISION_CONTINUE; + + ret = uacpi_eval_sta(node, &flags); + if (uacpi_unlikely_error(ret)) + return UACPI_ITERATION_DECISION_NEXT_PEER; + + if (!(flags & ACPI_STA_RESULT_DEVICE_PRESENT) && + !(flags & ACPI_STA_RESULT_DEVICE_FUNCTIONING)) + return UACPI_ITERATION_DECISION_NEXT_PEER; + + return ctx->cb(ctx->user, node, depth); +} + + +uacpi_status uacpi_find_devices_at( + uacpi_namespace_node *parent, const uacpi_char *const *hids, + uacpi_iteration_callback cb, void *user +) +{ + struct device_find_ctx ctx = { 0 }; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ctx.target_hids = hids; + ctx.user = user; + ctx.cb = cb; + + return uacpi_namespace_for_each_child( + parent, find_one_device, UACPI_NULL, UACPI_OBJECT_DEVICE_BIT, + UACPI_MAX_DEPTH_ANY, &ctx + ); +} + +uacpi_status uacpi_find_devices( + const uacpi_char *hid, uacpi_iteration_callback cb, void *user +) +{ + const uacpi_char *hids[2] = { + UACPI_NULL, UACPI_NULL + }; + + hids[0] = hid; + + return uacpi_find_devices_at(uacpi_namespace_root(), hids, cb, user); +} + +uacpi_status 
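/*
 * Usage sketch for the device-matching helpers above, assuming the
 * namespace is already loaded: count every device whose _HID/_CID
 * matches a given PNP id. The callback shape mirrors find_one_device();
 * `count_cb' and `count_pnp_matches' are hypothetical names.
 */
static uacpi_iteration_decision count_cb(
    void *user, uacpi_namespace_node *node, uacpi_u32 depth
)
{
    uacpi_u32 *count = user;

    (*count)++;
    return UACPI_ITERATION_DECISION_CONTINUE;
}

static uacpi_u32 count_pnp_matches(const uacpi_char *id)
{
    uacpi_u32 count = 0;

    uacpi_find_devices(id, count_cb, &count);
    return count;
}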
uacpi_set_interrupt_model(uacpi_interrupt_model model) +{ + uacpi_status ret; + uacpi_object *arg; + uacpi_object_array args; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + arg = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(arg == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + arg->integer = model; + args.objects = &arg; + args.count = 1; + + ret = uacpi_eval(uacpi_namespace_root(), "_PIC", &args, UACPI_NULL); + uacpi_object_unref(arg); + + if (ret == UACPI_STATUS_NOT_FOUND) + ret = UACPI_STATUS_OK; + + return ret; +} + +uacpi_status uacpi_get_pci_routing_table( + uacpi_namespace_node *parent, uacpi_pci_routing_table **out_table +) +{ + uacpi_status ret; + uacpi_object *obj, *entry_obj, *elem_obj; + uacpi_package *table_pkg, *entry_pkg; + uacpi_pci_routing_table_entry *entry; + uacpi_pci_routing_table *table; + uacpi_size size, i; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + obj = uacpi_namespace_node_get_object(parent); + if (uacpi_unlikely(obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_eval_typed( + parent, "_PRT", UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, &obj + ); + if (uacpi_unlikely_error(ret)) + return ret; + + table_pkg = obj->package; + if (uacpi_unlikely(table_pkg->count == 0 || table_pkg->count > 1024)) { + uacpi_error("invalid number of _PRT entries: %zu\n", table_pkg->count); + uacpi_object_unref(obj); + return UACPI_STATUS_AML_BAD_ENCODING; + } + + size = table_pkg->count * sizeof(uacpi_pci_routing_table_entry); + table = uacpi_kernel_alloc(sizeof(uacpi_pci_routing_table) + size); + if (uacpi_unlikely(table == UACPI_NULL)) { + uacpi_object_unref(obj); + return UACPI_STATUS_OUT_OF_MEMORY; + } + table->num_entries = table_pkg->count; + + for (i = 0; i < table_pkg->count; ++i) { + entry_obj = table_pkg->objects[i]; + + if (uacpi_unlikely(entry_obj->type != UACPI_OBJECT_PACKAGE)) { + uacpi_error("_PRT sub-object %zu is not a package: %s\n", + i, uacpi_object_type_to_string(entry_obj->type)); + goto out_bad_encoding; + } + + entry_pkg = entry_obj->package; + if (uacpi_unlikely(entry_pkg->count != 4)) { + uacpi_error("invalid _PRT sub-package entry count %zu\n", + entry_pkg->count); + goto out_bad_encoding; + } + + entry = &table->entries[i]; + + elem_obj = entry_pkg->objects[0]; + if (uacpi_unlikely(elem_obj->type != UACPI_OBJECT_INTEGER)) { + uacpi_error("invalid _PRT sub-package %zu address type: %s\n", + i, uacpi_object_type_to_string(elem_obj->type)); + goto out_bad_encoding; + } + entry->address = elem_obj->integer; + + elem_obj = entry_pkg->objects[1]; + if (uacpi_unlikely(elem_obj->type != UACPI_OBJECT_INTEGER)) { + uacpi_error("invalid _PRT sub-package %zu pin type: %s\n", + i, uacpi_object_type_to_string(elem_obj->type)); + goto out_bad_encoding; + } + entry->pin = elem_obj->integer; + + elem_obj = entry_pkg->objects[2]; + switch (elem_obj->type) { + case UACPI_OBJECT_STRING: + ret = uacpi_object_resolve_as_aml_namepath( + elem_obj, parent, &entry->source + ); + if (uacpi_unlikely_error(ret)) { + uacpi_error("unable to lookup _PRT source %s: %s\n", + elem_obj->buffer->text, uacpi_status_to_string(ret)); + goto out_bad_encoding; + } + break; + case UACPI_OBJECT_INTEGER: + entry->source = UACPI_NULL; + break; + default: + uacpi_error("invalid _PRT sub-package %zu source type: %s\n", + i, uacpi_object_type_to_string(elem_obj->type)); + goto out_bad_encoding; + } + + elem_obj = entry_pkg->objects[3]; + if 
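/*
 * Usage sketch (hypothetical caller): once the interrupt model has been
 * declared through uacpi_set_interrupt_model() above, a host bridge
 * driver could fetch and walk its routing table like this.
 */
static void walk_prt(uacpi_namespace_node *bridge)
{
    uacpi_pci_routing_table *prt;
    uacpi_size i;

    if (uacpi_get_pci_routing_table(bridge, &prt) != UACPI_STATUS_OK)
        return;

    for (i = 0; i < prt->num_entries; ++i) {
        uacpi_pci_routing_table_entry *entry = &prt->entries[i];

        /*
         * entry->address/pin identify the slot and INTx line;
         * entry->source is a link device node, or UACPI_NULL when
         * entry->index holds a raw GSI instead.
         */
        (void)entry;
    }

    uacpi_free_pci_routing_table(prt);
}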
(uacpi_unlikely(elem_obj->type != UACPI_OBJECT_INTEGER)) { + uacpi_error("invalid _PRT sub-package %zu source index type: %s\n", + i, uacpi_object_type_to_string(elem_obj->type)); + goto out_bad_encoding; + } + entry->index = elem_obj->integer; + } + + uacpi_object_unref(obj); + *out_table = table; + return UACPI_STATUS_OK; + +out_bad_encoding: + uacpi_object_unref(obj); + uacpi_free_pci_routing_table(table); + return UACPI_STATUS_AML_BAD_ENCODING; +} + +void uacpi_free_pci_routing_table(uacpi_pci_routing_table *table) +{ + if (table == UACPI_NULL) + return; + + uacpi_free( + table, + sizeof(uacpi_pci_routing_table) + + table->num_entries * sizeof(uacpi_pci_routing_table_entry) + ); +} + +void uacpi_free_dynamic_string(const uacpi_char *str) +{ + if (str == UACPI_NULL) + return; + + uacpi_free((void*)str, uacpi_strlen(str) + 1); +} + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/dev/cons/cons.c b/sys/dev/cons/cons.c index 398b6ca..8470a60 100644 --- a/sys/dev/cons/cons.c +++ b/sys/dev/cons/cons.c @@ -31,6 +31,8 @@ #include <sys/types.h> #include <sys/ascii.h> #include <sys/device.h> +#include <sys/errno.h> +#include <sys/panic.h> #include <dev/video/fbdev.h> #include <dev/cons/font.h> #include <dev/cons/cons.h> @@ -38,6 +40,12 @@ #include <vm/dynalloc.h> #include <string.h> +#define HIDE_CURSOR(SCR) \ + cons_draw_cursor((SCR), (SCR)->bg) + +#define SHOW_CURSOR(SCR) \ + cons_draw_cursor((SCR), rgb_invert((SCR)->bg)) + /* Console background from kconf */ #if defined(__CONSOLE_BG) #define CONSOLE_BG __CONSOLE_BG @@ -52,29 +60,29 @@ #define CONSOLE_FG 0x00AA00 #endif /* __CONSOLE_FG */ - struct cons_screen g_root_scr = {0}; static struct cdevsw cons_cdevsw; -/* - * Create a chracter descriptor for drawing - * characters. - * - * @c: Character. - * @fg: Foreground. - * @bg: Background. - */ -static inline struct cons_char -cons_make_char(char c, uint32_t fg, uint32_t bg) +static void cons_draw_cursor(struct cons_screen *scr, uint32_t color); +static int cons_handle_special(struct cons_screen *scr, char c); + +static uint32_t +rgb_invert(uint32_t rgb) { - struct cons_char ch; + uint8_t r, g, b; + uint32_t ret; + + r = (rgb >> 16) & 0xFF; + g = (rgb >> 8) & 0xFF; + b = rgb & 0xFF; - ch.fg = fg; - ch.bg = bg; - ch.c = c; - return ch; + ret = (255 - r) << 16; + ret |= (255 - g) << 8; + ret |= 255 - b; + return ret; } + /* * Render a character onto the screen. * @@ -84,10 +92,10 @@ cons_make_char(char c, uint32_t fg, uint32_t bg) * @y: Y position of char. */ static void -cons_draw_char(struct cons_screen *scr, struct cons_char ch, - uint32_t x, uint32_t y) +cons_draw_char(struct cons_screen *scr, struct cons_char ch) { size_t idx; + uint32_t x, y; const uint8_t *glyph; if (scr->fb_mem == NULL) { @@ -95,12 +103,131 @@ cons_draw_char(struct cons_screen *scr, struct cons_char ch, } glyph = &CONS_FONT[(int)ch.c*16]; + x = ch.x; + y = ch.y; + for (uint32_t cy = 0; cy < FONT_HEIGHT; ++cy) { + idx = fbdev_get_index(&scr->fbdev, x + (FONT_WIDTH - 1), y + cy); for (uint32_t cx = 0; cx < FONT_WIDTH; ++cx) { - idx = fbdev_get_index(&scr->fbdev, x + (FONT_WIDTH - 1) - cx, y + cy); - scr->fb_mem[idx] = ISSET(glyph[cy], BIT(cx)) ? ch.fg : ch.bg; + scr->fb_mem[idx--] = ISSET(glyph[cy], BIT(cx)) ? ch.fg : ch.bg; + } + } +} + +/* + * Internal helper - flush console row + * + * @row: Row to flush. 
+ */
+static int
+cons_flush_row(struct cons_screen *scr, uint32_t row)
+{
+    struct cons_buf *bp;
+    struct cons_char cc;
+
+    bp = scr->ob[row];
+    if (ISSET(bp->flags, CONS_BUF_CLEAN)) {
+        return -EIO;
+    }
+
+    for (int j = 0; j < bp->len; ++j) {
+        if (cons_obuf_pop(bp, &cc) != 0) {
+            continue;
+        }
+
+        cons_draw_char(scr, cc);
+        bp->flags |= CONS_BUF_CLEAN;
+    }
+
+    return 0;
+}
+
+/*
+ * Internal helper - flush console
+ */
+static int
+cons_flush(struct cons_screen *scr)
+{
+    for (int i = 0; i < scr->nrows; ++i) {
+        cons_flush_row(scr, i);
+    }
+    return 0;
+}
+
+/*
+ * Handle a special character (e.g., "\t", "\n", etc.)
+ *
+ * @scr: Screen to handle this on.
+ * @c: Char to handle.
+ *
+ * Returns 0 if handled, otherwise -1.
+ */
+static int
+cons_handle_special(struct cons_screen *scr, char c)
+{
+    struct cons_buf *bp;
+
+    if (scr->ch_col >= scr->ncols - 20) {
+        scr->ch_col = 0;
+        cons_handle_special(scr, '\n');
+    }
+
+    switch (c) {
+    case ASCII_HT:
+        HIDE_CURSOR(scr);
+        scr->curs_col += 4;
+        scr->ch_col += 4;
+        if (scr->ch_col >= scr->ncols - 1) {
+            cons_handle_special(scr, '\n');
+        }
+        SHOW_CURSOR(scr);
+        return 0;
+    case ASCII_NUL:
+        return 0;
+    case ASCII_BS:
+        bp = scr->ob[scr->ch_row];
+        if (bp->head > bp->tail) {
+            --bp->head;
+        }
+
+        HIDE_CURSOR(scr);
+        if (scr->ch_col > 0 && scr->curs_col > 0) {
+            --scr->ch_col;
+            --scr->curs_col;
+        }
+        SHOW_CURSOR(scr);
+        return 0;
+    case ASCII_LF:
+        /* Are we past the screen height? */
+        if (scr->ch_row >= scr->nrows - 1) {
+            cons_clear_scr(scr, scr->bg);
+            return 0;
+        }
+
+        HIDE_CURSOR(scr);
+
+        /* Make a newline */
+        cons_flush(scr);
+        ++scr->ch_row;
+        scr->ch_col = 0;
+        cons_flush(scr);
+
+        /* Update cursor */
+        scr->curs_row += 1;
+        scr->curs_col = 0;
+        SHOW_CURSOR(scr);
+        return 0;
+    case ASCII_FF:
+        /* Treat form feed as a "flush the screen" request */
+        cons_flush(scr);
+        return 0;
+    }
+
+    return -1;
+}

 static void
@@ -108,10 +235,16 @@ cons_draw_cursor(struct cons_screen *scr, uint32_t color)
 {
     size_t idx;

+    /* Past screen width? */
+    if (scr->curs_col >= scr->ncols) {
+        scr->curs_col = 0;
+        scr->curs_row++;
+    }
+
     for (uint32_t cy = 0; cy < FONT_HEIGHT; ++cy) {
+        idx = fbdev_get_index(&scr->fbdev, scr->curs_col * FONT_WIDTH, (scr->curs_row * FONT_HEIGHT) + cy);
         for (uint32_t cx = 0; cx < FONT_WIDTH; ++cx) {
-            idx = fbdev_get_index(&scr->fbdev, (scr->curs_col * FONT_WIDTH) + cx, (scr->curs_row * FONT_HEIGHT) + cy);
-            scr->fb_mem[idx] = color;
+            scr->fb_mem[idx++] = color;
         }
     }
 }
@@ -122,41 +255,78 @@ cons_draw_cursor(struct cons_screen *scr, uint32_t color)
  * @scr: Screen to clear.
  * @bg: Color to clear it to.
  */
-static void
+void
 cons_clear_scr(struct cons_screen *scr, uint32_t bg)
 {
     struct fbdev fbdev = scr->fbdev;

+    cons_flush(scr);
+    HIDE_CURSOR(scr);
+
+    scr->ch_col = 0;
+    scr->ch_row = 0;
+    scr->curs_col = 0;
+    scr->curs_row = 0;
+
     for (size_t i = 0; i < fbdev.height * fbdev.pitch; ++i) {
         scr->fb_mem[i] = bg;
     }
+
+    SHOW_CURSOR(scr);
 }
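/*
 * A sketch of the cursor discipline this file follows: every position
 * change is bracketed by HIDE_CURSOR/SHOW_CURSOR so the old cell is
 * repainted with the background before the cursor is redrawn (inverted
 * background) at the new cell. `cons_move_cursor' is a hypothetical
 * helper, shown only to illustrate the pattern.
 */
static void
cons_move_cursor(struct cons_screen *scr, uint32_t col, uint32_t row)
{
    HIDE_CURSOR(scr);           /* erase the cursor at the old cell */
    scr->curs_col = col;
    scr->curs_row = row;
    SHOW_CURSOR(scr);           /* draw it at the new cell */
}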
 /*
- * Handle a special character (e.g "\t", "\n", etc)
- *
- * @scr: Screen to handle this on.
- * @c: Char to handle.
+ * Quickly put a character on the screen.
+ * XXX: Does not acquire the screen's lock or show/hide the cursor.
  *
- * Returns 0 if handled, otherwise -1.
+ * @scr: Screen.
+ * @c: Character to draw.
  */
-static int
-cons_handle_special(struct cons_screen *scr, char c)
+static void
+cons_fast_putch(struct cons_screen *scr, char c)
 {
-    switch (c) {
-    case ASCII_LF:
-        /* Make a newline */
+    struct cons_char cc;
+    struct cons_buf *bp;
+    int ansi;
+
+    /* Any non-zero return means the ANSI state machine ate the byte */
+    ansi = ansi_feed(&scr->ansi_s, c);
+    if (ansi != 0) {
+        c = ASCII_NUL;
+    }
+
+    /* Handle specials */
+    if (cons_handle_special(scr, c) == 0) {
+        return;
+    }
+
+    /* Create a new character */
+    cc.c = c;
+    cc.fg = scr->fg;
+    cc.bg = scr->bg;
+    cc.x = scr->ch_col * FONT_WIDTH;
+    cc.y = scr->ch_row * FONT_HEIGHT;
+
+    /* Push our new character */
+    bp = scr->ob[scr->ch_row];
+    bp->flags &= ~CONS_BUF_CLEAN;
+    cons_obuf_push(bp, cc);
+    ++scr->ch_col;
+
+    /* Check screen bounds */
+    if (cc.x >= (scr->ncols * FONT_WIDTH) - 1) {
         scr->ch_col = 0;
         ++scr->ch_row;
+    }

-        cons_draw_cursor(scr, scr->bg);
+    ++scr->curs_col;
+    if (scr->curs_col > scr->ncols - 1) {
         scr->curs_col = 0;
-        scr->curs_row++;
-        cons_draw_cursor(scr, scr->last_chr.fg);
-        return 0;
+        if (scr->curs_row < scr->nrows)
+            ++scr->curs_row;
     }
-
-    return -1;
 }

 /*
@@ -165,19 +335,111 @@ cons_handle_special(struct cons_screen *scr, char c)
  */
 static int
 dev_write(dev_t dev, struct sio_txn *sio, int flags)
 {
-    char *p;
+    cons_putstr(&g_root_scr, sio->buf, sio->len);
+    cons_flush(&g_root_scr);
+    return sio->len;
+}

-    p = sio->buf;
-    spinlock_acquire(&g_root_scr.lock);
+/*
+ * Character device function.
+ */
+static int
+dev_read(dev_t dev, struct sio_txn *sio, int flags)
+{
+    struct cons_input input;
+    uint8_t *p;
+    int retval;
+    size_t n;
+
+    p = (uint8_t *)sio->buf;
+    n = sio->len;
+
+    /* Each record is two bytes (char + scancode), so `n' must be even */
+    if ((n & 1) != 0) {
+        return -EFAULT;
+    }

-    for (size_t i = 0; i < sio->len; ++i) {
-        cons_putch(&g_root_scr, p[i]);
+    retval = cons_ibuf_pop(&g_root_scr, &input);
+    if (retval < 0) {
+        return -EAGAIN;
     }

+    spinlock_acquire(&g_root_scr.lock);
+    for (;;) {
+        /* Buffer too small */
+        if (n == 0) {
+            break;
+        }
+
+        *p++ = input.chr;
+        *p++ = input.scancode;
+        n -= 2;
+
+        /* Try to get the next byte */
+        retval = cons_ibuf_pop(&g_root_scr, &input);
+        if (retval < 0) {
+            break;
+        }
+    }
     spinlock_release(&g_root_scr.lock);
     return sio->len;
 }

+static int
+cons_init_bufs(struct cons_screen *scr)
+{
+    struct cons_buf *bp;
+    size_t ob_len;
+
+    scr->ib = cons_new_buf(CONS_BUF_INPUT, scr->ncols);
+    if (scr->ib == NULL) {
+        panic("out of memory\n");
+    }
+
+    ob_len = sizeof(*scr->ob) * scr->nrows;
+    scr->ob = dynalloc(ob_len);
+    if (scr->ob == NULL) {
+        panic("out of memory\n");
+    }
+
+    /* Allocate all output buffers per line */
+    for (size_t i = 0; i < scr->nrows; ++i) {
+        bp = cons_new_buf(CONS_BUF_OUTPUT, scr->ncols);
+        if (bp == NULL) {
+            panic("out of memory\n");
+        }
+        bp->flags |= CONS_BUF_CLEAN;
+        scr->ob[i] = bp;
+    }
+
+    return 0;
+}
+
+/*
+ * Reset console color.
+ */
+void
+cons_reset_color(struct cons_screen *scr)
+{
+    scr->fg = CONSOLE_FG;
+    scr->bg = CONSOLE_BG;
+}
+
+void
+cons_update_color(struct cons_screen *scr, uint32_t fg, uint32_t bg)
+{
+    scr->fg = fg;
+    scr->bg = bg;
+}
+
+void
+cons_reset_cursor(struct cons_screen *scr)
+{
+    HIDE_CURSOR(scr);
+    scr->ch_col = 0;
+    scr->ch_row = 0;
+    scr->curs_col = 0;
+    scr->curs_row = 0;
+    SHOW_CURSOR(scr);
+}
+
 /*
  * Put a character on the screen.
* @@ -187,44 +449,38 @@ dev_write(dev_t dev, struct sio_txn *sio, int flags) int cons_putch(struct cons_screen *scr, char c) { - struct cons_char cons_chr; - - if (scr->ch_col > scr->ncols) { - /* Make a newline as we past the max col */ - scr->ch_col = 0; - ++scr->ch_row; - } + spinlock_acquire(&scr->lock); + HIDE_CURSOR(scr); - if (scr->curs_row >= scr->nrows) { - /* Went over the screen size */ - /* TODO: Scroll instead of just clearing the screen */ - scr->ch_col = 0; - scr->ch_row = 0; - cons_clear_scr(scr, scr->bg); + cons_fast_putch(scr, c); - scr->curs_col = 0; - scr->curs_row = 0; - cons_draw_cursor(scr, scr->last_chr.fg); - } + SHOW_CURSOR(scr); + spinlock_release(&scr->lock); + return 0; +} - /* - * If this is a special char that we can handle - * then handle it and return. - */ - if (cons_handle_special(scr, c) == 0) { - return 0; - } +/* + * Put a string on the screen. + * + * @scr: Screen. + * @s: String to draw. + * @l: Length of s. + */ +int +cons_putstr(struct cons_screen *scr, const char *s, size_t len) +{ + const char *p = s; - cons_chr = cons_make_char(c, scr->fg, scr->bg); - scr->last_chr = cons_chr; + spinlock_acquire(&scr->lock); + HIDE_CURSOR(scr); - /* Draw cursor and character */ - scr->curs_col++; - cons_draw_cursor(scr, scr->last_chr.fg); - cons_draw_char(scr, cons_chr, scr->ch_col * FONT_WIDTH, - scr->ch_row * FONT_HEIGHT); + while (len--) { + cons_fast_putch(scr, *p); + ++p; + } - ++scr->ch_col; + SHOW_CURSOR(scr); + spinlock_release(&scr->lock); return 0; } @@ -233,6 +489,8 @@ cons_init(void) { struct fbdev fbdev = fbdev_get(); + g_root_scr.ch_col = 0; + g_root_scr.ch_row = 0; g_root_scr.fg = CONSOLE_FG; g_root_scr.bg = CONSOLE_BG; g_root_scr.fb_mem = fbdev.mem; @@ -240,6 +498,8 @@ cons_init(void) g_root_scr.ncols = fbdev.width / FONT_WIDTH; g_root_scr.fbdev = fbdev; memset(&g_root_scr.lock, 0, sizeof(g_root_scr.lock)); + cons_init_bufs(&g_root_scr); + SHOW_CURSOR(&g_root_scr); } /* @@ -267,6 +527,6 @@ cons_expose(void) } static struct cdevsw cons_cdevsw = { - .read = noread, + .read = dev_read, .write = dev_write }; diff --git a/sys/dev/cons/cons_ansi.c b/sys/dev/cons/cons_ansi.c new file mode 100644 index 0000000..ab1f22a --- /dev/null +++ b/sys/dev/cons/cons_ansi.c @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <dev/cons/cons.h> +#include <dev/cons/ansi.h> +#include <string.h> + +__always_inline static inline bool +is_valid_color(int c) +{ + return c >= '0' && c <= '7'; +} + +static inline void +ansi_reset(struct ansi_state *statep) +{ + memset(statep, 0, sizeof(*statep)); +} + +/* + * Feed a byte into the ANSI escape sequence + * state machine. + * + * @statep: State machine pointer. + * @c: Byte to feed. + * + * On success, `c' is returned. On failure, + * 0 is returned. Values less than 0 indicate + * success with console attributes updated + * (ANSI_UPDATE_*). + */ +int +ansi_feed(struct ansi_state *statep, char c) +{ + /* Standard colors */ + static uint32_t colortab[] = { + ANSI_BLACK, ANSI_RED, + ANSI_GREEN, ANSI_YELLOW, + ANSI_BLUE, ANSI_MAGENTA, + ANSI_CYAN, ANSI_WHITE + }; + + /* + * Handle the control sequence introducer + * bytes. + */ + switch (statep->csi) { + case 0: /* '\033' */ + if (c != '\033') { + return 0; + } + statep->csi = 1; + statep->prev = c; + return c; + case 1: /* '[' */ + if (c != '[') { + ansi_reset(statep); + return 0; + } + statep->csi = 2; + statep->prev = c; + return c; + case 2: + if (c == 'H') { + cons_clear_scr(&g_root_scr, g_root_scr.bg); + return ANSI_UPDATE_CURSOR; + } + break; + } + + if (!statep->set_fg && !statep->set_bg) { + /* Reset attributes? */ + if (statep->reset_color) { + ansi_reset(statep); + cons_reset_color(&g_root_scr); + return ANSI_UPDATE_COLOR; + } + + /* Mark attributes to be reset? */ + if (c == '0') { + statep->reset_color = 1; + statep->prev = c; + return c; + } + + /* Expect foreground */ + if (c != '3') { + ansi_reset(statep); + return 0; + } + statep->set_fg = 1; + statep->prev = c; + return c; + } + + if (statep->set_fg && c != ';') { + /* Make sure this is valid */ + if (!is_valid_color(c)) { + ansi_reset(statep); + return 0; + } + + /* Set the foreground */ + statep->fg = colortab[c - '0']; + statep->set_bg = 1; + statep->set_fg = 0; + statep->prev = c; + return c; + } + + if (statep->set_bg) { + if (c == ';') { + statep->prev = c; + return c; + } + + /* Expect '4' after ';' */ + if (statep->prev == ';' && c != '4') { + ansi_reset(statep); + return 0; + } + + if (c == 'm') { + cons_update_color(&g_root_scr, statep->fg, statep->bg); + ansi_reset(statep); + return ANSI_UPDATE_COLOR; + } + + /* Make sure this is valid */ + if (!is_valid_color(c)) { + ansi_reset(statep); + return 0; + } + + /* Set the background */ + statep->bg = colortab[c - '0']; + statep->prev = c; + return c; + } + + ansi_reset(statep); + return 0; +} diff --git a/sys/dev/cons/cons_buf.c b/sys/dev/cons/cons_buf.c new file mode 100644 index 0000000..84a38ce --- /dev/null +++ b/sys/dev/cons/cons_buf.c @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/errno.h> +#include <dev/cons/consvar.h> +#include <dev/cons/cons.h> +#include <vm/dynalloc.h> +#include <string.h> +#include <assert.h> + +/* + * Create a new console buffer. + * + * @type: Buffer type (CONS_BUF_*) + * @len: Max length of buffer. + */ +struct cons_buf * +cons_new_buf(uint8_t type, size_t len) +{ + struct cons_buf *bp; + size_t alloc_len; + + if ((bp = dynalloc(sizeof(*bp))) == NULL) { + return NULL; + } + + memset(bp, 0, sizeof(*bp)); + bp->type = type; + bp->len = len; + + /* Create the actual buffers now */ + switch (type) { + case CONS_BUF_INPUT: + alloc_len = sizeof(*bp->ibuf) * len; + bp->ibuf = dynalloc(alloc_len); + break; + case CONS_BUF_OUTPUT: + alloc_len = sizeof(*bp->obuf) * len; + bp->obuf = dynalloc(alloc_len); + break; + } + + return bp; +} + +/* + * Push a character to a console output + * buffer. + * + * @bp: Pointer to console buffer. + * @c: Character to push. + */ +int +cons_obuf_push(struct cons_buf *bp, struct cons_char c) +{ + uint8_t next; + int retval = 0; + + if (bp == NULL) { + return -EINVAL; + } + + spinlock_acquire(&bp->lock); + __assert(bp->type == CONS_BUF_OUTPUT); + next = bp->head + 1; + if (next > bp->len) { + retval = -ENOSPC; + goto done; + } + + bp->obuf[bp->head] = c; + bp->head = next; + +done: + spinlock_release(&bp->lock); + return retval; +} + +/* + * Pop a character from the console + * buffer. + * + * @bp: Pointer to console buffer. + * @res: Result is written here. + */ +int +cons_obuf_pop(struct cons_buf *bp, struct cons_char *res) +{ + uint8_t next; + int retval = 0; + + if (bp == NULL || res == NULL) { + return -EINVAL; + } + + __assert(bp->type == CONS_BUF_OUTPUT); + spinlock_acquire(&bp->lock); + + /* Do we have any data left? 
*/ + if (bp->head == bp->tail) { + bp->head = 0; + bp->tail = 0; + retval = -EAGAIN; + goto done; + } + + next = bp->tail + 1; + if (next > bp->len) { + next = 0; + } + + *res = bp->obuf[bp->tail]; + bp->tail = next; + +done: + spinlock_release(&bp->lock); + return retval; +} + +int +cons_ibuf_push(struct cons_screen *scr, struct cons_input in) +{ + struct cons_buf *bp; + uint8_t head_next; + int retval = 0; + + if (scr == NULL) { + return -EINVAL; + } + + bp = scr->ib; + spinlock_acquire(&bp->lock); + __assert(bp->type == CONS_BUF_INPUT); + + head_next = bp->head + 1; + if (head_next > bp->len) { + retval = -ENOSPC; + goto done; + } + + bp->ibuf[bp->head] = in; + bp->head = head_next; + +done: + spinlock_release(&bp->lock); + return retval; +} + +int +cons_ibuf_pop(struct cons_screen *scr, struct cons_input *res) +{ + uint8_t tail_next; + struct cons_buf *bp; + int retval = 0; + + if (scr == NULL || res == NULL) { + return -EINVAL; + } + + bp = scr->ib; + __assert(bp->type == CONS_BUF_INPUT); + spinlock_acquire(&bp->lock); + + /* Do we have any data left? */ + if (bp->head == bp->tail) { + bp->head = 0; + bp->tail = 0; + retval = -EAGAIN; + goto done; + } + + tail_next = bp->tail + 1; + if (tail_next > bp->len) { + tail_next = 0; + } + + *res = bp->ibuf[bp->tail]; + bp->tail = tail_next; + +done: + spinlock_release(&bp->lock); + return retval; +} diff --git a/sys/dev/dcdr/cache.c b/sys/dev/dcdr/cache.c index c44c8ea..33f977e 100644 --- a/sys/dev/dcdr/cache.c +++ b/sys/dev/dcdr/cache.c @@ -126,6 +126,20 @@ struct dcd * dcdr_cachein(struct dcdr *dcdr, void *block, off_t lba) { struct dcd *dcd, *tmp; + struct dcdr_lookup check; + int status; + + /* + * If there is already a block within this + * DCDR, then we simply need to copy the + * new data into the old DCD. + */ + status = dcdr_lookup(dcdr, lba, &check); + if (status == 0) { + dcd = check.dcd_res; + memcpy(dcd->block, block, dcdr->bsize); + return dcd; + } dcd = dynalloc(sizeof(*dcd)); if (dcd == NULL) { diff --git a/sys/dev/dmi/dmi.c b/sys/dev/dmi/dmi.c new file mode 100644 index 0000000..8c7f21c --- /dev/null +++ b/sys/dev/dmi/dmi.c @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/limine.h>
+#include <sys/errno.h>
+#include <sys/param.h>
+#include <sys/driver.h>
+#include <sys/cdefs.h>
+#include <sys/syslog.h>
+#include <dev/dmi/dmi.h>
+#include <dev/acpi/tables.h>
+#include <string.h>
+
+#define DMI_BIOS_INFO 0
+#define DMI_SYSTEM_INFO 1
+#define DMI_PROCESSOR_INFO 4
+#define DMI_END_OF_TABLE 127
+
+/* 1-based string indices within each structure type */
+#define BIOSINFO_VENDOR 0x01
+#define SYSINFO_PRODUCT 0x02
+#define SYSINFO_FAMILY 0x03
+#define PROCINFO_MANUFACT 0x02
+#define PROCINFO_PARTNO 0x06
+
+static struct limine_smbios_request smbios_req = {
+    .id = LIMINE_SMBIOS_REQUEST,
+    .revision = 0
+};
+
+/* DMI/SMBIOS structure header */
+struct __packed dmi_shdr {
+    uint8_t type;
+    uint8_t length;
+    uint16_t handle;
+} *hdrs[DMI_END_OF_TABLE + 1];
+
+/*
+ * Grab a structure header from a type
+ *
+ * @type: A DMI structure type to find
+ *
+ * Returns NULL if not found.
+ */
+static inline struct dmi_shdr *
+dmi_shdr(uint8_t type)
+{
+    struct dmi_shdr *hdr;
+
+    hdr = hdrs[type];
+    if (hdr == NULL) {
+        return NULL;
+    }
+
+    return hdr;
+}
+
+/*
+ * Grab a string from the DMI/SMBIOS formatted
+ * section.
+ *
+ * @hdr: DMI header to lookup string index
+ * @index: 1-based string index
+ *
+ * See section 6.1.3 of the DMTF SMBIOS Reference
+ * Specification
+ */
+static const char *
+dmi_str_index(struct dmi_shdr *hdr, uint8_t index)
+{
+    const char *strdata = PTR_OFFSET(hdr, hdr->length);
+
+    for (uint8_t i = 1; *strdata != '\0'; ++i) {
+        if (i == index) {
+            return strdata;
+        }
+
+        strdata += strlen(strdata) + 1;
+    }
+
+    return NULL;
+}
+
+/*
+ * Get the DMI/SMBIOS structure size from a
+ * header.
+ */
+static size_t
+dmi_struct_size(struct dmi_shdr *hdr)
+{
+    const char *strdata;
+    size_t i = 1;
+
+    strdata = PTR_OFFSET(hdr, hdr->length);
+    while (strdata[i - 1] != '\0' || strdata[i] != '\0') {
+        ++i;
+    }
+
+    return hdr->length + i + 1;
+}
+
+/*
+ * Get the vendor string from the DMI/SMBIOS BIOS
+ * info structure
+ *
+ * Returns NULL if not found.
+ */
+const char *
+dmi_vendor(void)
+{
+    struct dmi_shdr *hdr;
+
+    if ((hdr = dmi_shdr(DMI_BIOS_INFO)) == NULL) {
+        return NULL;
+    }
+
+    return dmi_str_index(hdr, BIOSINFO_VENDOR);
+}
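/*
 * Worked example of the string-section walk in dmi_str_index(): if the
 * formatted area is followed by "Acme\0v1.2\0\0", index 1 yields "Acme",
 * index 2 yields "v1.2", and the double NUL terminates the structure,
 * which is exactly what dmi_struct_size() scans for. A hypothetical
 * caller of the accessor above:
 */
static void
dmi_print_vendor(void)
{
    const char *vendor = dmi_vendor();

    kprintf("BIOS vendor: %s\n", (vendor != NULL) ? vendor : "unknown");
}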
+
+/*
+ * Return the product string from the DMI/SMBIOS System
+ * Info structure
+ *
+ * Returns NULL if not found.
+ */
+const char *
+dmi_product(void)
+{
+    struct dmi_shdr *hdr;
+
+    if ((hdr = dmi_shdr(DMI_SYSTEM_INFO)) == NULL) {
+        return NULL;
+    }
+
+    return dmi_str_index(hdr, SYSINFO_PRODUCT);
+}
+
+/*
+ * Return the product version from the DMI/SMBIOS
+ * System Info structure
+ *
+ * Returns NULL if not found
+ */
+const char *
+dmi_prodver(void)
+{
+    struct dmi_shdr *hdr;
+
+    if ((hdr = dmi_shdr(DMI_SYSTEM_INFO)) == NULL) {
+        return NULL;
+    }
+
+    return dmi_str_index(hdr, SYSINFO_FAMILY);
+}
+
+/*
+ * Return the CPU manufacturer string from the
+ * DMI/SMBIOS Processor Info structure
+ *
+ * Returns NULL if not found
+ */
+const char *
+dmi_cpu_manufact(void)
+{
+    struct dmi_shdr *hdr;
+
+    if ((hdr = dmi_shdr(DMI_PROCESSOR_INFO)) == NULL) {
+        return NULL;
+    }
+
+    return dmi_str_index(hdr, PROCINFO_MANUFACT);
+}
+
+static int
+dmi_init(void)
+{
+    struct dmi_entry32 *entry32 = NULL;
+    struct limine_smbios_response *resp = smbios_req.response;
+    struct dmi_entry64 *entry64 = NULL;
+    struct dmi_shdr *hdr = NULL;
+    size_t scount = 0, smax_len = 0;
+    size_t nbytes = 0, cur_nbytes = 0;
+
+    if (resp == NULL) {
+        return -ENODEV;
+    }
+    if (resp->entry_32 == 0 && resp->entry_64 == 0) {
+        return -ENODEV;
+    }
+
+    if (resp->entry_64 != 0) {
+        entry64 = (void *)resp->entry_64;
+        hdr = (void *)entry64->addr;
+        smax_len = entry64->max_size;
+
+        /*
+         * The 64-bit entry point carries no structure count, only
+         * a maximum table size, so use that size as an upper bound
+         * and let the length check (or the end-of-table marker)
+         * terminate the scan below.
+         */
+        scount = smax_len;
+    } else if (resp->entry_32 != 0) {
+        entry32 = (void *)(uint64_t)resp->entry_32;
+        hdr = (void *)(uint64_t)entry32->addr;
+        scount = entry32->nstruct;
+    } else {
+        return -ENODEV;
+    }
+
+    memset(hdrs, 0, sizeof(hdrs));
+    for (size_t i = 0; i < scount; ++i) {
+        if (hdr->type == DMI_END_OF_TABLE) {
+            break;
+        }
+
+        if (hdr->type < NELEM(hdrs)) {
+            hdrs[hdr->type] = hdr;
+        }
+        cur_nbytes = dmi_struct_size(hdr);
+        if (smax_len > 0 && (nbytes + cur_nbytes) >= smax_len) {
+            break;
+        }
+
+        nbytes += cur_nbytes;
+        hdr = PTR_OFFSET(hdr, cur_nbytes);
+    }
+
+    return 0;
+}
+
+DRIVER_EXPORT(dmi_init);
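/*
 * Sketch of the deferred-driver pattern the AHCI and NVMe code below
 * relies on: DRIVER_DEFER() queues the init routine for a later boot
 * stage, and device entry points spin on DRIVER_DEFERRED() until that
 * init has actually run. `foo_dev_read' is a hypothetical entry point.
 */
static int
foo_dev_read(dev_t dev, struct sio_txn *sio, int flags)
{
    /* Block until the deferred init has completed */
    while (DRIVER_DEFERRED()) {
        md_pause();
    }

    return 0;   /* a real driver would start the transfer here */
}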
diff --git a/sys/dev/ic/ahci.c b/sys/dev/ic/ahci.c
index b483e7a..5dbf4a7 100644
--- a/sys/dev/ic/ahci.c
+++ b/sys/dev/ic/ahci.c
@@ -29,19 +29,36 @@
 #include <sys/types.h>
 #include <sys/driver.h>
+#include <sys/device.h>
 #include <sys/errno.h>
 #include <sys/syslog.h>
+#include <sys/sio.h>
+#include <sys/param.h>
+#include <sys/bitops.h>
 #include <sys/mmio.h>
 #include <dev/pci/pci.h>
+#include <dev/pci/pciregs.h>
 #include <dev/timer.h>
 #include <dev/ic/ahcivar.h>
 #include <dev/ic/ahciregs.h>
+#include <dev/dcdr/cache.h>
+#include <fs/devfs.h>
+#include <fs/ctlfs.h>
+#include <vm/dynalloc.h>
+#include <vm/physmem.h>
+#include <machine/cdefs.h>
+#include <string.h>

 #define pr_trace(fmt, ...) kprintf("ahci: " fmt, ##__VA_ARGS__)
 #define pr_error(...) pr_trace(__VA_ARGS__)

+static uint32_t devs_max = 0;
+static struct bdevsw ahci_bdevsw;
+static struct hba_device *devs;
 static struct pci_device *ahci_dev;
 static struct timer tmr;
+static struct ahci_hba g_hba;
+static struct driver_var __driver_var;

 /*
  * Poll register to have 'bits' set/unset.
@@ -78,7 +95,54 @@ ahci_poll_reg(volatile uint32_t *reg, uint32_t bits, bool pollset)
         }
     }

-    return val;
+    return 0;
 }

+static struct hba_device *
+ahci_get_dev(dev_t dev)
+{
+    for (int i = 0; i < devs_max; ++i) {
+        if (devs[i].dev == dev) {
+            return &devs[i];
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * Allocate a command slot for a port on
+ * the HBA.
+ */
+static int
+ahci_alloc_cmdslot(struct ahci_hba *hba, struct hba_port *port)
+{
+    uint32_t slotlist;
+
+    slotlist = mmio_read32(&port->ci);
+    slotlist |= mmio_read32(&port->sact);
+
+    for (int i = 0; i < hba->nslots; ++i) {
+        if (!ISSET(slotlist, i)) {
+            return i;
+        }
+    }
+
+    return -EAGAIN;
+}
+
+/*
+ * Get the command list base.
+ */
+static paddr_t
+ahci_cmdbase(struct hba_port *port)
+{
+    paddr_t basel, baseh, base;
+
+    basel = mmio_read32(&port->clb);
+    baseh = mmio_read32(&port->clbu);
+    base = COMBINE32(baseh, basel);
+    return base;
+}

 static int
@@ -114,12 +178,739 @@ ahci_hba_reset(struct ahci_hba *hba)
     return 0;
 }

+/*
+ * Dump identify structure for debugging
+ * purposes.
+ */
+static void
+ahci_dump_identity(struct ata_identity *identity)
+{
+    char serial_number[20];
+    char model_number[40];
+    char tmp;
+
+    memcpy(serial_number, identity->serial_number, sizeof(serial_number));
+    memcpy(model_number, identity->model_number, sizeof(model_number));
+
+    serial_number[sizeof(serial_number) - 1] = '\0';
+    model_number[sizeof(model_number) - 1] = '\0';
+
+    /* Fix up endianness for serial number */
+    for (size_t i = 0; i < sizeof(serial_number); i += 2) {
+        tmp = serial_number[i];
+        serial_number[i] = serial_number[i + 1];
+        serial_number[i + 1] = tmp;
+    }
+
+    /* Fix up endianness for model number */
+    for (size_t i = 0; i < sizeof(model_number); i += 2) {
+        tmp = model_number[i];
+        model_number[i] = model_number[i + 1];
+        model_number[i + 1] = tmp;
+    }
+
+    pr_trace("model number: %s\n", model_number);
+}
+
+/*
+ * Stop an HBA port's command list and FIS
+ * engine.
+ */
+static int
+hba_port_stop(struct hba_port *port)
+{
+    const uint32_t RUN_MASK = (AHCI_PXCMD_FR | AHCI_PXCMD_CR);
+    uint32_t cmd, tmp;
+
+    /* Ensure the port is running */
+    cmd = mmio_read32(&port->cmd);
+    if (!ISSET(cmd, RUN_MASK)) {
+        return 0;
+    }
+
+    cmd &= ~(AHCI_PXCMD_ST | AHCI_PXCMD_FRE);
+    mmio_write32(&port->cmd, cmd);
+
+    /*
+     * The spec states that once the port is stopped,
+     * PxCMD.CR and PxCMD.FR become unset
+     */
+    tmp = AHCI_PXCMD_FR | AHCI_PXCMD_CR;
+    if (ahci_poll_reg(&port->cmd, tmp, false) < 0) {
+        return -EAGAIN;
+    }
+
+    return 0;
+}
+
+/*
+ * Bring up an HBA port's command list
+ * and FIS engine.
+ */
+static int
+hba_port_start(struct hba_port *port)
+{
+    const uint32_t RUN_MASK = (AHCI_PXCMD_FR | AHCI_PXCMD_CR);
+    uint32_t cmd, tmp;
+
+    /* Ensure the port is not running */
+    cmd = mmio_read32(&port->cmd);
+    if (ISSET(cmd, RUN_MASK)) {
+        return 0;
+    }
+
+    /* Bring up the port */
+    cmd |= AHCI_PXCMD_ST | AHCI_PXCMD_FRE;
+    mmio_write32(&port->cmd, cmd);
+
+    tmp = AHCI_PXCMD_FR | AHCI_PXCMD_CR;
+    if (ahci_poll_reg(&port->cmd, tmp, true) < 0) {
+        return -EAGAIN;
+    }
+
+    return 0;
+}
+
+/*
+ * Check for interface errors, returns
+ * 0 on success (i.e., no errors), otherwise
+ * the "ERR" word of PxSERR.
+ */ +static int +hba_port_chkerr(struct hba_port *port) +{ + uint32_t serr; + uint16_t err; + uint8_t critical = 0; + + serr = mmio_read32(&port->serr); + err = serr & 0xFFFF; + if (err == 0) { + return 0; + } + + if (ISSET(err, AHCI_SERR_I)) { + pr_error("recovered data integrity error\n"); + } + if (ISSET(err, AHCI_SERR_M)) { + pr_error("recovered comms error\n"); + } + if (ISSET(err, AHCI_SERR_T)) { + pr_error("transient data integrity error\n"); + } + if (ISSET(err, AHCI_SERR_C)) { + pr_error("persistent comms error\n"); + critical = 1; + } + if (ISSET(err, AHCI_SERR_P)) { + pr_error("protocol error\n"); + critical = 1; + } + if (ISSET(err, AHCI_SERR_E)) { + pr_error("internal hba error\n"); + critical = 1; + } + if (critical) { + pr_error("CRITICAL - DISABLING PORT **\n"); + hba_port_stop(port); + return err; + } + + mmio_write32(&port->serr, 0xFFFFFFFF); + return err; + +} + +/* + * Reset a port on the HBA + * + * XXX: This function stops the port once the + * COMRESET is complete. + */ +static int +hba_port_reset(struct ahci_hba *hba, struct hba_port *port) +{ + uint32_t sctl, ssts, cmd; + uint8_t det, ipm, spd; + uint32_t elapsed = 0; + + sctl = mmio_read32(&port->sctl); + + /* + * Transmit a COMRESET to the device. If the HBA + * supports staggered spin-up, we'll need to set + * the PxCMD.SUD bit as well. + */ + sctl = (sctl & ~0x0F) | AHCI_DET_COMRESET; + mmio_write32(&port->sctl, sctl); + if (hba->sss) { + cmd = mmio_read32(&port->cmd); + cmd |= AHCI_PXCMD_SUD; + mmio_write32(&port->cmd, cmd); + } + + /* + * Wait for the link to become reestablished + * between the port and the HBA. + */ + tmr.msleep(300); + sctl &= ~AHCI_DET_COMRESET; + mmio_write32(&port->sctl, sctl); + + for (;;) { + if (elapsed >= AHCI_TIMEOUT) { + break; + } + ssts = mmio_read32(&port->ssts); + det = AHCI_PXSSTS_DET(ssts); + if (det == AHCI_DET_COMM) { + break; + } + + tmr.msleep(10); + elapsed += 10; + } + + ipm = AHCI_PXSSTS_IPM(ssts); + spd = AHCI_PXSSTS_SPD(ssts); + + if (det == AHCI_DET_PRESENT) { + pr_error("SATA link timeout\n"); + return -EAGAIN; + } + if (det != AHCI_DET_COMM) { + return -EAGAIN; + } + + /* + * Ensure the interface is in an active + * state. + */ + if (ipm != AHCI_IPM_ACTIVE) { + pr_error("device interface not active\n"); + return -EAGAIN; + } + + switch (spd) { + case AHCI_SPD_GEN1: + pr_trace("SATA link rate @ ~1.5 Gb/s\n"); + break; + case AHCI_SPD_GEN2: + pr_trace("SATA link rate @ ~3 Gb/s\n"); + break; + case AHCI_SPD_GEN3: + pr_trace("SATA link rate @ ~6 Gb/s\n"); + break; + } + + return 0; +} + +static int +ahci_submit_cmd(struct ahci_hba *hba, struct hba_port *port, uint8_t slot) +{ + const uint32_t BUSY_BITS = (AHCI_PXTFD_BSY | AHCI_PXTFD_DRQ); + const uint8_t MAX_ATTEMPTS = 3; + uint32_t ci; + uint8_t attempts = 0; + int status = 0; + + /* + * Spin on `TFD.BSY` and `TFD.DRQ` to ensure + * that the port is not busy before we send + * any commands. + */ + if (ahci_poll_reg(&port->tfd, BUSY_BITS, false) < 0) { + pr_trace("cmd failed, port busy (slot=%d)\n", slot); + return -EBUSY; + } + + /* + * Submit and wait for completion, this may take + * a bit so give it several attempts. + */ + ci = mmio_read32(&port->ci); + mmio_write32(&port->ci, ci | BIT(slot)); + while ((attempts++) < MAX_ATTEMPTS) { + status = ahci_poll_reg(&port->ci, BIT(slot), false); + if (status == 0) { + break; + } + } + if (status != 0) { + return status; + } + + return hba_port_chkerr(port); +} + +/* + * Send an ATA IDENTIFY command to a + * SATA device. 
+ */ +static int +ahci_identify(struct ahci_hba *hba, struct hba_device *dp) +{ + paddr_t base, buf; + struct hba_port *port; + struct ahci_cmd_hdr *cmdhdr; + struct ahci_cmdtab *cmdtbl; + struct ahci_fis_h2d *fis; + uint16_t *p; + int cmdslot, status; + + buf = vm_alloc_frame(1); + if (buf == 0) { + pr_trace("failed to alloc frame\n"); + return -ENOMEM; + } + + port = dp->io; + cmdslot = ahci_alloc_cmdslot(hba, port); + if (cmdslot < 0) { + pr_trace("failed to alloc cmdslot\n"); + vm_free_frame(buf, 1); + return cmdslot; + } + + base = ahci_cmdbase(port); + base += cmdslot * sizeof(*cmdhdr); + + /* Setup the command header */ + cmdhdr = PHYS_TO_VIRT(base); + cmdhdr->w = 0; + cmdhdr->cfl = sizeof(struct ahci_fis_h2d) / 4; + cmdhdr->prdtl = 1; + + cmdtbl = PHYS_TO_VIRT(cmdhdr->ctba); + cmdtbl->prdt[0].dba = buf; + cmdtbl->prdt[0].dbc = 511; + cmdtbl->prdt[0].i = 0; + + fis = (void *)&cmdtbl->cfis; + fis->command = ATA_CMD_IDENTIFY; + fis->c = 1; + fis->type = FIS_TYPE_H2D; + + if ((status = ahci_submit_cmd(hba, port, cmdslot)) != 0) { + goto done; + } + + ahci_dump_identity(PHYS_TO_VIRT(buf)); + p = (uint16_t *)PHYS_TO_VIRT(buf); + dp->nlba = (p[61] << 16) | p[60]; + pr_trace("max block size: %d\n", dp->nlba); +done: + vm_free_frame(buf, 1); + return status; +} + +/* + * Send a read/write command to a SATA drive + * + * @hba: Host bus adapter of target port + * @dev: Device to send over + * @sio: System I/O descriptor + * @write: If true, data pointed to by `sio` will be written + * + * XXX: - The `len` field in `sio` is block relative, in other words, + * set to 1 to read one block (512 bytes per block), etc. + * + * - The `offset` field in `sio` is the LBA address. + */ +static int +ahci_sata_rw(struct ahci_hba *hba, struct hba_device *dev, struct sio_txn *sio, + bool write) +{ + paddr_t base, buf; + char *p, *dest; + bool dcdr_hit = false; + struct hba_port *port; + struct dcdr_lookup dcd_lookup; + struct dcd *dcd; + struct ahci_cmd_hdr *cmdhdr; + struct ahci_cmdtab *cmdtbl; + struct ahci_fis_h2d *fis; + int cmdslot, status; + size_t nblocks, cur_lba; + size_t len; + + if (sio == NULL) { + return -EINVAL; + } + if (sio->len == 0 || sio->buf == NULL) { + return -EINVAL; + } + + port = dev->io; + + /* + * Compute how many blocks can be cached. + * + * XXX: We do not want to fill the entire DCDR + * with a single drive read to reduce the + * frequency of DCDR evictions. + * + * TODO: We should also take advantage of logical + * block coalescing. + */ + nblocks = sio->len; + if (nblocks >= AHCI_DCDR_CAP) { + nblocks = AHCI_DCDR_CAP / 2; + } + + /* + * If we are reading the drive, see if we have + * anything in the cache. + * + * XXX: If there is a break in the cache and we + * have a miss inbetween, other DCDs are + * ignored. Wonder how we can mitigate + * fragmentation. + */ + cur_lba = sio->offset; + len = sio->len; + for (size_t i = 0; i < nblocks && !write; ++i) { + status = dcdr_lookup(dev->dcdr, cur_lba, &dcd_lookup); + if (status != 0) { + break; + } + if (len == 0) { + break; + } + + dcdr_hit = true; + dcd = dcd_lookup.dcd_res; + + /* Hit, copy the cached data */ + dest = &((char *)sio->buf)[i * 512]; + p = dcd->block; + memcpy(dest, p, 512); + + ++cur_lba; + --len; + } + + /* Did we get everything already? 
*/ + if (len == 0) { + return 0; + } + + buf = VIRT_TO_PHYS(sio->buf); + cmdslot = ahci_alloc_cmdslot(hba, port); + if (cmdslot < 0) { + pr_trace("failed to alloc cmdslot\n"); + return cmdslot; + } + + base = ahci_cmdbase(port); + base += cmdslot * sizeof(*cmdhdr); + + /* Setup the command header */ + cmdhdr = PHYS_TO_VIRT(base); + cmdhdr->w = write; + cmdhdr->cfl = sizeof(struct ahci_fis_h2d) / 4; + cmdhdr->prdtl = 1; + + cmdtbl = PHYS_TO_VIRT(cmdhdr->ctba); + cmdtbl->prdt[0].dba = buf; + cmdtbl->prdt[0].dbc = (sio->len << 9) - 1; + cmdtbl->prdt[0].i = 0; + + fis = (void *)&cmdtbl->cfis; + fis->command = write ? ATA_CMD_WRITE_DMA : ATA_CMD_READ_DMA; + fis->c = 1; + fis->type = FIS_TYPE_H2D; + fis->device = (1 << 6); /* LBA */ + + /* Setup LBA */ + fis->lba0 = cur_lba & 0xFF; + fis->lba1 = (cur_lba >> 8) & 0xFF; + fis->lba2 = (cur_lba >> 16) & 0xFF; + fis->lba3 = (cur_lba >> 24) & 0xFF; + fis->lba4 = (cur_lba >> 32) & 0xFF; + fis->lba5 = (cur_lba >> 40) & 0xFF; + + /* Setup count */ + fis->countl = len & 0xFF; + fis->counth = (len >> 8) & 0xFF; + + if ((status = ahci_submit_cmd(hba, port, cmdslot)) != 0) { + return status; + } + + /* Don't cache again on hit */ + if (!write && dcdr_hit) { + return 0; + } + + /* Cache our read */ + for (size_t i = 0; i < nblocks; ++i) { + cur_lba = sio->offset + i; + p = sio->buf; + dcdr_cachein(dev->dcdr, &p[i * 512], cur_lba); + } + return 0; +} + +static int +sata_dev_rw(dev_t dev, struct sio_txn *sio, bool write) +{ + const size_t BSIZE = 512; + struct sio_txn wr_sio; + struct hba_device *devp; + size_t block_count, len; + off_t block_off, read_off; + char *buf; + int status; + + if (sio == NULL) { + return -EINVAL; + } + if (sio->len == 0 || sio->buf == NULL) { + return -EINVAL; + } + if (dev > devs_max) { + return -ENODEV; + } + + devp = ahci_get_dev(dev); + if (__unlikely(devp == NULL)) { + return -ENODEV; + } + + /* Compute block count and offset */ + block_count = ALIGN_UP(sio->len, BSIZE); + block_count /= BSIZE; + block_off = sio->offset / BSIZE; + + /* Allocate internal buffer */ + len = block_count * BSIZE; + buf = dynalloc_memalign(len, 0x1000); + if (buf == NULL) { + return -ENOMEM; + } + + /* Copy SIO buffer if write */ + if (write) { + memset(buf, 0, len); + memcpy(buf, sio->buf, sio->len); + } + + /* + * Perform the r/w operation and copy internal buffer + * out if this is a read operation. 
+ */ + wr_sio.buf = buf; + wr_sio.len = block_count; + wr_sio.offset = block_off; + status = ahci_sata_rw(&g_hba, devp, &wr_sio, write); + if (status == 0 && !write) { + read_off = sio->offset & (BSIZE - 1); + memcpy(sio->buf, buf + read_off, sio->len); + } + + dynfree(buf); + return sio->len; +} + +/* + * Device interface read + */ +static int +ahci_dev_read(dev_t dev, struct sio_txn *sio, int flags) +{ + while (DRIVER_DEFERRED()) { + md_pause(); + } + + return sata_dev_rw(dev, sio, false); +} + +/* + * Device interface write + */ +static int +ahci_dev_write(dev_t dev, struct sio_txn *sio, int flags) +{ + while (DRIVER_DEFERRED()) { + md_pause(); + } + + return sata_dev_rw(dev, sio, true); +} + +/* + * Device interface number of blocks + */ +static int +ahci_dev_bsize(dev_t dev) +{ + struct hba_device *dp; + + while (DRIVER_DEFERRED()) { + md_pause(); + } + + if ((dp = ahci_get_dev(dev)) == NULL) { + return -ENODEV; + } + + return dp->nlba; +} + +/* + * Initialize a drive on an HBA port + * + * @hba: HBA descriptor + * @portno: Port number + */ +static int +ahci_init_port(struct ahci_hba *hba, uint32_t portno) +{ + char devname[128]; + struct hba_memspace *abar = hba->io; + struct hba_port *port; + struct hba_device *dp; + struct ctlfs_dev dev; + size_t clen, pagesz; + uint32_t lo, hi, sig; + paddr_t fra, cmdlist, tmp; + int error; + + pagesz = DEFAULT_PAGESIZE; + port = &abar->ports[portno]; + + if ((error = hba_port_reset(hba, port)) < 0) { + return error; + } + sig = mmio_read32(&port->sig); + if (sig == ATAPI_SIG) { + return -ENOTSUP; + } + + pr_trace("found device @ port %d\n", portno); + dp = &devs[portno]; + dp->io = port; + dp->hba = hba; + dp->dev = portno; + + dp->dcdr = dcdr_alloc(512, AHCI_DCDR_CAP); + if (dp->dcdr == NULL) { + pr_error("failed to alloc dcdr\n"); + return -ENOMEM; + } + + /* Allocate a command list */ + clen = ALIGN_UP(hba->nslots * AHCI_CMDENTRY_SIZE, pagesz); + clen /= pagesz; + cmdlist = vm_alloc_frame(clen); + if (cmdlist == 0) { + pr_trace("failed to alloc command list\n"); + return -ENOMEM; + } + + /* Allocate FIS receive area */ + dp->cmdlist = PHYS_TO_VIRT(cmdlist); + fra = vm_alloc_frame(1); + if (fra == 0) { + pr_trace("failed to allocate FIS receive area\n"); + vm_free_frame(cmdlist, clen); + return -ENOMEM; + } + + dp->fra = PHYS_TO_VIRT(fra); + + /* Write the command list */ + lo = cmdlist & 0xFFFFFFFF; + hi = cmdlist >> 32; + mmio_write32(&port->clb, lo); + mmio_write32(&port->clbu, hi); + + /* Write the FIS receive area */ + lo = fra & 0xFFFFFFFF; + hi = fra >> 32; + mmio_write32(&port->fb, lo); + mmio_write32(&port->fbu, hi); + + /* Each command header has a H2D FIS area */ + for (int i = 0; i < hba->nslots; ++i) { + tmp = vm_alloc_frame(1); + dp->cmdlist[i].prdtl = 1; + dp->cmdlist[i].ctba = tmp; + } + + mmio_write32(&port->serr, 0xFFFFFFFF); + + if ((error = hba_port_start(port)) < 0) { + for (int i = 0; i < hba->nslots; ++i) { + vm_free_frame(dp->cmdlist[i].ctba, 1); + } + vm_free_frame(cmdlist, clen); + vm_free_frame(fra, 1); + pr_trace("failed to start port %d\n", portno); + return error; + } + + ahci_identify(hba, dp); + + if (hba->major == 0) { + hba->major = dev_alloc_major(); + } + dp->dev = dev_alloc(hba->major); + snprintf(devname, sizeof(devname), "sd%d", dp->dev); + + /* Register the device */ + dev_register(hba->major, dp->dev, &ahci_bdevsw); + pr_trace("drive @ /dev/%s\n", devname); + + /* Register a control node */ + dev.mode = 0444; + ctlfs_create_node(devname, &dev); + pr_trace("drive control @ /ctl/%s/\n", devname); + + /* 
Register control files */
+    dev.devname = devname;
+    dev.ops = &g_sata_bsize_ops;
+    ctlfs_create_entry("bsize", &dev);
+    return devfs_create_entry(devname, hba->major, dp->dev, 060444);
+}
+
+/*
+ * Scan the HBA for implemented ports
+ */
+static int
+ahci_hba_scan(struct ahci_hba *hba)
+{
+    struct hba_memspace *abar = hba->io;
+    uint32_t pi;
+    size_t len;
+
+    len = hba->nports * sizeof(struct hba_device);
+    devs_max = hba->nports;
+    if ((devs = dynalloc(len)) == NULL) {
+        pr_trace("failed to allocate dev descriptors\n");
+        return -ENOMEM;
+    }
+
+    memset(devs, 0, len);
+    pi = mmio_read32(&abar->pi);
+    for (int i = 0; i < sizeof(pi) * 8; ++i) {
+        if (ISSET(pi, BIT(i))) {
+            ahci_init_port(hba, i);
+        }
+    }
+
+    return 0;
+}
+
 static int
 ahci_hba_init(struct ahci_hba *hba)
 {
     struct hba_memspace *abar = hba->io;
     int error;
     uint32_t tmp;
+    uint32_t cap, pi;

     /*
      * God knows what state the HBA is in by the time
@@ -132,6 +923,12 @@ ahci_hba_init(struct ahci_hba *hba)
     }
     pr_trace("successfully performed a hard reset\n");

+    cap = mmio_read32(&abar->cap);
+    hba->maxports = AHCI_CAP_NP(cap);
+    hba->nslots = AHCI_CAP_NCS(cap);
+    hba->ems = AHCI_CAP_EMS(cap);
+    hba->sal = AHCI_CAP_SAL(cap);
+    hba->sss = AHCI_CAP_SSS(cap);

     /*
      * The HBA provides backwards compatibility with
@@ -142,17 +939,51 @@ ahci_hba_init(struct ahci_hba *hba)
     tmp = mmio_read32(&abar->ghc);
     tmp |= AHCI_GHC_AE;
     mmio_write32(&abar->ghc, tmp);
+
+    /*
+     * CAP.NP reports the maximum number of ports the
+     * HBA silicon supports, but a lot of hardware will
+     * not implement the full number of ports supported.
+     *
+     * The `PI' register is a bit-significant register
+     * used to determine which ports are implemented,
+     * therefore we can just count how many bits are
+     * set in this register and that would be how many
+     * ports are implemented total.
+     */
+    pi = mmio_read32(&abar->pi);
+    hba->nports = popcnt(pi);
+    pr_trace("hba implements %d port(s)\n", hba->nports);
+
+    if ((error = ahci_hba_scan(hba)) != 0) {
+        return error;
+    }
+
     return 0;
 }

+/*
+ * Init PCI related controller bits
+ */
+static void
+ahci_init_pci(void)
+{
+    uint32_t tmp;
+
+    /* Enable bus mastering and MMIO */
+    tmp = pci_readl(ahci_dev, PCIREG_CMDSTATUS);
+    tmp |= (PCI_BUS_MASTERING | PCI_MEM_SPACE);
+    pci_writel(ahci_dev, PCIREG_CMDSTATUS, tmp);
+}
+
 static int
 ahci_init(void)
 {
     struct pci_lookup lookup;
     int status;
-    struct ahci_hba hba;
     void *abar_vap = NULL;

+    g_hba.major = 0;
     lookup.pci_class = 0x01;
     lookup.pci_subclass = 0x06;
@@ -193,14 +1024,20 @@ ahci_init(void)
      * ahci_dev struct, so that we can perform MMIO and then issue
      * a hard reset.
      */
-
     if ((status = pci_map_bar(ahci_dev, 5, &abar_vap)) != 0) {
         return status;
     }

-    hba.io = (struct hba_memspace*)abar_vap;
-    ahci_hba_init(&hba);
+    ahci_init_pci();
+    g_hba.io = (struct hba_memspace*)abar_vap;
+    ahci_hba_init(&g_hba);
     return 0;
 }

-DRIVER_EXPORT(ahci_init);
+static struct bdevsw ahci_bdevsw = {
+    .read = ahci_dev_read,
+    .write = ahci_dev_write,
+    .bsize = ahci_dev_bsize
+};
+
+DRIVER_DEFER(ahci_init);
diff --git a/sys/dev/ic/ahci_ctl.c b/sys/dev/ic/ahci_ctl.c
new file mode 100644
index 0000000..282a141
--- /dev/null
+++ b/sys/dev/ic/ahci_ctl.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/errno.h> +#include <dev/ic/ahcivar.h> +#include <fs/ctlfs.h> +#include <string.h> + +static int +ctl_bsize_read(struct ctlfs_dev *cdp, struct sio_txn *sio) +{ + uint32_t bsize = AHCI_SECTOR_SIZE; + uint32_t len = sizeof(bsize); + + if (sio == NULL) { + return -EINVAL; + } + if (sio->buf == NULL) { + return -EINVAL; + } + + if (sio->len < len) { + len = sio->len; + } + + memcpy(sio->buf, &bsize, len); + return len; +} + +/* + * Operations for /ctl/sdx/bsize + */ +const struct ctlops g_sata_bsize_ops = { + .read = ctl_bsize_read, + .write = NULL, +}; diff --git a/sys/dev/ic/nvme.c b/sys/dev/ic/nvme.c index 749ac93..822b085 100644 --- a/sys/dev/ic/nvme.c +++ b/sys/dev/ic/nvme.c @@ -425,7 +425,7 @@ nvme_dev_rw(dev_t dev, struct sio_txn *sio, bool write) */ ns = nvme_get_ns(dev); if (__unlikely(ns == NULL)) - return -EIO; + return -ENODEV; /* Calculate the block count and offset */ block_count = ALIGN_UP(sio->len, ns->lba_bsize); @@ -625,6 +625,10 @@ nvme_init(void) return -ENODEV; } + pr_trace("NVMe storage ctrl <hba? at pci%d:%x.%x.%d>\n", + nvme_dev->bus, nvme_dev->device_id, nvme_dev->func, + nvme_dev->slot); + /* Try to request a general purpose timer */ if (req_timer(TIMER_GP, &tmr) != TMRR_SUCCESS) { pr_error("failed to fetch general purpose timer\n"); @@ -658,4 +662,4 @@ static struct bdevsw nvme_bdevsw = { .write = nowrite }; -DRIVER_EXPORT(nvme_init); +DRIVER_DEFER(nvme_init); diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c index 8328ffc..9dfb90e 100644 --- a/sys/dev/pci/pci.c +++ b/sys/dev/pci/pci.c @@ -31,14 +31,33 @@ #include <sys/queue.h> #include <sys/syslog.h> #include <sys/errno.h> +#include <sys/spinlock.h> +#include <sys/mmio.h> #include <dev/pci/pci.h> #include <dev/pci/pciregs.h> +#include <dev/acpi/acpi.h> +#include <dev/acpi/tables.h> +#include <machine/pci/pci.h> #include <vm/dynalloc.h> +#include <vm/vm.h> #include <lib/assert.h> #define pr_trace(fmt, ...) 
kprintf("pci: " fmt, ##__VA_ARGS__) static TAILQ_HEAD(, pci_device) device_list; +static struct spinlock devlist_lock = {0}; +static struct acpi_mcfg *mcfg; + +struct cam_hook { + /* PCI CAM */ + pcireg_t(*cam_readl)(struct pci_device *dev, uint32_t off); + void(*cam_writel)(struct pci_device *dev, uint32_t off, pcireg_t val); + + /* PCIe ECAM */ + pcireg_t(*ecam_readl)(struct pci_device *dev, uint32_t off); + void(*ecam_writel)(struct pci_device *dev, uint32_t off, pcireg_t val); + void *ecam_base[1]; +} cam_hook = { NULL }; static bool pci_dev_exists(uint8_t bus, uint8_t slot, uint8_t func) @@ -121,6 +140,9 @@ pci_set_device_info(struct pci_device *dev) dev->prog_if = PCIREG_PROGIF(classrev); dev->hdr_type = (uint8_t)pci_readl(dev, PCIREG_HDRTYPE); + /* This is a PCIe device if it has CAP ID of 0x10 */ + dev->pci_express = pci_get_cap(dev, 0x10) != 0; + /* Set type-specific data */ switch (dev->hdr_type & ~BIT(7)) { case PCI_HDRTYPE_NORMAL: @@ -149,6 +171,53 @@ pci_set_device_info(struct pci_device *dev) static void pci_scan_bus(uint8_t bus); +static inline vaddr_t +pcie_ecam_addr(struct pci_device *dev) +{ + vaddr_t base = (vaddr_t)cam_hook.ecam_base[0]; + + base += dev->bus << 20 | + dev->slot << 15 | + dev->func << 12; + return base; +} + +static pcireg_t +pcie_ecam_readl(struct pci_device *dev, uint32_t offset) +{ + vaddr_t address; + + address = pcie_ecam_addr(dev); + address += (offset & ~3); + return mmio_read32((void *)address); +} + +static void +pcie_ecam_writel(struct pci_device *dev, uint32_t offset, pcireg_t val) +{ + vaddr_t address; + + address = pcie_ecam_addr(dev); + address += (offset & ~3); + mmio_write32((void *)address, val); +} + +static int +pcie_init(struct acpi_mcfg_base *base) +{ + void *iobase; + + pr_trace("[group %02d] @ bus [%02d - %02d]\n", base->seg_grpno, + base->bus_start, base->bus_end); + pr_trace("ecam @ %p\n", base->base_pa); + + iobase = PHYS_TO_VIRT(base->base_pa); + cam_hook.ecam_base[0] = iobase; + cam_hook.ecam_writel = pcie_ecam_writel; + cam_hook.ecam_readl = pcie_ecam_readl; + return 0; +} + /* * Attempt to register a device. * @@ -262,12 +331,55 @@ pci_get_device(struct pci_lookup lookup, uint16_t lookup_type) return NULL; } + +void +pci_add_device(struct pci_device *dev) +{ + spinlock_acquire(&devlist_lock); + TAILQ_INSERT_TAIL(&device_list, dev, link); + spinlock_release(&devlist_lock); +} + + +pcireg_t +pci_readl(struct pci_device *dev, uint32_t offset) +{ + bool have_ecam = cam_hook.ecam_readl != NULL; + + if (dev->pci_express && have_ecam) { + return cam_hook.ecam_readl(dev, offset); + } + + return cam_hook.cam_readl(dev, offset); +} + +void +pci_writel(struct pci_device *dev, uint32_t offset, pcireg_t val) +{ + bool have_ecam = cam_hook.ecam_writel != NULL; + + if (dev->pci_express && have_ecam) { + cam_hook.ecam_writel(dev, offset, val); + return; + } + + cam_hook.cam_writel(dev, offset, val); +} + int pci_init(void) { size_t ndev; TAILQ_INIT(&device_list); + mcfg = acpi_query("MCFG"); + if (mcfg != NULL) { + pcie_init(&mcfg->base[0]); + } + + cam_hook.cam_readl = md_pci_readl; + cam_hook.cam_writel = md_pci_writel; + /* Recursively scan bus 0 */ pci_scan_bus(0); ndev = TAILQ_NELEM(&device_list); diff --git a/sys/dev/phy/e1000.c b/sys/dev/phy/e1000.c new file mode 100644 index 0000000..95efe6d --- /dev/null +++ b/sys/dev/phy/e1000.c @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/driver.h> +#include <sys/errno.h> +#include <sys/syslog.h> +#include <sys/mmio.h> +#include <dev/phy/e1000regs.h> +#include <dev/pci/pci.h> +#include <dev/pci/pciregs.h> +#include <dev/timer.h> +#include <net/if_var.h> +#include <string.h> + +#define pr_trace(fmt, ...) kprintf("e1000: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +#define E1000_VENDOR 0x8086 +#define E1000_DEVICE 0x100E +#define E1000_TIMEOUT 500 /* In msec */ + +static struct timer tmr; +static struct pci_device *e1000; +static struct netif netif; + +struct e1000_nic { + void *vap; + uint8_t has_eeprom : 1; + uint16_t eeprom_size; + uint16_t io_port; +}; + +static int +e1000_poll_reg(volatile uint32_t *reg, uint32_t bits, bool pollset) +{ + size_t usec_start, usec; + size_t elapsed_msec; + uint32_t val; + bool tmp; + + usec_start = tmr.get_time_usec(); + + for (;;) { + val = mmio_read32(reg); + tmp = (pollset) ? ISSET(val, bits) : !ISSET(val, bits); + + usec = tmr.get_time_usec(); + elapsed_msec = (usec - usec_start) / 1000; + + /* If tmp is set, the register updated in time */ + if (tmp) { + break; + } + + /* Exit with an error if we time out */ + if (elapsed_msec > E1000_TIMEOUT) { + return -ETIME; + } + } + + return 0; +} + +/* + * Query information about any EEPROMs for diagnostic + * purposes. + * + * TODO: Some wacky older chips don't show their presence + * too easily; we could fall back to microwire / SPI + * bit banging to see if it responds to us manually + * clocking a dummy read operation in. + */ +static void +eeprom_query(struct e1000_nic *np) +{ + uint16_t size_bits = 1024; + uint32_t eecd, *eecd_p; + const char *typestr = "microwire"; + + eecd_p = PTR_OFFSET(np->vap, E1000_EECD); + + /* + * First we should check if there is an EEPROM + * on-board; if not, there is nothing we can do + * here. 
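+ * + * (A note for readers: EECD is the EEPROM/flash control and + * data register. The PRES bit checked below is the presence + * strap, while the TYPE and SIZE bits describe the attached + * part; the names here follow our e1000regs.h definitions.) 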
+ */ + eecd = mmio_read32(eecd_p); + if (!ISSET(eecd, E1000_EECD_PRES)) { + return; + } + + np->has_eeprom = 1; + if (ISSET(eecd, E1000_EECD_TYPE)) { + typestr = "SPI"; + } + if (ISSET(eecd, E1000_EECD_SIZE)) { + size_bits = 4096; + } + + np->eeprom_size = size_bits; + pr_trace("%d-bit %s EEPROM detected\n", size_bits, typestr); +} + +/* + * If there is no EEPROM, we can still read + * the MAC address through the Receive address + * registers + * + * XXX: This is typically only used as a fallback. + * + * Returns a value less than zero if an Ethernet + * address is not found, which would be rather + * unfortunate. + * + * @np: NIC descriptor + * @addr: Pointer to MAC address data + */ +static int +e1000_read_recvaddr(struct e1000_nic *np, struct netif_addr *addr) +{ + const uint32_t RECVADDR_OFF = 0x5400; + uint32_t tmp; + uint32_t *dword_p; + + dword_p = PTR_OFFSET(np->vap, RECVADDR_OFF); + + if (dword_p[0] == 0) { + pr_error("bad hwaddr in recvaddr\n"); + return -ENOTSUP; + } + + /* DWORD 0 */ + tmp = mmio_read32(&dword_p[0]); + addr->data[0] = tmp & 0xFF; + addr->data[1] = (tmp >> 8) & 0xFF; + addr->data[2] = (tmp >> 16) & 0xFF; + addr->data[3] = (tmp >> 24) & 0xFF; + + /* DWORD 1 */ + tmp = mmio_read32(&dword_p[1]); + addr->data[4] = tmp & 0xFF; + addr->data[5] = (tmp >> 8) & 0xFF; + return 0; +} + +/* + * Read a 16-bit word from the NIC's on-board EEPROM. + * + * XXX: This should only be used if the caller is + * certain that the NIC has an EEPROM + * + * @addr: EEPROM address to read from + * + * A returned value of 0xFFFF should be seen as invalid. + */ +static uint16_t +eeprom_readw(struct e1000_nic *np, uint8_t addr) +{ + uint32_t eerd, *eerd_p; + int error; + + if (!np->has_eeprom) { + pr_error("e1000_read_eeprom: EEPROM not present\n"); + return 0xFFFF; + } + + eerd_p = PTR_OFFSET(np->vap, E1000_EERD); + eerd = (addr << 8) | E1000_EERD_START; + mmio_write32(eerd_p, eerd); + + error = e1000_poll_reg(eerd_p, E1000_EERD_DONE, true); + if (error < 0) { + pr_error("e1000_read_eeprom: timeout\n"); + return 0xFFFF; + } + + eerd = mmio_read32(eerd_p); + return (eerd >> 16) & 0xFFFF; +} + +/* + * Read the MAC address from the NIC's EEPROM. + * + * XXX: This should usually work; however, if the NIC does + * not have an on-board EEPROM, this will fail. In such + * cases, e1000_read_recvaddr() can be called instead. 
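+ * + * As a worked example with made-up values: EEPROM words are + * consumed low byte first, so if words 0..2 read back as + * 0x1100, 0x3322 and 0x5544, the resulting MAC address will + * be 00:11:22:33:44:55. 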
+ * + * @np: NIC descriptor + * @addr: Pointer to MAC address data + */ +static int +e1000_read_macaddr(struct e1000_nic *np, struct netif_addr *addr) +{ + uint16_t eeprom_word; + + if (!np->has_eeprom) { + pr_trace("EEPROM not present, trying recvaddr\n"); + return e1000_read_recvaddr(np, addr); + } + + /* Word 0 */ + eeprom_word = eeprom_readw(np, E1000_HWADDR0); + addr->data[0] = (eeprom_word & 0xFF); + addr->data[1] = (eeprom_word >> 8) & 0xFF; + + /* Word 1 */ + eeprom_word = eeprom_readw(np, E1000_HWADDR1); + addr->data[2] = (eeprom_word & 0xFF); + addr->data[3] = (eeprom_word >> 8) & 0xFF; + + /* Word 2 */ + eeprom_word = eeprom_readw(np, E1000_HWADDR2); + addr->data[4] = (eeprom_word & 0xFF); + addr->data[5] = (eeprom_word >> 8) & 0xFF; + return 0; +} + +/* + * Reset the entire E1000 + */ +static int +e1000_reset(struct e1000_nic *np) +{ + uint32_t ctl, *ctl_p; + int error; + + ctl_p = PTR_OFFSET(np->vap, E1000_CTL); + ctl = mmio_read32(ctl_p); + ctl |= E1000_CTL_RST; + mmio_write32(ctl_p, ctl); + + error = e1000_poll_reg(ctl_p, E1000_CTL_RST, false); + if (error < 0) { + pr_error("reset timeout\n"); + return error; + } + + return 0; +} + +/* + * Initialize an E1000(e) chip + */ +static int +e1000_chip_init(struct e1000_nic *np) +{ + struct netif_addr *addr = &netif.addr; + int error; + + /* + * To ensure that BIOS/UEFI or whatever firmware got us + * here didn't fuck anything up in the process, or at the + * very least didn't leave the controller in a seemingly + * alright state that gives us a surprise screwing in the + * future, we'll reset everything to its default startup + * state. + * + * Better safe than sorry... + */ + if ((error = e1000_reset(np)) < 0) { + return error; + } + + eeprom_query(np); + if ((error = e1000_read_macaddr(np, addr)) < 0) { + return error; + } + + pr_trace("MAC address: %x:%x:%x:%x:%x:%x\n", + (uint64_t)addr->data[0], (uint64_t)addr->data[1], + (uint64_t)addr->data[2], (uint64_t)addr->data[3], + (uint64_t)addr->data[4], (uint64_t)addr->data[5]); + + return 0; +} + +/* + * Enables PCI-specific bits like bus mastering (for DMA) + * as well as MMIO. 
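+ * + * Concretely, this sets the Memory Space Enable (bit 1) and + * Bus Master Enable (bit 2) bits of the PCI command register + * at config offset 0x04, which is what PCI_MEM_SPACE and + * PCI_BUS_MASTERING are expected to expand to. 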
+ */ +static void +e1000_init_pci(void) +{ + uint32_t tmp; + + tmp = pci_readl(e1000, PCIREG_CMDSTATUS); + tmp |= (PCI_BUS_MASTERING | PCI_MEM_SPACE); + pci_writel(e1000, PCIREG_CMDSTATUS, tmp); +} + +static int +e1000_init(void) +{ + struct pci_lookup lookup; + struct e1000_nic nic; + int status; + + lookup.vendor_id = E1000_VENDOR; + lookup.device_id = E1000_DEVICE; + e1000 = pci_get_device(lookup, PCI_DEVICE_ID | PCI_VENDOR_ID); + if (e1000 == NULL) { + return -ENODEV; + } + + /* Get a GP timer */ + if (req_timer(TIMER_GP, &tmr) != TMRR_SUCCESS) { + pr_error("failed to fetch general purpose timer\n"); + return -ENODEV; + } + + /* We need msleep() */ + if (tmr.msleep == NULL) { + pr_error("general purpose timer has no msleep()\n"); + return -ENODEV; + } + + memset(&nic, 0, sizeof(nic)); + pr_trace("e1000 at pci%d:%x.%x.%d\n", + e1000->bus, e1000->device_id, e1000->func, + e1000->slot); + + if ((status = pci_map_bar(e1000, 0, &nic.vap)) != 0) { + pr_error("failed to map BAR0\n"); + return status; + } + + e1000_init_pci(); + e1000_chip_init(&nic); + return 0; +} + +DRIVER_EXPORT(e1000_init); diff --git a/sys/dev/phy/rt8139.c b/sys/dev/phy/rtl.c index e2f87e1..691f767 100644 --- a/sys/dev/phy/rt8139.c +++ b/sys/dev/phy/rtl.c @@ -30,29 +30,30 @@ #include <sys/types.h> #include <sys/errno.h> #include <sys/syslog.h> +#include <sys/spinlock.h> #include <sys/driver.h> +#include <sys/device.h> #include <dev/pci/pci.h> -#include <dev/phy/rt8139.h> +#include <dev/phy/rtl.h> #include <dev/timer.h> #include <dev/pci/pciregs.h> +#include <net/netbuf.h> +#include <net/if_var.h> #include <vm/physmem.h> +#include <vm/dynalloc.h> #include <vm/vm.h> #include <machine/pio.h> +#include <machine/intr.h> #include <string.h> -/* TODO: Make this smoother */ -#if defined(__x86_64__) -#include <machine/intr.h> -#include <machine/ioapic.h> -#include <machine/lapic.h> -#include <machine/idt.h> -#endif +#define IFNAME "rt0" -#define pr_trace(fmt, ...) kprintf("rt8139: " fmt, ##__VA_ARGS__) +#define pr_trace(fmt, ...) kprintf("rt81xx: " fmt, ##__VA_ARGS__) #define pr_error(...) 
pr_trace(__VA_ARGS__) #define RX_BUF_SIZE 3 /* In pages */ #define RX_REAL_BUF_SIZE 8192 /* In bytes */ +#define TXQ_ENTRIES 4 #define RX_PTR_MASK (~3) @@ -63,11 +64,26 @@ #define HAVE_PIO 0 #endif /* _MACHINE_HAVE_PIO */ +static struct spinlock netif_lock; +static struct netbuf netif_buf[TXQ_ENTRIES]; static struct pci_device *dev; +static struct netif netif; static struct timer tmr; +static uint32_t tx_ptr = 0; /* XXX: despite the name, this is the RX read offset (see rt81xx_intr()) */ +static uint32_t netif_enq_ptr = 0; static uint16_t ioport; static paddr_t rxbuf, txbuf; +/* TXAD regs */ +static uint16_t tsads[TXQ_ENTRIES] = { + RT_TXAD_N(0), RT_TXAD_N(4), + RT_TXAD_N(8), RT_TXAD_N(12) +}; +static uint16_t tsds[TXQ_ENTRIES] = { + RT_TXSTATUS_N(0), RT_TXSTATUS_N(4), + RT_TXSTATUS_N(8), RT_TXSTATUS_N(12) +}; + /* * Write to an RTL8139 register * @@ -156,53 +172,112 @@ rt_poll(uint8_t reg, uint8_t size, uint32_t bits, bool pollset) return val; } -#if defined(__x86_64__) -__isr static void -rt8139_pin_irq(void *sp) +static int +rt_tx(void *packet, size_t len) +{ + static uint32_t tx_ptr = 0; /* TX slot index; shadows the file-scope tx_ptr */ + void *tx_data; + paddr_t tx_pa; + + tx_data = dynalloc(len); + if (tx_data == NULL) { + return -ENOMEM; + } + + memcpy(tx_data, packet, len); + tx_pa = VIRT_TO_PHYS(tx_data); + rt_write(tsads[tx_ptr], 4, tx_pa); + rt_write(tsds[tx_ptr++], 4, len); + if (tx_ptr > TXQ_ENTRIES - 1) { + tx_ptr = 0; + } + return 0; +} + +static void +__rt81xx_tx_start(struct netif *nifp) +{ + struct netbuf *dest; + int error; + + for (int i = 0; i < netif_enq_ptr; ++i) { + dest = &netif_buf[i]; + error = rt_tx(dest->data, dest->len); + if (error < 0) { + pr_error("tx_start fail @queue %d (errno=%d)\n", i, error); + } + } +} + +static void +rt81xx_tx_start(struct netif *nifp) +{ + spinlock_acquire(&netif_lock); + __rt81xx_tx_start(nifp); + spinlock_release(&netif_lock); +} + +static int +rt81xx_tx_enq(struct netif *nifp, struct netbuf *nbp, void *data) +{ + struct netbuf *dest; + + spinlock_acquire(&netif_lock); + dest = &netif_buf[netif_enq_ptr++]; + memcpy(dest, nbp, sizeof(*dest)); + + if (netif_enq_ptr > TXQ_ENTRIES - 1) { + __rt81xx_tx_start(nifp); + netif_enq_ptr = 0; + } + spinlock_release(&netif_lock); + return 0; +} + +static int +rt81xx_intr(void *sp) { - static uint32_t packet_ptr = 0; uint16_t len; uint16_t *p; uint16_t status; status = rt_read(RT_INTRSTATUS, 2); - p = (uint16_t *)(rxbuf + packet_ptr); + p = (uint16_t *)(rxbuf + tx_ptr); len = *(p + 1); /* Length after header */ p += 2; /* Points to data now */ - if (status & RT_TOK) { - return; + if (!ISSET(status, RT_TOK | RT_ROK)) { + return 0; + } + + if (ISSET(status, RT_TOK)) { + pr_trace("sent packet\n"); + return 1; } /* Update rxbuf offset in CAPR */ - packet_ptr = (packet_ptr + len + 4 + 3) & RX_PTR_MASK; - if (packet_ptr > RX_REAL_BUF_SIZE) { - packet_ptr -= RX_REAL_BUF_SIZE; + tx_ptr = (tx_ptr + len + 4 + 3) & RX_PTR_MASK; + if (tx_ptr > RX_REAL_BUF_SIZE) { + tx_ptr -= RX_REAL_BUF_SIZE; } - rt_write(RT_RXBUFTAIL, 2, packet_ptr - 0x10); + rt_write(RT_RXBUFTAIL, 2, tx_ptr - 0x10); rt_write(RT_INTRSTATUS, 2, RT_ACKW); - lapic_eoi(); + return 1; /* handled */ } static int -rtl8139_irq_init(void) +rt81xx_irq_init(void) { - int vec; + struct intr_hand ih; - vec = intr_alloc_vector("rt8139", IPL_BIO); - if (vec < 0) { - return vec; + ih.func = rt81xx_intr; + ih.priority = IPL_BIO; + ih.irq = dev->irq_line; + if (intr_register("rt81xx", &ih) == NULL) { + return -EIO; } - - /* Map interrupt vector to IRQ */ - idt_set_desc(vec, IDT_INT_GATE, ISR(rt8139_pin_irq), 0); - ioapic_set_vec(dev->irq_line, vec); - 
ioapic_irq_unmask(dev->irq_line); return 0; } -#else -#define rtl8139_irq_init(...) -ENOTSUP -#endif static void rt_init_pci(void) @@ -218,6 +293,11 @@ rt_init_pci(void) static int rt_init_mac(void) { + struct netif_addr *addr = &netif.addr; + uint8_t conf; + uint32_t tmp; + int error; + /* * First step is ensuring the MAC is in known * and consistent state by resetting it. God @@ -226,7 +306,52 @@ rt_init_mac(void) ioport = dev->bar[0] & ~1; pr_trace("resetting MAC...\n"); rt_write(RT_CHIPCMD, 1, RT_RST); - rt_poll(RT_CHIPCMD, 1, RT_RST, 0); + error = rt_poll(RT_CHIPCMD, 1, RT_RST, 0); + if (error < 0) { + pr_error("RTL8139 reset timeout\n"); + return error; + } + + /* + * Tell the RTL8139 to load config data from + * the 93C46. This is done by clearing EEM1 + * and setting EEM0. This whole process should + * take roughly 2 milliseconds. + * + * XXX: EEPROM autoloads *should* happen during a hardware + * reset, but some cards might not follow spec, so force + * it. + */ + conf = rt_read(RT_CFG9346, 1); + conf &= ~RT_EEM1; + conf |= RT_EEM0; + rt_write(RT_CFG9346, 1, conf); + + /* MAC address dword 0 */ + tmp = rt_read(RT_IDR0, 4); + addr->data[0] = tmp & 0xFF; + addr->data[1] = (tmp >> 8) & 0xFF; + addr->data[2] = (tmp >> 16) & 0xFF; + addr->data[3] = (tmp >> 24) & 0xFF; + + /* MAC address word 1 */ + tmp = rt_read(RT_IDR2, 4); + addr->data[4] = (tmp >> 16) & 0xFF; + addr->data[5] = (tmp >> 24) & 0xFF; + + pr_trace("MAC address: %x:%x:%x:%x:%x:%x\n", + (uint64_t)addr->data[0], (uint64_t)addr->data[1], + (uint64_t)addr->data[2], (uint64_t)addr->data[3], + (uint64_t)addr->data[4], (uint64_t)addr->data[5]); + + /* + * Alright, now we don't want those EEM bits + * sticking lopsided, so let's put the RTL8139 + * back into normal operation... + */ + conf = rt_read(RT_CFG9346, 1); + conf &= ~(RT_EEM1 | RT_EEM0); + rt_write(RT_CFG9346, 1, conf); rxbuf = vm_alloc_frame(RX_BUF_SIZE); txbuf = vm_alloc_frame(RX_BUF_SIZE); @@ -241,6 +366,11 @@ rt_init_mac(void) return -ENOMEM; } + memcpy(netif.name, IFNAME, strlen(IFNAME) + 1); + netif.tx_enq = rt81xx_tx_enq; + netif.tx_start = rt81xx_tx_start; + netif_add(&netif); + /* * Configure the chip: * @@ -258,19 +388,17 @@ rt_init_mac(void) * - Enable interrupts through ROK/TOK * - Enable RX state machines * - * TODO: Support TX - * */ - rtl8139_irq_init(); + rt81xx_irq_init(); rt_write(RT_RXBUF, 4, rxbuf); rt_write(RT_RXCONFIG, 4, RT_AB | RT_AM | RT_APM | RT_AAP); rt_write(RT_INTRMASK, 2, RT_ROK | RT_TOK); - rt_write(RT_CHIPCMD, 1, RT_RE); + rt_write(RT_CHIPCMD, 1, RT_RE | RT_TE); return 0; } static int -rt813l_init(void) +rt81xx_init(void) { struct pci_lookup lookup; @@ -312,4 +440,4 @@ return rt_init_mac(); } -DRIVER_EXPORT(rt813l_init); +DRIVER_DEFER(rt81xx_init); diff --git a/sys/dev/usb/xhci.c b/sys/dev/usb/xhci.c index 67a1e4e..46ec4af 100644 --- a/sys/dev/usb/xhci.c +++ b/sys/dev/usb/xhci.c @@ -37,6 +37,8 @@ #include <dev/usb/xhciregs.h> #include <dev/usb/xhcivar.h> #include <dev/pci/pci.h> +#include <dev/pci/pciregs.h> +#include <dev/acpi/acpi.h> #include <vm/physmem.h> #include <vm/dynalloc.h> #include <assert.h> @@ -55,10 +57,11 @@ static struct pci_device *hci_dev; static struct timer tmr; -__attribute__((__interrupt__)) static void -xhci_common_isr(void *sf) +static int +xhci_intr(void *sf) { pr_trace("received xHCI interrupt (via PCI MSI-X)\n"); + return 1; /* handled */ } /* @@ -145,15 +148,17 @@ xhci_parse_ecp(struct xhci_hc *hc) break; case XHCI_ECAP_USBLEGSUP: /* Begin xHC BIOS handoff to us */ - pr_trace("establishing xHC ownership...\n"); - val |= XHCI_OS_SEM; - mmio_write32(p, val); - - /* Ensure the xHC responded correctly */ - if (xhci_poll32(p, XHCI_OS_SEM, 1) < 0) - return -EIO; - if (xhci_poll32(p, XHCI_BIOS_SEM, 0) < 0) - return -EIO; + if (!ISSET(hc->quirks, XHCI_QUIRK_HANDOFF)) { + pr_trace("establishing xHC ownership...\n"); + val |= XHCI_OS_SEM; + mmio_write32(p, val); + + /* Ensure the xHC responded correctly */ + if (xhci_poll32(p, XHCI_OS_SEM, 1) < 0) + return -EIO; + if (xhci_poll32(p, XHCI_BIOS_SEM, 0) < 0) + return -EIO; + } break; } @@ -171,6 +176,7 @@ xhci_init_scratchpads(struct xhci_hc *hc) struct xhci_caps *caps = XHCI_CAPS(hc->base); uint16_t max_bufs_lo, max_bufs_hi; uint16_t max_bufs; + size_t len; uintptr_t *bufarr, tmp; max_bufs_lo = XHCI_MAX_SP_LO(caps->hcsparams1); @@ -184,8 +190,9 @@ xhci_init_scratchpads(struct xhci_hc *hc) return 0; } - pr_trace("using %d pages for xHC scratchpads\n"); - bufarr = dynalloc_memalign(sizeof(uintptr_t)*max_bufs, 0x1000); + len = sizeof(uint64_t) * max_bufs; + pr_trace("using %d bytes for xHC scratchpads\n", len); + bufarr = dynalloc_memalign(len, 0x1000); if (bufarr == NULL) { pr_error("failed to allocate scratchpad buffer array\n"); return -1; @@ -229,7 +236,7 @@ xhci_init_msix(struct xhci_hc *hc) struct msi_intr intr; intr.name = "xHCI MSI-X"; - intr.handler = xhci_common_isr; + intr.handler = xhci_intr; return pci_enable_msix(hci_dev, &intr); } @@ -251,7 +258,7 @@ xhci_init_evring(struct xhci_hc *hc) memset(segtab, 0, DEFAULT_PAGESIZE); /* Set the size of the event ring segment table */ - erst_size = PTR_OFFSET(runtime, 0x28); + erst_size = PTR_OFFSET(runtime, XHCI_RT_ERSTSZ); mmio_write32(erst_size, 1); /* Allocate the event ring segment */ @@ -265,20 +272,20 @@ xhci_init_evring(struct xhci_hc *hc) segtab->size = XHCI_EVRING_LEN; /* Setup the event ring dequeue pointer */ - erdp = PTR_OFFSET(runtime, 0x38); + erdp = PTR_OFFSET(runtime, XHCI_RT_ERDP); mmio_write64(erdp, segtab->base); /* Point ERSTBA to our event ring segment */ - erstba = PTR_OFFSET(runtime, 0x30); + erstba = PTR_OFFSET(runtime, XHCI_RT_ERSTBA); mmio_write64(erstba, VIRT_TO_PHYS(segtab)); hc->evring = PHYS_TO_VIRT(segtab->base); /* Setup interrupt moderation */ - imod = PTR_OFFSET(runtime, 0x24); + imod = PTR_OFFSET(runtime, XHCI_RT_IMOD); mmio_write32(imod, XHCI_IMOD_DEFAULT); /* Enable interrupts */ - iman = PTR_OFFSET(runtime, 0x20); + iman = PTR_OFFSET(runtime, XHCI_RT_IMAN); tmp = mmio_read32(iman); mmio_write32(iman, tmp | XHCI_IMAN_IE); } @@ -414,6 +421,28 @@ xhci_init_hc(struct xhci_hc *hc) uintptr_t dcbaap, cmdring; struct xhci_caps *caps; struct xhci_opregs *opregs; + const char *vendor; + + /* + * The firmware on some Dell machines handles the + * xHCI BIOS/OS handoff very poorly. Updating the + * OS semaphore in the USBLEGSUP register will + * result in the chipset firing off an SMI which is + * supposed to perform the actual handoff. + * + * However, Dell is stupid as always and the machine + * can get stuck in SMM, which results in the machine + * locking up in a *very* bad way. In other words, the + * OS execution is literally halted and further SMIs like + * thermal, power, and fan events are deferred forever + * (no bueno!!). The best thing to do is to not perform + * a handoff if the host board is by Dell (bad Dell!!). 
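+ * + * (The check below keys off the ACPI OEM ID, which reads + * "DELL" on the affected boards; a blunt heuristic rather + * than an exhaustive quirk table, but it errs on the safe + * side.) 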
+ */ + vendor = acpi_oemid(); + if (memcmp(vendor, "DELL", 4) == 0) { + pr_trace("detected xhc handoff quirk\n"); + hc->quirks |= XHCI_QUIRK_HANDOFF; + } caps = (struct xhci_caps *)hc->base; caplength = mmio_read8(&caps->caplength); @@ -470,6 +499,16 @@ xhci_init_hc(struct xhci_hc *hc) return 0; } +static void +xhci_init_pci(void) +{ + uint32_t tmp; + + tmp = pci_readl(hci_dev, PCIREG_CMDSTATUS); + tmp |= (PCI_BUS_MASTERING | PCI_MEM_SPACE); + pci_writel(hci_dev, PCIREG_CMDSTATUS, tmp); +} + static int xhci_init(void) { @@ -502,6 +541,7 @@ xhci_init(void) return -ENODEV; } + xhci_init_pci(); return xhci_init_hc(&xhc); } diff --git a/sys/dev/video/fbdev.c b/sys/dev/video/fbdev.c index cf4954a..8a2499d 100644 --- a/sys/dev/video/fbdev.c +++ b/sys/dev/video/fbdev.c @@ -28,17 +28,65 @@ */ #include <sys/types.h> +#include <sys/errno.h> #include <sys/limine.h> +#include <sys/device.h> +#include <sys/driver.h> +#include <sys/fbdev.h> #include <dev/video/fbdev.h> +#include <fs/devfs.h> +#include <fs/ctlfs.h> +#include <vm/vm.h> +#include <string.h> #define FRAMEBUFFER \ framebuffer_req.response->framebuffers[0] +static struct cdevsw fb_cdevsw; +static const struct ctlops fb_size_ctl; static volatile struct limine_framebuffer_request framebuffer_req = { .id = LIMINE_FRAMEBUFFER_REQUEST, .revision = 0 }; +static paddr_t +fbdev_mmap(dev_t dev, size_t size, off_t off, int flags) +{ + size_t max_bounds; + + max_bounds = FRAMEBUFFER->pitch * FRAMEBUFFER->height; + if ((off + size) > max_bounds) { + return 0; + } + + return VIRT_TO_PHYS(FRAMEBUFFER->address); +} + +static int +ctl_attr_read(struct ctlfs_dev *cdp, struct sio_txn *sio) +{ + struct fbattr attr; + size_t len = sizeof(attr); + + if (sio == NULL) { + return -EINVAL; + } + if (sio->buf == NULL) { + return -EINVAL; + } + + if (len > sio->len) { + len = sio->len; + } + + attr.width = FRAMEBUFFER->width; + attr.height = FRAMEBUFFER->height; + attr.pitch = FRAMEBUFFER->pitch; + attr.bpp = FRAMEBUFFER->bpp; + memcpy(sio->buf, &attr, len); + return len; +} + struct fbdev fbdev_get(void) { @@ -48,5 +96,43 @@ fbdev_get(void) ret.width = FRAMEBUFFER->width; ret.height = FRAMEBUFFER->height; ret.pitch = FRAMEBUFFER->pitch; + ret.bpp = FRAMEBUFFER->bpp; return ret; } + +static int +fbdev_init(void) +{ + struct ctlfs_dev ctl; + char devname[] = "fb0"; + devmajor_t major; + dev_t dev; + + /* Register the device here */ + major = dev_alloc_major(); + dev = dev_alloc(major); + dev_register(major, dev, &fb_cdevsw); + devfs_create_entry(devname, major, dev, 0444); + + + /* Register control files */ + ctl.mode = 0444; + ctlfs_create_node(devname, &ctl); + ctl.devname = devname; + ctl.ops = &fb_size_ctl; + ctlfs_create_entry("attr", &ctl); + return 0; +} + +static struct cdevsw fb_cdevsw = { + .read = noread, + .write = nowrite, + .mmap = fbdev_mmap +}; + +static const struct ctlops fb_size_ctl = { + .read = ctl_attr_read, + .write = NULL, +}; + +DRIVER_EXPORT(fbdev_init); diff --git a/sys/fs/ctlfs.c b/sys/fs/ctlfs.c new file mode 100644 index 0000000..9225114 --- /dev/null +++ b/sys/fs/ctlfs.c @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/errno.h> +#include <sys/vnode.h> +#include <sys/device.h> +#include <sys/syslog.h> +#include <sys/mount.h> +#include <sys/queue.h> +#include <fs/ctlfs.h> +#include <vm/dynalloc.h> +#include <string.h> + +#define CTLFS_MPNAME "ctl" +#define CTLFS_ENTRY_MAG 0x43454E54UL /* 'CENT' */ +#define CTLFS_NODE_MAG 0x43544C4EUL /* 'CTLN' */ + +#define pr_trace(fmt, ...) kprintf("ctlfs: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +static const struct vops ctlfs_vops; + +struct ctlfs_hdr { + uint32_t magic; + char *name; +}; + +/* + * Control fs entry, represents a control + * file within a ctlfs node. + * -- HDR START -- + * @magic: Magic number [MUST BE FIRST] (CTLFS_ENTRY_MAG) + * @name: Entry name [MUST BE SECOND] + * -- HDR END -- + * @parent: Parent (ctlfs_node) + * @io: Ctlfs operations. + * @mode: Access flags. + * @link: TAILQ link. + */ +struct ctlfs_entry { + uint32_t magic; + char *name; + struct ctlfs_node *parent; + const struct ctlops *io; + mode_t mode; + TAILQ_ENTRY(ctlfs_entry) link; +}; + +/* + * Control fs node, represents a directory + * within ctlfs. These directories represent + * devices; each device directory contains + * control files. + * + * For example: + * + * /ctl/sd1/bsize # Block size + * /ctl/sd1/health # Health + * [et cetera] + * + * @magic: Magic number [MUST BE FIRST] (CTLFS_NODE_MAG) + * @name: Name of node [MUST BE SECOND] + * @mode: Access flags. + * @eq: Entries for this ctlfs node. + * @link: TAILQ link. + */ +struct ctlfs_node { + uint32_t magic; + char *name; + mode_t mode; + TAILQ_HEAD(, ctlfs_entry) eq; + TAILQ_ENTRY(ctlfs_node) link; +}; + +static TAILQ_HEAD(, ctlfs_node) nodeq; + +/* + * Look up entries within a ctlfs + * node by name. + */ +static struct ctlfs_entry * +entry_lookup(struct ctlfs_node *cnp, const char *name) +{ + struct ctlfs_entry *ep; + + TAILQ_FOREACH(ep, &cnp->eq, link) { + if (strcmp(ep->name, name) == 0) { + return ep; + } + } + + return NULL; +} + +/* + * Look up a ctlfs node by name. 
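+ * + * For example, node_lookup("sd1") returns the node backing + * the /ctl/sd1/ directory, or NULL if no such node has been + * registered. 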
+ */ +static struct ctlfs_node * +node_lookup(const char *name) +{ + struct ctlfs_node *cnp; + + TAILQ_FOREACH(cnp, &nodeq, link) { + if (strcmp(cnp->name, name) == 0) { + return cnp; + } + } + + return NULL; +} + +static int +ctlfs_init(struct fs_info *fip) +{ + struct vnode *vp; + struct mount *mp; + int error; + + if ((error = vfs_alloc_vnode(&vp, VDIR)) != 0) { + pr_error("failed to alloc vnode\n"); + return error; + } + + vp->vops = &ctlfs_vops; + if ((mp = vfs_alloc_mount(vp, fip)) == NULL) { + pr_trace("failed to alloc mountpoint\n"); + return -ENOMEM; + } + + error = vfs_name_mount(mp, CTLFS_MPNAME); + if (error != 0) { + pr_trace("failed to mount @ /%s\n", CTLFS_MPNAME); + return error; + } + + TAILQ_INSERT_TAIL(&g_mountlist, mp, mnt_list); + TAILQ_INIT(&nodeq); + return 0; +} + +static int +ctlfs_lookup(struct vop_lookup_args *args) +{ + int error; + const char *name = args->name; + struct vnode *vp, *dirvp; + struct ctlfs_node *cnp = NULL; + struct ctlfs_entry *enp = NULL; + + if (name == NULL) { + return -EINVAL; + } + + dirvp = args->dirvp; + if (dirvp == NULL) { + return -EIO; + } + + /* + * If we already have data within this vnode + * it *might* be a control node but we'll have + * to verify its magic number... + */ + if (dirvp->data != NULL) { + cnp = (struct ctlfs_node *)dirvp->data; + if (cnp->magic != CTLFS_NODE_MAG) { + pr_error("bad `cnp' magic (name=%s)\n", name); + return -EIO; + } + } + + /* + * Handle cases where we are looking up + * relative to a control node. + */ + if (cnp != NULL) { + enp = entry_lookup(cnp, name); + if (enp == NULL) { + return -ENOENT; + } + + /* Create a vnode for this enp */ + error = vfs_alloc_vnode(&vp, VCHR); + if (error != 0) { + return error; + } + + vp->data = (void *)enp; + vp->vops = &ctlfs_vops; + *args->vpp = vp; + return 0; + } + + /* Does this entry exist? */ + if ((cnp = node_lookup(name)) == NULL) { + return -ENOENT; + } + + if ((error = vfs_alloc_vnode(&vp, VDIR)) != 0) { + return error; + } + + vp->data = cnp; + vp->vops = &ctlfs_vops; + *args->vpp = vp; + return 0; +} + +/* + * Create a ctlfs node (directory) within the + * root fs. + * + * @name: Node name (e.g., "sd1" for "/ctl/sd1/") + * @dp: Device related arguments (see ctlfs_dev) + * Args used: + * - mode (access flags) + * + */ +int +ctlfs_create_node(const char *name, const struct ctlfs_dev *dp) +{ + struct ctlfs_node *cnp; + size_t namelen; + + if (name == NULL || dp == NULL) { + return -EINVAL; + } + + cnp = dynalloc(sizeof(*cnp)); + if (cnp == NULL) { + return -ENOMEM; + } + + namelen = strlen(name); + cnp->name = dynalloc(namelen + 1); + if (cnp->name == NULL) { + dynfree(cnp); + return -ENOMEM; + } + + memcpy(cnp->name, name, namelen); + cnp->name[namelen] = '\0'; + cnp->mode = dp->mode; + cnp->magic = CTLFS_NODE_MAG; + TAILQ_INSERT_TAIL(&nodeq, cnp, link); + TAILQ_INIT(&cnp->eq); + return 0; +} + +/* + * Create a ctlfs entry within a specific node. + * + * @name: Name e.g., "/health" for "/ctl/xxx/health". 
+ * @dp: Device related arguments (see ctlfs_dev) + * Args used: + * - devname (name of device) + * - mode (access flags) + * - ops (operations vector) + */ +int +ctlfs_create_entry(const char *name, const struct ctlfs_dev *dp) +{ + struct ctlfs_entry *enp; + struct ctlfs_node *parent; + size_t namelen; + + if (name == NULL || dp == NULL) { + return -EINVAL; + } + if (dp->devname == NULL) { + return -EINVAL; + } + if (dp->ops == NULL) { + return -EINVAL; + } + + parent = node_lookup(dp->devname); + if (parent == NULL) { + pr_trace("could not find %s\n", dp->devname); + return -ENOENT; + } + + enp = dynalloc(sizeof(*enp)); + if (enp == NULL) { + return -ENOMEM; + } + + namelen = strlen(name); + enp->name = dynalloc(namelen + 1); + if (enp->name == NULL) { + dynfree(enp); + return -ENOMEM; + } + + memcpy(enp->name, name, namelen); + enp->name[namelen] = '\0'; + enp->io = dp->ops; + enp->magic = CTLFS_ENTRY_MAG; + enp->mode = dp->mode; + enp->parent = parent; + TAILQ_INSERT_TAIL(&parent->eq, enp, link); + return 0; +} + +/* + * Read a control file + * + * Args passed to driver: + * - ctlfs_dev.ctlname + * - ctlfs_dev.iop + * - ctlfs_dev.mode + */ +static int +ctlfs_read(struct vnode *vp, struct sio_txn *sio) +{ + const struct ctlops *iop; + struct ctlfs_entry *enp; + struct ctlfs_dev dev; + + if ((enp = vp->data) == NULL) { + pr_error("no vnode data for ctlfs entry\n"); + return -EIO; + } + if (enp->magic != CTLFS_ENTRY_MAG) { + pr_error("ctlfs entry has bad magic\n"); + return -EIO; + } + if ((iop = enp->io) == NULL) { + pr_error("no i/o ops for ctlfs entry\n"); + return -EIO; + } + if (iop->read == NULL) { + pr_trace("no read op for ctlfs entry\n"); + return -EIO; + } + + dev.ctlname = enp->name; + dev.ops = iop; + dev.mode = enp->mode; + return iop->read(&dev, sio); +} + +static int +ctlfs_reclaim(struct vnode *vp) +{ + vp->data = NULL; + return 0; +} + +static const struct vops ctlfs_vops = { + .lookup = ctlfs_lookup, + .read = ctlfs_read, + .getattr = NULL, + .write = NULL, + .reclaim = ctlfs_reclaim, + .create = NULL +}; + +const struct vfsops g_ctlfs_vfsops = { + .init = ctlfs_init +}; diff --git a/sys/fs/devfs.c b/sys/fs/devfs.c index 024239d..293ee0a 100644 --- a/sys/fs/devfs.c +++ b/sys/fs/devfs.c @@ -30,6 +30,7 @@ #include <sys/types.h> #include <sys/vnode.h> #include <sys/errno.h> +#include <sys/stat.h> #include <sys/mount.h> #include <sys/device.h> #include <fs/devfs.h> @@ -126,6 +127,8 @@ devfs_lookup(struct vop_lookup_args *args) vp->data = dnp; vp->vops = &g_devfs_vops; + vp->major = dnp->major; + vp->dev = dnp->dev; *args->vpp = vp; return 0; } @@ -136,6 +139,8 @@ devfs_getattr(struct vop_getattr_args *args) struct vnode *vp; struct vattr *attr; struct devfs_node *dnp; + struct bdevsw *bdev; + size_t size = 0; vp = args->vp; if ((dnp = vp->data) == NULL) { @@ -145,6 +150,13 @@ devfs_getattr(struct vop_getattr_args *args) return -EIO; } + if (dnp->is_block) { + bdev = dev_get(dnp->major, dnp->dev); + if (bdev->bsize != NULL) { + size = bdev->bsize(dnp->dev); + } + } + /* * Set stat attributes from device node structure * found within vnode data. @@ -153,20 +165,13 @@ devfs_getattr(struct vop_getattr_args *args) * size is hardwired to 0. 
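* (Block devices are now the exception: when the driver * provides a bsize() callback, as wired up above, the size * reported here comes from it.) 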
*/ attr->mode = dnp->mode; - attr->size = 0; + attr->size = size; return 0; } static int devfs_reclaim(struct vnode *vp) { - struct devfs_node *dnp; - - if ((dnp = vp->data) != NULL) { - dynfree(dnp->name); - dynfree(vp->data); - } - vp->data = NULL; return 0; } @@ -255,7 +260,7 @@ devfs_create_entry(const char *name, devmajor_t major, dev_t dev, mode_t mode) memcpy(dnp->name, name, name_len); dnp->name[name_len] = '\0'; - + dnp->is_block = ISSET(mode, S_IFBLK) ? 1 : 0; dnp->major = major; dnp->dev = dev; dnp->mode = mode; @@ -268,7 +273,8 @@ const struct vops g_devfs_vops = { .reclaim = devfs_reclaim, .read = devfs_read, .write = devfs_write, - .getattr = devfs_getattr + .getattr = devfs_getattr, + .create = NULL }; const struct vfsops g_devfs_vfsops = { diff --git a/sys/fs/initramfs.c b/sys/fs/initramfs.c index fd746ef..c41deb4 100644 --- a/sys/fs/initramfs.c +++ b/sys/fs/initramfs.c @@ -33,12 +33,16 @@ #include <sys/errno.h> #include <sys/limine.h> #include <sys/panic.h> +#include <sys/param.h> #include <sys/vnode.h> #include <fs/initramfs.h> #include <vm/dynalloc.h> #include <string.h> -#define CPIO_TRAILER "TRAILER!!!" +#define OMAR_EOF "RAMO" +#define OMAR_REG 0 +#define OMAR_DIR 1 +#define BLOCK_SIZE 512 /* * File or directory. */ @@ -51,20 +55,18 @@ struct initramfs_node { }; /* - * ODC CPIO header + * The OMAR file header, describing the basics + * of a file. + * + * @magic: Header magic ("OMAR") + * @type: Entry type (OMAR_REG or OMAR_DIR) + * @namelen: Length of the filename + * @len: Length of the file */ -struct cpio_hdr { - char c_magic[6]; - char c_dev[6]; - char c_ino[6]; - char c_mode[6]; - char c_uid[6]; - char c_gid[6]; - char c_nlink[6]; - char c_rdev[6]; - char c_mtime[11]; - char c_namesize[6]; - char c_filesize[11]; +struct __packed omar_hdr { + char magic[4]; + uint8_t type; + uint8_t namelen; + uint32_t len; }; static volatile struct limine_module_request mod_req = { @@ -92,21 +94,6 @@ get_module(const char *path, uint64_t *size) { } /* - * Convert octal to base 10 - */ -static uint32_t -oct2dec(const char *in, size_t sz) -{ - size_t val = 0; - - for (size_t i = 0; i < sz; ++i) { - val = val * 8 + (in[i] - '0'); - } - - return val; -} - -/* * Get a file from initramfs * * @path: Path of file to get. @@ -115,41 +102,54 @@ oct2dec(const char *in, size_t sz) static int initramfs_get_file(const char *path, struct initramfs_node *res) { - const struct cpio_hdr *hdr; struct initramfs_node node; - uintptr_t addr; - size_t namesize, filesize; - mode_t mode; + const struct omar_hdr *hdr; + const char *p, *name; + char namebuf[256]; + off_t off; - addr = (uintptr_t)initramfs; + p = initramfs; for (;;) { - hdr = (void *)addr; - namesize = oct2dec(hdr->c_namesize, sizeof(hdr->c_namesize)); - filesize = oct2dec(hdr->c_filesize, sizeof(hdr->c_filesize)); - mode = oct2dec(hdr->c_mode, sizeof(hdr->c_mode)); + hdr = (struct omar_hdr *)p; + if (strncmp(hdr->magic, OMAR_EOF, sizeof(OMAR_EOF) - 1) == 0) { + break; + } - /* Make sure the magic is correct */ - if (strncmp(hdr->c_magic, "070707", 6) != 0) { + /* Ensure the file is valid */ + if (strncmp(hdr->magic, "OMAR", 4) != 0) { + /* Bad magic */ + return -EINVAL; + } + if (hdr->namelen > sizeof(namebuf) - 1) { return -EINVAL; } - addr += sizeof(struct cpio_hdr); - node.path = (const char *)addr; + name = (char *)p + sizeof(struct omar_hdr); + memcpy(namebuf, name, hdr->namelen); + namebuf[hdr->namelen] = '\0'; - /* Is this the requested file? 
*/ - if (strcmp(node.path, path) == 0) { - node.data = (void *)(addr + namesize); - node.size = filesize; - node.mode = mode; + /* Compute offset to next block */ + if (hdr->type == OMAR_DIR) { + off = 512; + } else { + off = ALIGN_UP(sizeof(*hdr) + hdr->namelen + hdr->len, BLOCK_SIZE); + } + + /* Skip header and name, right to the data */ + p = (char *)hdr + sizeof(struct omar_hdr); + p += hdr->namelen; + + if (strcmp(namebuf, path) == 0) { + node.mode = 0700; + node.size = hdr->len; + node.data = (void *)p; *res = node; return 0; } - /* Get next header and see if we are at the end */ - addr += (namesize + filesize); - if (strcmp(node.path, CPIO_TRAILER) == 0) { - break; - } + hdr = (struct omar_hdr *)((char *)hdr + off); + p = (char *)hdr; + memset(namebuf, 0, sizeof(namebuf)); } return -ENOENT; @@ -223,6 +223,8 @@ initramfs_read(struct vnode *vp, struct sio_txn *sio) return -EIO; if (sio->buf == NULL) return -EIO; + if (sio->len > n->size) + sio->len = n->size; src = n->data; dest = sio->buf; @@ -256,9 +258,9 @@ initramfs_init(struct fs_info *fip) struct mount *mp; int status; - initramfs = get_module("/boot/ramfs.cpio", &initramfs_size); + initramfs = get_module("/boot/ramfs.omar", &initramfs_size); if (initramfs == NULL) { - panic("failed to open initramfs cpio image\n"); + panic("failed to open initramfs OMAR image\n"); } status = vfs_alloc_vnode(&g_root_vnode, VDIR); @@ -277,7 +279,8 @@ const struct vops g_initramfs_vops = { .read = initramfs_read, .write = NULL, .reclaim = initramfs_reclaim, - .getattr = initramfs_getattr + .getattr = initramfs_getattr, + .create = NULL, }; const struct vfsops g_initramfs_vfsops = { diff --git a/sys/fs/tmpfs.c b/sys/fs/tmpfs.c new file mode 100644 index 0000000..9dce89a --- /dev/null +++ b/sys/fs/tmpfs.c @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/mount.h> +#include <sys/errno.h> +#include <sys/syslog.h> +#include <sys/types.h> +#include <sys/cdefs.h> +#include <sys/param.h> +#include <sys/panic.h> +#include <sys/vnode.h> +#include <vm/dynalloc.h> +#include <vm/vm_obj.h> +#include <vm/vm_page.h> +#include <vm/vm_pager.h> +#include <fs/tmpfs.h> +#include <string.h> + +#define ROOT_RPATH "/tmp" +#define TMPFS_BSIZE DEFAULT_PAGESIZE + +#define pr_trace(fmt, ...) kprintf("tmpfs: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +static TAILQ_HEAD(, tmpfs_node) root; + +/* + * Generate a vnode for a specific tmpfs + * node. + */ +static int +tmpfs_ref(struct tmpfs_node *np) +{ + struct vnode *vp = NULL; + int retval = 0; + + if (np->vp == NULL) { + spinlock_acquire(&np->lock); + retval = vfs_alloc_vnode(&vp, np->type); + np->vp = vp; + spinlock_release(&np->lock); + } + + if (vp != NULL) { + vp->data = np; + vp->vops = &g_tmpfs_vops; + } + + return retval; +} + +/* + * Perform lookup within the tmpfs namespace + * + * XXX: This operation is serialized + * TODO: Support multiple directories (only fs root now) + * + * @rpath: /tmp/ relative path to lookup + * @res: The result is written here (must NOT be NULL) + */ +static int +tmpfs_do_lookup(const char *rpath, struct tmpfs_node **res) +{ + struct tmpfs_node *cnp; + struct tmpfs_node *dirent; + int error = 0; + + /* + * If the directory is the node that we are + * looking for, return it. But if it is not + * and it is empty then there is nothing + * we can do. + */ + cnp = TAILQ_FIRST(&root); + if (strcmp(cnp->rpath, rpath) == 0) { + *res = cnp; + return 0; + } + if (TAILQ_NELEM(&cnp->dirents) == 0) { + return -ENOENT; + } + + /* + * Go through each tmpfs dirent to see if we can + * find the file we are looking for. + */ + spinlock_acquire(&cnp->lock); + dirent = TAILQ_FIRST(&cnp->dirents); + while (dirent != NULL) { + if (strcmp(dirent->rpath, rpath) == 0) { + break; + } + + dirent = TAILQ_NEXT(dirent, link); + } + + spinlock_release(&cnp->lock); + if (dirent == NULL) { + return -ENOENT; + } + + if ((error = tmpfs_ref(dirent)) != 0) { + return error; + } + + *res = dirent; + return 0; +} + +/* + * TMPFS lookup callback for the VFS + * + * Takes some arguments and returns a vnode + * in args->vpp + */ +static int +tmpfs_lookup(struct vop_lookup_args *args) +{ + struct tmpfs_node *np; + int error; + + if (args == NULL) { + return -EINVAL; + } + if (args->name == NULL) { + return -EINVAL; + } + + /* + * Attempt to find the node we want. If it already + * has a vnode attached to it, that is what we use; + * otherwise a new vnode is allocated for it on the + * fly. + */ + error = tmpfs_do_lookup(args->name, &np); + if (error != 0) { + return error; + } + + *args->vpp = np->vp; + return 0; +} + +/* + * TMPFS create callback for the VFS + * + * Creates a new TMPFS node + */ +static int +tmpfs_create(struct vop_create_args *args) +{ + const char *pcp; /* Stay away from boat, kids */ + struct vnode *dirvp; + struct tmpfs_node *np; + struct tmpfs_node *root_np; + int error; + + /* Validate inputs before touching them */ + if (args == NULL) + return -EINVAL; + if ((pcp = args->path) == NULL) + return -EIO; + if ((dirvp = args->dirvp) == NULL) + return -EIO; + + /* Remove the leading "/tmp/" */ + pcp += sizeof(ROOT_RPATH); + if (*pcp == '\0') { + return -ENOENT; + } + + np = dynalloc(sizeof(*np)); + if (np == NULL) { + return -ENOMEM; + } + + memset(np, 0, sizeof(*np)); + + /* + * TODO: Support multiple directories. 
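+ * + * (Today every entry hangs directly off the root node's + * dirent list, see tmpfs_do_lookup(), so nested paths would + * need a real hierarchy walk before this TODO can land.) 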
+ * + * XXX: We currently only create a TMPFS_REG node + * to keep things initially simple. + */ + root_np = TAILQ_FIRST(&root); + np->dirvp = dirvp; + np->type = TMPFS_REG; + memcpy(np->rpath, pcp, strlen(pcp) + 1); + TAILQ_INSERT_TAIL(&root_np->dirents, np, link); + + if ((error = tmpfs_ref(np)) != 0) { + return error; + } + + *args->vpp = np->vp; + return 0; +} + +/* + * TMPFS write callback for VFS + * + * Node buffers are orthogonally managed. That is, each + * node has its own data buffer. When writing to a node, + * we need to take the length of the buffer into account. + * This value may need to be expanded, with more pages + * allocated, if the number of bytes to be written exceeds + * it. + */ +static int +tmpfs_write(struct vnode *vp, struct sio_txn *sio) +{ + struct tmpfs_node *np; + uint8_t *buf; + void *tmp; + + if (sio->buf == NULL || sio->len == 0) { + return -EINVAL; + } + + /* This should not happen but you never know */ + if ((np = vp->data) == NULL) { + return -EIO; + } + + /* Is this even a regular file? */ + if (np->type != VREG) { + return -EISDIR; + } + + spinlock_acquire(&np->lock); + + /* + * If the residual byte count is zero, we need to + * allocate a new page to be used. However, if this + * fails, we'll throw back -ENOMEM. + */ + if (np->len == 0) { + np->data = dynalloc(TMPFS_BSIZE); + if (np->data == NULL) { + spinlock_release(&np->lock); + return -ENOMEM; + } + np->len += TMPFS_BSIZE; + } + + /* + * If the write would run past the residual byte count, + * we will try to expand the buffer to fit. However, if + * this fails, we clamp the write to a size that does not + * overflow the buffer we already have. + */ + if ((sio->offset + sio->len) > np->len) { + tmp = dynrealloc(np->data, (sio->offset + sio->len)); + if (tmp == NULL) { + if (sio->offset >= np->len) { + spinlock_release(&np->lock); + return -ENOMEM; + } + sio->len = np->len - sio->offset; + } else { + np->data = tmp; + np->len = sio->offset + sio->len; + } + } + + buf = np->data; + memcpy(&buf[sio->offset], sio->buf, sio->len); + spinlock_release(&np->lock); + return sio->len; +} + +/* + * TMPFS read callback for VFS + */ +static int +tmpfs_read(struct vnode *vp, struct sio_txn *sio) +{ + struct tmpfs_node *np; + uint8_t *buf; + + if (sio->buf == NULL || sio->len == 0) { + return -EINVAL; + } + + /* This should not happen but you never know */ + if ((np = vp->data) == NULL) { + return -EIO; + } + + /* Is this even a regular file? 
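Directories carry no data buffer to copy from, hence the -EISDIR below. 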
*/ + if (np->type != VREG) { + return -EISDIR; + } + + spinlock_acquire(&np->lock); + + /* Don't leak the lock on a bogus offset */ + if (sio->offset >= np->len) { + spinlock_release(&np->lock); + return -EINVAL; + } + if ((sio->offset + sio->len) > np->len) { + sio->len = np->len - sio->offset; + } + + buf = np->data; + memcpy(sio->buf, &buf[sio->offset], sio->len); + spinlock_release(&np->lock); + return sio->len; +} + +static int +tmpfs_reclaim(struct vnode *vp) +{ + struct tmpfs_node *np; + + if ((np = vp->data) == NULL) { + return 0; + } + + np->vp = NULL; + vp->data = NULL; + return 0; +} + +static int +tmpfs_init(struct fs_info *fip) +{ + struct tmpfs_node *np; + struct vnode *vp; + struct mount *mp; + int error; + + /* Grab ourselves a new vnode for /tmp */ + if ((error = vfs_alloc_vnode(&vp, VDIR)) != 0) { + return error; + } + + vp->vops = &g_tmpfs_vops; + mp = vfs_alloc_mount(vp, fip); + vfs_name_mount(mp, "tmp"); + TAILQ_INSERT_TAIL(&g_mountlist, mp, mnt_list); + + /* Pre-allocate the first entry */ + if ((np = dynalloc(sizeof(*np))) == NULL) { + return -ENOMEM; + } + + TAILQ_INIT(&root); + memset(np, 0, sizeof(*np)); + + memcpy(np->rpath, ROOT_RPATH, sizeof(ROOT_RPATH)); + np->type = TMPFS_DIR; + TAILQ_INIT(&np->dirents); + TAILQ_INSERT_TAIL(&root, np, link); + return 0; +} + +const struct vops g_tmpfs_vops = { + .lookup = tmpfs_lookup, + .getattr = NULL, + .read = tmpfs_read, + .write = tmpfs_write, + .reclaim = tmpfs_reclaim, + .create = tmpfs_create, +}; + +const struct vfsops g_tmpfs_vfsops = { + .init = tmpfs_init +}; diff --git a/sys/include/arch/aarch64/cdefs.h b/sys/include/arch/aarch64/cdefs.h new file mode 100644 index 0000000..aaf8649 --- /dev/null +++ b/sys/include/arch/aarch64/cdefs.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _AARCH64_CDEFS_H_ +#define _AARCH64_CDEFS_H_ + +#include <sys/cdefs.h> +#include <machine/sync.h> + +#define md_pause() __ASMV("yield") +#define md_intoff() __ASMV("msr daifset, #2") +#define md_inton() __ASMV("msr daifclr, #2") +#define md_hlt() __ASMV("wfi") /* `hlt' is a debug trap on AArch64; wfi idles the core */ + +#endif /* !_AARCH64_CDEFS_H_ */ diff --git a/sys/include/arch/aarch64/cpu.h b/sys/include/arch/aarch64/cpu.h new file mode 100644 index 0000000..8c2d837 --- /dev/null +++ b/sys/include/arch/aarch64/cpu.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_CPU_H_ +#define _MACHINE_CPU_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <sys/proc.h> + +struct cpu_info { + struct proc *curtd; + struct cpu_info *self; +}; + +__dead void cpu_halt_all(void); +void cpu_startup(struct cpu_info *ci); +void cpu_halt_others(void); + +void mp_bootstrap_aps(struct cpu_info *ci); +struct cpu_info *this_cpu(void); + +extern struct cpu_info g_bsp_ci; + +#endif /* !_MACHINE_CPU_H_ */ diff --git a/sys/include/arch/aarch64/exception.h b/sys/include/arch/aarch64/exception.h new file mode 100644 index 0000000..9e89c81 --- /dev/null +++ b/sys/include/arch/aarch64/exception.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_EXCEPTION_H_ +#define _MACHINE_EXCEPTION_H_ + +#include <sys/types.h> +#include <machine/frame.h> + +/* Exception class */ +#define EC_UNKNOWN 0x00 /* Unknown type */ +#define EC_WF 0x01 /* Trapped WFI/WFE instruction */ +#define EC_MCRMRC 0x03 /* Trapped MCR/MRC */ +#define EC_MCRRC 0x04 /* Trapped MCRR/MRRC */ +#define EC_LDCSTC 0x06 /* Trapped LDC/STC */ +#define EC_SVE 0x07 /* Trapped SVE/SIMD/FP op */ +#define EC_BRE 0x0D /* Branch target exception */ +#define EC_ILLX 0x0E /* Illegal execution state */ +#define EC_SVC64 0x15 /* AARCH64 SVC */ +#define EC_PCALIGN 0x22 /* PC alignment fault */ +#define EC_DABORT 0x24 /* Data abort (from lower EL) */ +#define EC_EDABORT 0x25 /* Data abort (same EL) */ +#define EC_SPALIGN 0x26 /* SP alignment fault */ +#define EC_SERR 0x2F /* System error (what the fuck!) */ + +void handle_exception(struct trapframe *tf); + +#endif /* !_MACHINE_EXCEPTION_H_ */ diff --git a/sys/include/arch/aarch64/frame.h b/sys/include/arch/aarch64/frame.h new file mode 100644 index 0000000..143f4d0 --- /dev/null +++ b/sys/include/arch/aarch64/frame.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_FRAME_H_ +#define _MACHINE_FRAME_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> + +typedef uint64_t lreg_t; +typedef uint64_t frament_t; + +/* Stack regs */ +struct sregs { + lreg_t sp_el0; + lreg_t sp_el1; + lreg_t sp_el2; +}; + +/* Program status */ +struct pstat { + lreg_t spsr_el1; + lreg_t spsr_el2; + lreg_t spsr_el3; +}; + +struct __aligned(16) trapframe { + lreg_t x30; + lreg_t x29; + lreg_t x28; + lreg_t x27; + lreg_t x26; + lreg_t x25; + lreg_t x24; + lreg_t x23; + lreg_t x22; + lreg_t x21; + lreg_t x20; + lreg_t x19; + lreg_t x18; + lreg_t x17; + lreg_t x16; + lreg_t x15; + lreg_t x14; + lreg_t x13; + lreg_t x12; + lreg_t x11; + lreg_t x10; + lreg_t x9; + lreg_t x8; + lreg_t x7; + lreg_t x6; + lreg_t x5; + lreg_t x4; + lreg_t x3; + lreg_t x2; + lreg_t x1; + lreg_t x0; + lreg_t elr; + lreg_t esr; + frament_t trapno; +}; + +#define TF_IP(TFP) ((TFP)->pc) + +#endif /* !_MACHINE_FRAME_H_ */ diff --git a/sys/include/arch/aarch64/frameasm.h b/sys/include/arch/aarch64/frameasm.h new file mode 100644 index 0000000..ca7f81a --- /dev/null +++ b/sys/include/arch/aarch64/frameasm.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_FRAMEASM_H_ +#define _MACHINE_FRAMEASM_H_ + +/* XXX: Must be 16-byte aligned!!! 
*/ +#define XFRAME_STACK_SIZE (38 * 8) + +/* Trap numbers */ +#define TRAPNO_UNKNOWN #0 +#define TRAPNO_XSYNC #1 /* Synchronous */ +#define TRAPNO_XIRQ #2 /* IRQ */ +#define TRAPNO_XFIQ #3 /* FIQ */ +#define TRAPNO_XSERR #4 /* System error */ + +#define PUSH_XFRAME(TRAPNO) \ + sub sp, sp, #XFRAME_STACK_SIZE ; \ + stp x30, x29, [sp, #(0 * 8)] ; \ + stp x28, x27, [sp, #(2 * 8)] ; \ + stp x26, x25, [sp, #(4 * 8)] ; \ + stp x24, x23, [sp, #(6 * 8)] ; \ + stp x22, x21, [sp, #(8 * 8)] ; \ + stp x20, x19, [sp, #(10 * 8)] ; \ + stp x18, x17, [sp, #(12 * 8)] ; \ + stp x16, x15, [sp, #(14 * 8)] ; \ + stp x14, x13, [sp, #(16 * 8)] ; \ + stp x12, x11, [sp, #(18 * 8)] ; \ + stp x10, x9, [sp, #(20 * 8)] ; \ + stp x8, x7, [sp, #(22 * 8)] ; \ + stp x6, x5, [sp, #(24 * 8)] ; \ + stp x4, x3, [sp, #(26 * 8)] ; \ + stp x2, x1, [sp, #(28 * 8)] ; \ + str x0, [sp, #(30 * 8)] ; \ + ; \ + mrs x0, elr_el1 ; \ + str x0, [sp, #(31 * 8)] ; \ + mrs x0, esr_el1 ; \ + str x0, [sp, #(32 * 8)] ; \ + mov x0, TRAPNO ; \ + str x0, [sp, #(33 * 8)] ; \ + mov x0, sp + +#define POP_XFRAME() \ + ldr x0, [sp, #(30 * 8)] ; \ + ldp x2, x1, [sp, #(28 * 8)] ; \ + ldp x4, x3, [sp, #(26 * 8)] ; \ + ldp x6, x5, [sp, #(24 * 8)] ; \ + ldp x8, x7, [sp, #(22 * 8)] ; \ + ldp x10, x9, [sp, #(20 * 8)] ; \ + ldp x12, x11, [sp, #(18 * 8)] ; \ + ldp x14, x13, [sp, #(16 * 8)] ; \ + ldp x16, x15, [sp, #(14 * 8)] ; \ + ldp x18, x17, [sp, #(12 * 8)] ; \ + ldp x20, x19, [sp, #(10 * 8)] ; \ + ldp x22, x21, [sp, #(8 * 8)] ; \ + ldp x24, x23, [sp, #(6 * 8)] ; \ + ldp x26, x25, [sp, #(4 * 8)] ; \ + ldp x28, x27, [sp, #(2 * 8)] ; \ + ldp x30, x29, [sp, #(0 * 8)] ; \ + add sp, sp, #XFRAME_STACK_SIZE + +#endif /* !_MACHINE_FRAMEASM_H_ */ diff --git a/sys/include/arch/aarch64/intr.h b/sys/include/arch/aarch64/intr.h new file mode 100644 index 0000000..b85564f --- /dev/null +++ b/sys/include/arch/aarch64/intr.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MACHINE_INTR_H_ +#define _MACHINE_INTR_H_ + +#include <sys/types.h> + +/* + * Interrupt priority levels + */ +#define IPL_NONE 0 /* Don't defer anything */ +#define IPL_BIO 1 /* Block I/O */ +#define IPL_CLOCK 2 /* Clock */ +#define IPL_HIGH 3 /* Defer everything */ + +struct intr_entry { + int priority; +}; + +struct intr_hand { + int(*func)(void *); + char *name; + int priority; + int irq; + int vector; +}; + +void *intr_register(const char *name, const struct intr_hand *ih); + +#endif /* !_MACHINE_INTR_H_ */ diff --git a/sys/include/arch/aarch64/pcb.h b/sys/include/arch/aarch64/pcb.h new file mode 100644 index 0000000..f9a0b1a --- /dev/null +++ b/sys/include/arch/aarch64/pcb.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_PCB_H_ +#define _MACHINE_PCB_H_ + +#include <sys/types.h> +#include <vm/pmap.h> + +struct pcb { + struct vas addrsp; +}; + +#endif /* !_MACHINE_PCB_H_ */ diff --git a/sys/include/arch/aarch64/pci/pci.h b/sys/include/arch/aarch64/pci/pci.h new file mode 100644 index 0000000..189a423 --- /dev/null +++ b/sys/include/arch/aarch64/pci/pci.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_PCI_H_ +#define _MACHINE_PCI_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <dev/pci/pci.h> + +__weak pcireg_t md_pci_readl(struct pci_device *dev, uint32_t off); +__weak void md_pci_writel(struct pci_device *dev, uint32_t off, pcireg_t val); + +#endif /* !_MACHINE_PCI_H_ */ diff --git a/sys/include/arch/aarch64/pio.h b/sys/include/arch/aarch64/pio.h new file mode 100644 index 0000000..4aaeece --- /dev/null +++ b/sys/include/arch/aarch64/pio.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_PIO_H_ +#define _MACHINE_PIO_H_ + +#include <sys/cdefs.h> + +#define inb(...) (uint8_t)0 +#define inw(...) (uint16_t)0 +#define inl(...) (uint32_t)0 +#define outb(...) __nothing +#define outw(...) __nothing +#define outl(...) __nothing + + +#endif /* _MACHINE_PIO_H_ */ diff --git a/sys/include/arch/aarch64/sync.h b/sys/include/arch/aarch64/sync.h new file mode 100644 index 0000000..f331f43 --- /dev/null +++ b/sys/include/arch/aarch64/sync.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_SYNC_H_ +#define _MACHINE_SYNC_H_ + +int md_sync_all(void); + +#endif /* !_MACHINE_SYNC_H_ */ diff --git a/sys/include/arch/aarch64/syscall.h b/sys/include/arch/aarch64/syscall.h new file mode 100644 index 0000000..84a51e0 --- /dev/null +++ b/sys/include/arch/aarch64/syscall.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef _MACHINE_SYSCALL_H_
+#define _MACHINE_SYSCALL_H_
+
+#if !defined(__ASSEMBLER__)
+__always_inline static inline long
+syscall0(uint64_t code)
+{
+        return 0;       /* XXX: Stub until the AArch64 SVC path is wired up */
+}
+
+__always_inline static inline long
+syscall1(uint64_t code, uint64_t arg0)
+{
+        return 0;
+}
+
+__always_inline static inline long
+syscall2(uint64_t code, uint64_t arg0, uint64_t arg1)
+{
+        return 0;
+}
+
+__always_inline static inline long
+syscall3(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2)
+{
+        return 0;
+}
+
+__always_inline static inline long
+syscall4(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+        return 0;
+}
+
+__always_inline static inline long
+syscall5(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4)
+{
+        return 0;
+}
+
+__always_inline static inline long
+syscall6(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5)
+{
+        return 0;
+}
+
+#define _SYSCALL_N(a0, a1, a2, a3, a4, a5, a6, name, ...) \
+        name
+
+#define syscall(...) \
+_SYSCALL_N(__VA_ARGS__, syscall6, syscall5,     \
+    syscall4, syscall3, syscall2, syscall1,     \
+    syscall0)(__VA_ARGS__)
+
+#endif  /* !__ASSEMBLER__ */
+#endif  /* !_MACHINE_SYSCALL_H_ */
diff --git a/sys/include/arch/aarch64/vas.h b/sys/include/arch/aarch64/vas.h
new file mode 100644
index 0000000..07cd576
--- /dev/null
+++ b/sys/include/arch/aarch64/vas.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef _MACHINE_VAS_H_ +#define _MACHINE_VAS_H_ + +#include <sys/types.h> +#include <sys/spinlock.h> + +/* + * VAS structure - describes a virtual address space + */ +struct vas { + paddr_t ttbr0_el1; /* Lower half */ + paddr_t ttbr1_el1; /* Higher half */ + struct spinlock *lock; +}; + +#endif /* !_MACHINE_VAS_H_ */ diff --git a/sys/include/arch/amd64/bus.h b/sys/include/arch/amd64/bus.h index 00cb3ba..25088b4 100644 --- a/sys/include/arch/amd64/bus.h +++ b/sys/include/arch/amd64/bus.h @@ -36,13 +36,7 @@ struct bus_resource; -/* - * Hyra assumes that the bootloader uses PDE[256] for some - * higher half mappings. To avoid conflicts with those mappings, - * this offset is used to start device memory at PDE[257]. This - * will give us more than enough space. - */ -#define MMIO_OFFSET (VM_HIGHER_HALF + 0x8000000000) +#define MMIO_OFFSET VM_HIGHER_HALF /* Resource signature size max */ #define RSIG_MAX 16 diff --git a/sys/include/arch/amd64/cdefs.h b/sys/include/arch/amd64/cdefs.h index 256fd8b..0a20324 100644 --- a/sys/include/arch/amd64/cdefs.h +++ b/sys/include/arch/amd64/cdefs.h @@ -41,5 +41,15 @@ #define md_pause() __ASMV("rep; nop") /* (F3 90) PAUSE */ #define md_intoff() __ASMV("cli") /* Clear interrupts */ #define md_inton() __ASMV("sti") /* Enable interrupts */ +#define md_hlt() __ASMV("hlt") /* Halt the processor */ + +/* + * AMD64 specific defines + */ +#define __invlpg(VA) \ + __ASMV("invlpg %0" \ + : \ + : "m" ((VA)) \ + : "memory") #endif /* !_AMD64_CDEFS_H_ */ diff --git a/sys/include/arch/amd64/cpu.h b/sys/include/arch/amd64/cpu.h index ce42416..2d08d6e 100644 --- a/sys/include/arch/amd64/cpu.h +++ b/sys/include/arch/amd64/cpu.h @@ -33,6 +33,7 @@ #include <sys/types.h> #include <sys/cdefs.h> #include <sys/proc.h> +#include <sys/spinlock.h> #include <machine/tss.h> #define CPU_IRQ(IRQ_N) (BIT((IRQ_N)) & 0xFF) @@ -40,17 +41,25 @@ struct cpu_info { uint32_t apicid; uint8_t has_x2apic : 1; + uint8_t tlb_shootdown : 1; uint8_t ipl; size_t lapic_tmr_freq; uint8_t irq_mask; + vaddr_t shootdown_va; struct tss_entry *tss; struct proc *curtd; + struct spinlock lock; struct cpu_info *self; }; __dead void cpu_halt_all(void); +void cpu_halt_others(void); void cpu_startup(struct cpu_info *ci); +struct cpu_info *cpu_get(uint32_t index); +uint32_t cpu_count(void); +void cpu_shootdown_tlb(vaddr_t va); + struct cpu_info *this_cpu(void); void mp_bootstrap_aps(struct cpu_info *ci); diff --git a/sys/include/arch/amd64/frame.h b/sys/include/arch/amd64/frame.h index 31dcdef..2bd9a7c 100644 --- a/sys/include/arch/amd64/frame.h +++ b/sys/include/arch/amd64/frame.h @@ -58,4 +58,6 @@ struct trapframe { uint64_t ss; }; +#define TF_IP(TFP) ((TFP)->rip) + #endif /* !_MACHINE_FRAME_H_ */ diff --git a/sys/include/arch/amd64/frameasm.h b/sys/include/arch/amd64/frameasm.h index 22217eb..4dc075e 100644 --- a/sys/include/arch/amd64/frameasm.h +++ b/sys/include/arch/amd64/frameasm.h @@ -30,6 +30,8 @@ #ifndef _MACHINE_FRAMEASM_H_ #define _MACHINE_FRAMEASM_H_ +#define ALIGN_TEXT .align 8, 0x90 + /* * If the interrupt has an error code, this macro shall * be used to create the trapframe. 
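
(A minimal usage sketch, not part of this commit: assuming TRAPENTRY and
TRAPENTRY_EC expand to the complete entry stubs changed in the hunks below, a
vector file would instantiate them as follows; the label names and trap
numbers are illustrative.)

        /* vectors.S -- illustrative trap stubs */
        #include <machine/frameasm.h>

        .text
        ALIGN_TEXT
        TRAPENTRY(trap_breakpoint, $3)          /* #BP: no error code */
        ALIGN_TEXT
        TRAPENTRY_EC(trap_gpf, $13)             /* #GP: CPU pushes an error code */
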
@@ -121,6 +123,7 @@ */ #define TRAPENTRY_EC(ENTLABEL, TRAPNO) \ ENTLABEL: ; \ + cli ; \ testq $0x3, 16(%rsp) ; \ jz 1f ; \ lfence ; \ @@ -133,7 +136,8 @@ jz 2f ; \ lfence ; \ swapgs ; \ - 2: iretq + 2: sti ; \ + iretq /* * Trap entry where no error code is on @@ -141,6 +145,7 @@ */ #define TRAPENTRY(ENTLABEL, TRAPNO) \ ENTLABEL: ; \ + cli ; \ testq $0x3, 8(%rsp) ; \ jz 1f ; \ lfence ; \ @@ -153,6 +158,7 @@ jz 2f ; \ lfence ; \ swapgs ; \ - 2: iretq + 2: sti ; \ + iretq #endif /* !_MACHINE_FRAMEASM_H_ */ diff --git a/sys/include/arch/amd64/gdt.h b/sys/include/arch/amd64/gdt.h index f87416f..0c5faf1 100644 --- a/sys/include/arch/amd64/gdt.h +++ b/sys/include/arch/amd64/gdt.h @@ -4,18 +4,48 @@ #include <sys/types.h> #include <sys/cdefs.h> +#define GDT_TSS_INDEX 5 +#define GDT_ENTRY_COUNT 7 + +/* Segment selectors */ #define KERNEL_CS 0x08 #define KERNEL_DS 0x10 -#define USER_CS 0x18 -#define USER_DS 0x20 -#define GDT_TSS 5 +#define USER_CS 0x18 +#define USER_DS 0x20 + +/* + * Bit definitions for regular segment descriptors + * + * See Intel SPG 3/25 Section 3.4.5 - Segment Descriptors + */ + +#define GDT_ATTRIBUTE_ACCESSED BIT(0) /* Accessed */ +#define GDT_ATTRIBUTE_EXECUTABLE BIT(3) /* Executable */ +#define GDT_ATTRIBUTE_NONSYSTEM BIT(4) /* Code/data */ +#define GDT_ATTRIBUTE_PRESENT BIT(7) /* Present */ +#define GDT_ATTRIBUTE_64BIT_CODE BIT(13) /* 64-bit code */ +#define GDT_ATTRIBUTE_32BIT BIT(14) /* 32-bit code/data */ +#define GDT_ATTRIBUTE_GRANULARITY BIT(15) /* 4KiB limit granularity */ + +/* Attributes for executable segments */ +#define GDT_ATTRIBUTE_READABLE BIT(1) /* Readable */ +#define GDT_ATTRIBUTE_CONFORMING BIT(2) /* Conforming */ + +/* Attributes for non-executable segments */ +#define GDT_ATTRIBUTE_WRITABLE BIT(1) /* Writable */ +#define GDT_ATTRIBUTE_EXPANDS_DOWN BIT(2) /* See SPG 3/25 Section 6.8.1 */ + +/* DPL (Descriptor Privilege Level) specifier */ +#define GDT_ATTRIBUTE_DPL0 0 +#define GDT_ATTRIBUTE_DPL1 (1 << 5) +#define GDT_ATTRIBUTE_DPL2 (2 << 5) +#define GDT_ATTRIBUTE_DPL3 (3 << 5) struct __packed gdt_entry { uint16_t limit; uint16_t base_low; uint8_t base_mid; - uint8_t access; - uint8_t granularity; + uint16_t attributes; uint8_t base_hi; }; @@ -24,27 +54,28 @@ struct __packed gdtr { uintptr_t offset; }; +extern struct gdt_entry g_gdt_data[GDT_ENTRY_COUNT]; +extern const struct gdtr g_gdtr; + __always_inline static inline void -gdt_load(struct gdtr *gdtr) +gdt_load(void) { - __ASMV("lgdt %0\n" - "push $8\n" /* Push CS */ - "lea 1f(%%rip), %%rax\n" /* Load 1 label address into RAX */ - "push %%rax\n" /* Push the return address (label 1) */ - "lretq\n" /* Far return to update CS */ - "1:\n" - " mov $0x10, %%eax\n" - " mov %%eax, %%ds\n" - " mov %%eax, %%es\n" - " mov %%eax, %%fs\n" - " mov %%eax, %%gs\n" - " mov %%eax, %%ss\n" - : - : "m" (*gdtr) - : "rax", "memory" - ); + __ASMV("lgdt %0\n" + "push %1\n" /* Push code segment selector */ + "lea 1f(%%rip), %%rax\n" /* Load label 1 address into RAX */ + "push %%rax\n" /* Push return address (label 1) */ + "lretq\n" /* Far return to update CS */ + "1:\n" + " mov %2, %%ax\n" /* Load data segment selectors */ + " mov %%ax, %%ds\n" + " mov %%ax, %%es\n" + " mov %%ax, %%fs\n" + " mov %%ax, %%gs\n" + " mov %%ax, %%ss\n" + : + : "m" (g_gdtr), "i"(KERNEL_CS), "i"(KERNEL_DS) + : "rax", "memory" + ); } -extern struct gdt_entry g_gdt_data[256]; - #endif /* !AMD64_GDT_H_ */ diff --git a/sys/include/arch/amd64/intr.h b/sys/include/arch/amd64/intr.h index c643945..c848b6f 100644 --- a/sys/include/arch/amd64/intr.h +++ 
b/sys/include/arch/amd64/intr.h @@ -47,11 +47,59 @@ #define IPL_CLOCK 2 /* Clock */ #define IPL_HIGH 3 /* Defer everything */ -struct intr_entry { +struct intr_hand; + +/* + * Contains information passed to driver + * + * @ihp: Interrupt handler + * @data: Driver specific data + */ +struct intr_data { + struct intr_hand *ihp; + union { + void *data; + uint64_t data_u64; + }; +}; + +/* + * Interrupt handler + * + * [r]: Required for intr_register() + * [o]: Not required for intr_register() + * [v]: Returned by intr_register() + * + * @func: The actual handler [r] + * @data: Interrupt data [o/v] + * @name: Interrupt name [v] + * @priority: Interrupt priority [r] + * @irq: Interrupt request number [o] + * @vector: Interrupt vector [v] + * + * XXX: `name' must be null terminated ('\0') + * + * XXX: `irq` can be set to -1 for MSI/MSI-X + * interrupts. + * + * XXX: `func` must be the first field in this + * structure so that it may be called through + * assembly. + * + * XXX: `ist' should usually be set to -1 but can be + * used if an interrupt requires its own stack. + */ +struct intr_hand { + int(*func)(void *); + struct intr_data data; + char *name; int priority; + int irq; + int vector; }; -int intr_alloc_vector(const char *name, uint8_t priority); +void *intr_register(const char *name, const struct intr_hand *ih); + int splraise(uint8_t s); void splx(uint8_t s); diff --git a/sys/include/arch/amd64/ioapic.h b/sys/include/arch/amd64/ioapic.h index c11a85c..4cae800 100644 --- a/sys/include/arch/amd64/ioapic.h +++ b/sys/include/arch/amd64/ioapic.h @@ -31,7 +31,8 @@ #define _MACHINE_IOAPIC_H_ #include <sys/types.h> -#include <dev/acpi/tables.h> + +struct ioapic; void ioapic_init(struct ioapic *p); void ioapic_gsi_mask(uint8_t gsi); diff --git a/sys/include/arch/amd64/isa/i8042var.h b/sys/include/arch/amd64/isa/i8042var.h index ebd96ad..13c3095 100644 --- a/sys/include/arch/amd64/isa/i8042var.h +++ b/sys/include/arch/amd64/isa/i8042var.h @@ -82,7 +82,5 @@ void i8042_quirk(int mask); /* Internal - do not use */ void i8042_sync(void); -void i8042_kb_isr(void); -void i8042_kb_event(void); #endif /* _I8042VAR_H_ */ diff --git a/sys/include/arch/amd64/pci/pci.h b/sys/include/arch/amd64/pci/pci.h new file mode 100644 index 0000000..189a423 --- /dev/null +++ b/sys/include/arch/amd64/pci/pci.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MACHINE_PCI_H_ +#define _MACHINE_PCI_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <dev/pci/pci.h> + +__weak pcireg_t md_pci_readl(struct pci_device *dev, uint32_t off); +__weak void md_pci_writel(struct pci_device *dev, uint32_t off, pcireg_t val); + +#endif /* !_MACHINE_PCI_H_ */ diff --git a/sys/include/arch/amd64/syscall.h b/sys/include/arch/amd64/syscall.h new file mode 100644 index 0000000..cc401e9 --- /dev/null +++ b/sys/include/arch/amd64/syscall.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef _MACHINE_SYSCALL_H_
+#define _MACHINE_SYSCALL_H_
+
+#if !defined(__ASSEMBLER__)
+__always_inline static inline long
+syscall0(uint64_t code)
+{
+        volatile long ret;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code));
+        return ret;
+}
+
+__always_inline static inline long
+syscall1(uint64_t code, uint64_t arg0)
+{
+        volatile long ret;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0) : "memory");
+        return ret;
+}
+
+__always_inline static inline long
+syscall2(uint64_t code, uint64_t arg0, uint64_t arg1)
+{
+        volatile long ret;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1) : "memory");
+        return ret;
+}
+
+__always_inline static inline long
+syscall3(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2)
+{
+        volatile long ret;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2) : "memory");
+        return ret;
+}
+
+__always_inline static inline long
+syscall4(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3)
+{
+        volatile long ret;
+        register uint64_t _arg3 asm("r10") = arg3;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2), "r"(_arg3) : "memory");
+        return ret;
+}
+
+__always_inline static inline long
+syscall5(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4)
+{
+        volatile long ret;
+        register uint64_t _arg3 asm("r10") = arg3;
+        register uint64_t _arg4 asm("r9") = arg4;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2), "r"(_arg3), "r"(_arg4) : "memory");
+        return ret;
+}
+
+__always_inline static inline long
+syscall6(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5)
+{
+        volatile long ret;
+        register uint64_t _arg3 asm("r10") = arg3;
+        register uint64_t _arg4 asm("r9") = arg4;
+        register uint64_t _arg5 asm("r8") = arg5;
+        __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) : "memory");
+        return ret;
+}
+
+#define _SYSCALL_N(a0, a1, a2, a3, a4, a5, a6, name, ...) \
+        name
+
+#define syscall(...) \
+_SYSCALL_N(__VA_ARGS__, syscall6, syscall5,     \
+    syscall4, syscall3, syscall2, syscall1,     \
+    syscall0)(__VA_ARGS__)
+
+#endif  /* !__ASSEMBLER__ */
+#endif  /* !_MACHINE_SYSCALL_H_ */
diff --git a/sys/include/dev/acpi/acpi.h b/sys/include/dev/acpi/acpi.h
index 9cd6b87..5108748 100644
--- a/sys/include/dev/acpi/acpi.h
+++ b/sys/include/dev/acpi/acpi.h
@@ -30,8 +30,15 @@
 #ifndef _ACPI_H_
 #define _ACPI_H_
 
+#include <sys/types.h>
+
+#define ACPI_SLEEP_S5 0x00000000
+
 const char *acpi_oemid(void);
 void *acpi_query(const char *query);
+
+paddr_t acpi_rsdp(void);
+int acpi_sleep(int type);
 void acpi_init(void);
 
 #endif /* !_ACPI_H_ */
diff --git a/sys/include/dev/acpi/tables.h b/sys/include/dev/acpi/tables.h
index 5215c86..5340c7f 100644
--- a/sys/include/dev/acpi/tables.h
+++ b/sys/include/dev/acpi/tables.h
@@ -132,4 +132,65 @@
     uint8_t page_protection;
 };
 
+/*
+ * PCIe / ACPI MCFG base address description
+ * table.
+ *
+ * @base_pa: Enhanced configuration base [physical]
+ * @seg_grpno: PCI segment group number
+ * @bus_start: Host bridge bus start
+ * @bus_end: Host bridge bus end
+ */
+struct __packed acpi_mcfg_base {
+        uint64_t base_pa;
+        uint16_t seg_grpno;
+        uint8_t bus_start;
+        uint8_t bus_end;
+        uint32_t reserved;
+};
+
+/*
+ * PCIe / ACPI MCFG structure
+ *
+ * @hdr: ACPI header
+ * @reserved: Do not use
+ * @base: ECAM MMIO address list
+ */
+struct __packed acpi_mcfg {
+        struct acpi_header hdr;
+        uint32_t reserved[2];
+        struct acpi_mcfg_base base[1];
+};
+
+struct __packed dmi_entry32 {
+        char signature[4];      /* _SM_ */
+        uint8_t checksum;       /* Sum of table bytes */
+        uint8_t length;         /* Length of entry table */
+        uint8_t major;          /* DMI major */
+        uint8_t minor;          /* DMI minor */
+        uint16_t max_size;      /* Max structure size */
+        uint8_t rev;            /* Entry revision */
+        char fmt_area[5];       /* Formatted area */
+        char isignature[5];     /* Intermediate signature */
+        uint8_t ichecksum;      /* Intermediate checksum */
+        uint16_t table_len;     /* Length of SMBIOS structure table */
+        uint32_t addr;          /* 32-bit physical start of SMBIOS structure table */
+        uint16_t nstruct;       /* Total number of structures */
+        uint8_t bcd_rev;
+};
+
+struct __packed dmi_entry64 {
+        char signature[5];      /* _SM3_ */
+        uint8_t checksum;       /* Sum of table bytes */
+        uint8_t length;         /* Length of entry table */
+        uint8_t major;          /* DMI major */
+        uint8_t minor;          /* DMI minor */
+        uint8_t docrev;
+        uint8_t entry_rev;
+        uint8_t reserved;
+        uint16_t max_size;      /* Max structure size */
+        uint16_t padding;
+        uint64_t addr;          /* 64-bit physical address */
+};
+
 #endif /* _ACPI_TABLES_H_ */
diff --git a/sys/include/dev/acpi/uacpi.h b/sys/include/dev/acpi/uacpi.h
new file mode 100644
index 0000000..d38e087
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef _UACPI_BRIDGE_H_ +#define _UACPI_BRIDGE_H_ + +int uacpi_init(void); + +#endif /* !_UACPI_BRIDGE_H_ */ diff --git a/sys/include/dev/acpi/uacpi/uacpi/acpi.h b/sys/include/dev/acpi/uacpi/uacpi/acpi.h new file mode 100644 index 0000000..79eb31b --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/acpi.h @@ -0,0 +1,1430 @@ +#pragma once + +#include <uacpi/platform/compiler.h> +#include <uacpi/helpers.h> +#include <uacpi/types.h> + +/* + * ----------------------------------------------------- + * Common structures provided by the ACPI specification + * ----------------------------------------------------- + */ + +#define ACPI_RSDP_SIGNATURE "RSD PTR " +#define ACPI_RSDT_SIGNATURE "RSDT" +#define ACPI_XSDT_SIGNATURE "XSDT" +#define ACPI_MADT_SIGNATURE "APIC" +#define ACPI_FADT_SIGNATURE "FACP" +#define ACPI_FACS_SIGNATURE "FACS" +#define ACPI_MCFG_SIGNATURE "MCFG" +#define ACPI_HPET_SIGNATURE "HPET" +#define ACPI_SRAT_SIGNATURE "SRAT" +#define ACPI_SLIT_SIGNATURE "SLIT" +#define ACPI_DSDT_SIGNATURE "DSDT" +#define ACPI_SSDT_SIGNATURE "SSDT" +#define ACPI_PSDT_SIGNATURE "PSDT" +#define ACPI_ECDT_SIGNATURE "ECDT" +#define ACPI_RHCT_SIGNATURE "RHCT" + +#define ACPI_AS_ID_SYS_MEM 0x00 +#define ACPI_AS_ID_SYS_IO 0x01 +#define ACPI_AS_ID_PCI_CFG_SPACE 0x02 +#define ACPI_AS_ID_EC 0x03 +#define ACPI_AS_ID_SMBUS 0x04 +#define ACPI_AS_ID_SYS_CMOS 0x05 +#define ACPI_AS_ID_PCI_BAR_TGT 0x06 +#define ACPI_AS_ID_IPMI 0x07 +#define ACPI_AS_ID_GP_IO 0x08 +#define ACPI_AS_ID_GENERIC_SBUS 0x09 +#define ACPI_AS_ID_PCC 0x0A +#define ACPI_AS_ID_FFH 0x7F +#define ACPI_AS_ID_OEM_BASE 0xC0 +#define ACPI_AS_ID_OEM_END 0xFF + +#define ACPI_ACCESS_UD 0 +#define ACPI_ACCESS_BYTE 1 +#define ACPI_ACCESS_WORD 2 +#define ACPI_ACCESS_DWORD 3 +#define ACPI_ACCESS_QWORD 4 + +UACPI_PACKED(struct acpi_gas { + uacpi_u8 address_space_id; + uacpi_u8 register_bit_width; + uacpi_u8 register_bit_offset; + uacpi_u8 access_size; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gas, 12); + +UACPI_PACKED(struct acpi_rsdp { + uacpi_char signature[8]; + uacpi_u8 checksum; + uacpi_char oemid[6]; + uacpi_u8 revision; + uacpi_u32 rsdt_addr; + + // vvvv available if .revision >= 2.0 only + uacpi_u32 length; + uacpi_u64 xsdt_addr; + uacpi_u8 extended_checksum; + uacpi_u8 rsvd[3]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rsdp, 36); + +UACPI_PACKED(struct acpi_sdt_hdr { + uacpi_char signature[4]; + uacpi_u32 length; + uacpi_u8 revision; + uacpi_u8 checksum; + uacpi_char oemid[6]; + uacpi_char oem_table_id[8]; + uacpi_u32 oem_revision; + uacpi_u32 creator_id; + uacpi_u32 creator_revision; +}) +UACPI_EXPECT_SIZEOF(struct acpi_sdt_hdr, 36); + +UACPI_PACKED(struct acpi_rsdt { + struct acpi_sdt_hdr hdr; + uacpi_u32 entries[]; +}) + +UACPI_PACKED(struct acpi_xsdt { + struct acpi_sdt_hdr hdr; + uacpi_u64 entries[]; +}) + +UACPI_PACKED(struct acpi_entry_hdr { + /* + * - acpi_madt_entry_type for the APIC table + * - acpi_srat_entry_type for the SRAT table + */ + uacpi_u8 type; + uacpi_u8 length; +}) + +// acpi_madt->flags +#define ACPI_PCAT_COMPAT (1 << 0) + +enum acpi_madt_entry_type { + ACPI_MADT_ENTRY_TYPE_LAPIC = 0, + ACPI_MADT_ENTRY_TYPE_IOAPIC = 1, + ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE = 2, + ACPI_MADT_ENTRY_TYPE_NMI_SOURCE = 3, + ACPI_MADT_ENTRY_TYPE_LAPIC_NMI = 4, + ACPI_MADT_ENTRY_TYPE_LAPIC_ADDRESS_OVERRIDE = 5, + ACPI_MADT_ENTRY_TYPE_IOSAPIC = 6, + ACPI_MADT_ENTRY_TYPE_LSAPIC = 7, + ACPI_MADT_ENTRY_TYPE_PLATFORM_INTERRUPT_SOURCES = 8, + ACPI_MADT_ENTRY_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_ENTRY_TYPE_LOCAL_X2APIC_NMI = 
0xA, + ACPI_MADT_ENTRY_TYPE_GICC = 0xB, + ACPI_MADT_ENTRY_TYPE_GICD = 0xC, + ACPI_MADT_ENTRY_TYPE_GIC_MSI_FRAME = 0xD, + ACPI_MADT_ENTRY_TYPE_GICR = 0xE, + ACPI_MADT_ENTRY_TYPE_GIC_ITS = 0xF, + ACPI_MADT_ENTRY_TYPE_MULTIPROCESSOR_WAKEUP = 0x10, + ACPI_MADT_ENTRY_TYPE_CORE_PIC = 0x11, + ACPI_MADT_ENTRY_TYPE_LIO_PIC = 0x12, + ACPI_MADT_ENTRY_TYPE_HT_PIC = 0x13, + ACPI_MADT_ENTRY_TYPE_EIO_PIC = 0x14, + ACPI_MADT_ENTRY_TYPE_MSI_PIC = 0x15, + ACPI_MADT_ENTRY_TYPE_BIO_PIC = 0x16, + ACPI_MADT_ENTRY_TYPE_LPC_PIC = 0x17, + ACPI_MADT_ENTRY_TYPE_RINTC = 0x18, + ACPI_MADT_ENTRY_TYPE_IMSIC = 0x19, + ACPI_MADT_ENTRY_TYPE_APLIC = 0x1A, + ACPI_MADT_ENTRY_TYPE_PLIC = 0x1B, + ACPI_MADT_ENTRY_TYPE_RESERVED = 0x1C, // 0x1C..0x7F + ACPI_MADT_ENTRY_TYPE_OEM = 0x80, // 0x80..0xFF +}; + +UACPI_PACKED(struct acpi_madt { + struct acpi_sdt_hdr hdr; + uacpi_u32 local_interrupt_controller_address; + uacpi_u32 flags; + struct acpi_entry_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt, 44); + +/* + * - acpi_madt_lapic->flags + * - acpi_madt_lsapic->flags + * - acpi_madt_x2apic->flags + */ +#define ACPI_PIC_ENABLED (1 << 0) +#define ACPI_PIC_ONLINE_CAPABLE (1 << 1) + +UACPI_PACKED(struct acpi_madt_lapic { + struct acpi_entry_hdr hdr; + uacpi_u8 uid; + uacpi_u8 id; + uacpi_u32 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lapic, 8); + +UACPI_PACKED(struct acpi_madt_ioapic { + struct acpi_entry_hdr hdr; + uacpi_u8 id; + uacpi_u8 rsvd; + uacpi_u32 address; + uacpi_u32 gsi_base; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_ioapic, 12); + +/* + * - acpi_madt_interrupt_source_override->flags + * - acpi_madt_nmi_source->flags + * - acpi_madt_lapic_nmi->flags + * - acpi_madt_platform_interrupt_source->flags + * - acpi_madt_x2apic_nmi->flags + */ +#define ACPI_MADT_POLARITY_MASK 0b11 +#define ACPI_MADT_POLARITY_CONFORMING 0b00 +#define ACPI_MADT_POLARITY_ACTIVE_HIGH 0b01 +#define ACPI_MADT_POLARITY_ACTIVE_LOW 0b11 + +#define ACPI_MADT_TRIGGERING_MASK 0b1100 +#define ACPI_MADT_TRIGGERING_CONFORMING 0b0000 +#define ACPI_MADT_TRIGGERING_EDGE 0b0100 +#define ACPI_MADT_TRIGGERING_LEVEL 0b1100 + +UACPI_PACKED(struct acpi_madt_interrupt_source_override { + struct acpi_entry_hdr hdr; + uacpi_u8 bus; + uacpi_u8 source; + uacpi_u32 gsi; + uacpi_u16 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_interrupt_source_override, 10); + +UACPI_PACKED(struct acpi_madt_nmi_source { + struct acpi_entry_hdr hdr; + uacpi_u16 flags; + uacpi_u32 gsi; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_nmi_source, 8); + +UACPI_PACKED(struct acpi_madt_lapic_nmi { + struct acpi_entry_hdr hdr; + uacpi_u8 uid; + uacpi_u16 flags; + uacpi_u8 lint; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lapic_nmi, 6); + +UACPI_PACKED(struct acpi_madt_lapic_address_override { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lapic_address_override, 12); + +UACPI_PACKED(struct acpi_madt_iosapic { + struct acpi_entry_hdr hdr; + uacpi_u8 id; + uacpi_u8 rsvd; + uacpi_u32 gsi_base; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_iosapic, 16); + +UACPI_PACKED(struct acpi_madt_lsapic { + struct acpi_entry_hdr hdr; + uacpi_u8 acpi_id; + uacpi_u8 id; + uacpi_u8 eid; + uacpi_u8 reserved[3]; + uacpi_u32 flags; + uacpi_u32 uid; + uacpi_char uid_string[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lsapic, 16); + +// acpi_madt_platform_interrupt_source->platform_flags +#define ACPI_CPEI_PROCESSOR_OVERRIDE (1 << 0) + +UACPI_PACKED(struct acpi_madt_platform_interrupt_source { + struct acpi_entry_hdr hdr; + uacpi_u16 
flags; + uacpi_u8 type; + uacpi_u8 processor_id; + uacpi_u8 processor_eid; + uacpi_u8 iosapic_vector; + uacpi_u32 gsi; + uacpi_u32 platform_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_platform_interrupt_source, 16); + +UACPI_PACKED(struct acpi_madt_x2apic { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u32 id; + uacpi_u32 flags; + uacpi_u32 uid; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_x2apic, 16); + +UACPI_PACKED(struct acpi_madt_x2apic_nmi { + struct acpi_entry_hdr hdr; + uacpi_u16 flags; + uacpi_u32 uid; + uacpi_u8 lint; + uacpi_u8 reserved[3]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_x2apic_nmi, 12); + +// acpi_madt_gicc->flags +#define ACPI_GICC_ENABLED (1 << 0) +#define ACPI_GICC_PERF_INTERRUPT_MODE (1 << 1) +#define ACPI_GICC_VGIC_MAINTENANCE_INTERRUPT_MODE (1 << 2) +#define ACPI_GICC_ONLINE_CAPABLE (1 << 3) + +// ACPI_GICC_*_INTERRUPT_MODE +#define ACPI_GICC_TRIGGERING_EDGE 1 +#define ACPI_GICC_TRIGGERING_LEVEL 0 + +UACPI_PACKED(struct acpi_madt_gicc { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 interface_number; + uacpi_u32 acpi_id; + uacpi_u32 flags; + uacpi_u32 parking_protocol_version; + uacpi_u32 perf_interrupt_gsiv; + uacpi_u64 parked_address; + uacpi_u64 address; + uacpi_u64 gicv; + uacpi_u64 gich; + uacpi_u32 vgic_maitenante_interrupt; + uacpi_u64 gicr_base_address; + uacpi_u64 mpidr; + uacpi_u8 power_efficiency_class; + uacpi_u8 rsvd1; + uacpi_u16 spe_overflow_interrupt; + uacpi_u16 trbe_interrupt; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gicc, 82); + +UACPI_PACKED(struct acpi_madt_gicd { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 id; + uacpi_u64 address; + uacpi_u32 system_vector_base; + uacpi_u8 gic_version; + uacpi_u8 reserved1[3]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gicd, 24); + +// acpi_madt_gic_msi_frame->flags +#define ACPI_SPI_SELECT (1 << 0) + +UACPI_PACKED(struct acpi_madt_gic_msi_frame { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u32 id; + uacpi_u64 address; + uacpi_u32 flags; + uacpi_u16 spi_count; + uacpi_u16 spi_base; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gic_msi_frame, 24); + +UACPI_PACKED(struct acpi_madt_gicr { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u64 address; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gicr, 16); + +UACPI_PACKED(struct acpi_madt_gic_its { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 id; + uacpi_u64 address; + uacpi_u32 rsvd1; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gic_its, 20); + +UACPI_PACKED(struct acpi_madt_multiprocessor_wakeup { + struct acpi_entry_hdr hdr; + uacpi_u16 mailbox_version; + uacpi_u32 rsvd; + uacpi_u64 mailbox_address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_multiprocessor_wakeup, 16); + +#define ACPI_CORE_PIC_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_madt_core_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u32 acpi_id; + uacpi_u32 id; + uacpi_u32 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_core_pic, 15); + +UACPI_PACKED(struct acpi_madt_lio_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u16 cascade_vector; + uacpi_u64 cascade_vector_mapping; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lio_pic, 23); + +UACPI_PACKED(struct acpi_madt_ht_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u64 cascade_vector; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_ht_pic, 21); + +UACPI_PACKED(struct acpi_madt_eio_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 
cascade_vector; + uacpi_u8 node; + uacpi_u64 node_map; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_eio_pic, 13); + +UACPI_PACKED(struct acpi_madt_msi_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u32 start; + uacpi_u32 count; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_msi_pic, 19); + +UACPI_PACKED(struct acpi_madt_bio_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u16 hardware_id; + uacpi_u16 gsi_base; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_bio_pic, 17); + +UACPI_PACKED(struct acpi_madt_lpc_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u16 cascade_vector; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lpc_pic, 15); + +UACPI_PACKED(struct acpi_madt_rintc { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 rsvd; + uacpi_u32 flags; + uacpi_u64 hart_id; + uacpi_u32 uid; + uacpi_u32 ext_intc_id; + uacpi_u64 address; + uacpi_u32 size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_rintc, 36); + +UACPI_PACKED(struct acpi_madt_imsic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 rsvd; + uacpi_u32 flags; + uacpi_u16 num_ids; + uacpi_u16 num_guest_ids; + uacpi_u8 guest_index_bits; + uacpi_u8 hart_index_bits; + uacpi_u8 group_index_bits; + uacpi_u8 group_index_shift; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_imsic, 16); + +UACPI_PACKED(struct acpi_madt_aplic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 id; + uacpi_u32 flags; + uacpi_u64 hardware_id; + uacpi_u16 idc_count; + uacpi_u16 sources_count; + uacpi_u32 gsi_base; + uacpi_u64 address; + uacpi_u32 size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_aplic, 36); + +UACPI_PACKED(struct acpi_madt_plic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 id; + uacpi_u64 hardware_id; + uacpi_u16 sources_count; + uacpi_u16 max_priority; + uacpi_u32 flags; + uacpi_u32 size; + uacpi_u64 address; + uacpi_u32 gsi_base; + +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_plic, 36); + +enum acpi_srat_entry_type { + ACPI_SRAT_ENTRY_TYPE_PROCESSOR_AFFINITY = 0, + ACPI_SRAT_ENTRY_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_ENTRY_TYPE_X2APIC_AFFINITY = 2, + ACPI_SRAT_ENTRY_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_ENTRY_TYPE_GIC_ITS_AFFINITY = 4, + ACPI_SRAT_ENTRY_TYPE_GENERIC_INITIATOR_AFFINITY = 5, + ACPI_SRAT_ENTRY_TYPE_GENERIC_PORT_AFFINITY = 6, + ACPI_SRAT_ENTRY_TYPE_RINTC_AFFINITY = 7, +}; + +UACPI_PACKED(struct acpi_srat { + struct acpi_sdt_hdr hdr; + uacpi_u32 rsvd0; + uacpi_u64 rsvd1; + struct acpi_entry_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat, 48); + +/* + * acpi_srat_processor_affinity->flags + * acpi_srat_x2apic_affinity->flags + */ +#define ACPI_SRAT_PROCESSOR_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_srat_processor_affinity { + struct acpi_entry_hdr hdr; + uacpi_u8 proximity_domain_low; + uacpi_u8 id; + uacpi_u32 flags; + uacpi_u8 eid; + uacpi_u8 proximity_domain_high[3]; + uacpi_u32 clock_domain; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_processor_affinity, 16); + +// acpi_srat_memory_affinity->flags +#define ACPI_SRAT_MEMORY_ENABLED (1 << 0) +#define ACPI_SRAT_MEMORY_HOTPLUGGABLE (1 << 1) +#define ACPI_SRAT_MEMORY_NON_VOLATILE (1 << 2) + +UACPI_PACKED(struct acpi_srat_memory_affinity { + struct acpi_entry_hdr hdr; + uacpi_u32 proximity_domain; + uacpi_u16 rsvd0; + uacpi_u64 address; + uacpi_u64 length; + uacpi_u32 rsvd1; + uacpi_u32 flags; + uacpi_u64 rsdv2; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_memory_affinity, 40); + +UACPI_PACKED(struct 
acpi_srat_x2apic_affinity { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 proximity_domain; + uacpi_u32 id; + uacpi_u32 flags; + uacpi_u32 clock_domain; + uacpi_u32 rsvd1; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_x2apic_affinity, 24); + +// acpi_srat_gicc_affinity->flags +#define ACPI_SRAT_GICC_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_srat_gicc_affinity { + struct acpi_entry_hdr hdr; + uacpi_u32 proximity_domain; + uacpi_u32 uid; + uacpi_u32 flags; + uacpi_u32 clock_domain; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_gicc_affinity, 18); + +UACPI_PACKED(struct acpi_srat_gic_its_affinity { + struct acpi_entry_hdr hdr; + uacpi_u32 proximity_domain; + uacpi_u16 rsvd; + uacpi_u32 id; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_gic_its_affinity, 12); + +// acpi_srat_generic_affinity->flags +#define ACPI_GENERIC_AFFINITY_ENABLED (1 << 0) +#define ACPI_GENERIC_AFFINITY_ARCH_TRANSACTIONS (1 << 1) + +UACPI_PACKED(struct acpi_srat_generic_affinity { + struct acpi_entry_hdr hdr; + uacpi_u8 rsvd0; + uacpi_u8 handle_type; + uacpi_u32 proximity_domain; + uacpi_u8 handle[16]; + uacpi_u32 flags; + uacpi_u32 rsvd1; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_generic_affinity, 32); + +// acpi_srat_rintc_affinity->flags +#define ACPI_SRAT_RINTC_AFFINITY_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_srat_rintc_affinity { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u32 proximity_domain; + uacpi_u32 uid; + uacpi_u32 flags; + uacpi_u32 clock_domain; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_rintc_affinity, 20); + +UACPI_PACKED(struct acpi_slit { + struct acpi_sdt_hdr hdr; + uacpi_u64 num_localities; + uacpi_u8 matrix[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_slit, 44); + +/* + * acpi_gtdt->el*_flags + * acpi_gtdt_timer_entry->physical_flags + * acpi_gtdt_timer_entry->virtual_flags + * acpi_gtdt_watchdog->flags + */ +#define ACPI_GTDT_TRIGGERING (1 << 0) +#define ACPI_GTDT_TRIGGERING_EDGE 1 +#define ACPI_GTDT_TRIGGERING_LEVEL 0 + +/* + * acpi_gtdt->el*_flags + * acpi_gtdt_timer_entry->physical_flags + * acpi_gtdt_timer_entry->virtual_flags + * acpi_gtdt_watchdog->flags + */ +#define ACPI_GTDT_POLARITY (1 << 1) +#define ACPI_GTDT_POLARITY_ACTIVE_LOW 1 +#define ACPI_GTDT_POLARITY_ACTIVE_HIGH 0 + +// acpi_gtdt->el*_flags +#define ACPI_GTDT_ALWAYS_ON_CAPABLE (1 << 2) + +UACPI_PACKED(struct acpi_gtdt { + struct acpi_sdt_hdr hdr; + uacpi_u64 cnt_control_base; + uacpi_u32 rsvd; + uacpi_u32 el1_secure_gsiv; + uacpi_u32 el1_secure_flags; + uacpi_u32 el1_non_secure_gsiv; + uacpi_u32 el1_non_secure_flags; + uacpi_u32 el1_virtual_gsiv; + uacpi_u32 el1_virtual_flags; + uacpi_u32 el2_gsiv; + uacpi_u32 el2_flags; + uacpi_u64 cnt_read_base; + uacpi_u32 platform_timer_count; + uacpi_u32 platform_timer_offset; + + // revision >= 3 + uacpi_u32 el2_virtual_gsiv; + uacpi_u32 el2_virtual_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt, 104); + +enum acpi_gtdt_entry_type { + ACPI_GTDT_ENTRY_TYPE_TIMER = 0, + ACPI_GTDT_ENTRY_TYPE_WATCHDOG = 1, +}; + +UACPI_PACKED(struct acpi_gtdt_entry_hdr { + uacpi_u8 type; + uacpi_u16 length; +}) + +UACPI_PACKED(struct acpi_gtdt_timer { + struct acpi_gtdt_entry_hdr hdr; + uacpi_u8 rsvd; + uacpi_u64 cnt_ctl_base; + uacpi_u32 timer_count; + uacpi_u32 timer_offset; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt_timer, 20); + +// acpi_gtdt_timer_entry->common_flags +#define ACPI_GTDT_TIMER_ENTRY_SECURE (1 << 0) +#define ACPI_GTDT_TIMER_ENTRY_ALWAYS_ON_CAPABLE (1 << 1) + +UACPI_PACKED(struct acpi_gtdt_timer_entry { + uacpi_u8 frame_number; + uacpi_u8 rsvd[3]; + uacpi_u64 cnt_base; + 
uacpi_u64 el0_cnt_base; + uacpi_u32 physical_gsiv; + uacpi_u32 physical_flags; + uacpi_u32 virtual_gsiv; + uacpi_u32 virtual_flags; + uacpi_u32 common_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt_timer_entry, 40); + +// acpi_gtdt_watchdog->flags +#define ACPI_GTDT_WATCHDOG_SECURE (1 << 2) + +UACPI_PACKED(struct acpi_gtdt_watchdog { + struct acpi_gtdt_entry_hdr hdr; + uacpi_u8 rsvd; + uacpi_u64 refresh_frame; + uacpi_u64 control_frame; + uacpi_u32 gsiv; + uacpi_u32 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt_watchdog, 28); + +// acpi_fdt->iapc_flags +#define ACPI_IA_PC_LEGACY_DEVS (1 << 0) +#define ACPI_IA_PC_8042 (1 << 1) +#define ACPI_IA_PC_NO_VGA (1 << 2) +#define ACPI_IA_PC_NO_MSI (1 << 3) +#define ACPI_IA_PC_NO_PCIE_ASPM (1 << 4) +#define ACPI_IA_PC_NO_CMOS_RTC (1 << 5) + +// acpi_fdt->flags +#define ACPI_WBINVD (1 << 0) +#define ACPI_WBINVD_FLUSH (1 << 1) +#define ACPI_PROC_C1 (1 << 2) +#define ACPI_P_LVL2_UP (1 << 3) +#define ACPI_PWR_BUTTON (1 << 4) +#define ACPI_SLP_BUTTON (1 << 5) +#define ACPI_FIX_RTC (1 << 6) +#define ACPI_RTC_S4 (1 << 7) +#define ACPI_TMR_VAL_EXT (1 << 8) +#define ACPI_DCK_CAP (1 << 9) +#define ACPI_RESET_REG_SUP (1 << 10) +#define ACPI_SEALED_CASE (1 << 11) +#define ACPI_HEADLESS (1 << 12) +#define ACPI_CPU_SW_SLP (1 << 13) +#define ACPI_PCI_EXP_WAK (1 << 14) +#define ACPI_USE_PLATFORM_CLOCK (1 << 15) +#define ACPI_S4_RTC_STS_VALID (1 << 16) +#define ACPI_REMOTE_POWER_ON_CAPABLE (1 << 17) +#define ACPI_FORCE_APIC_CLUSTER_MODEL (1 << 18) +#define ACPI_FORCE_APIC_PHYS_DEST_MODE (1 << 19) +#define ACPI_HW_REDUCED_ACPI (1 << 20) +#define ACPI_LOW_POWER_S0_IDLE_CAPABLE (1 << 21) + +// acpi_fdt->arm_flags +#define ACPI_ARM_PSCI_COMPLIANT (1 << 0) +#define ACPI_ARM_PSCI_USE_HVC (1 << 1) + +UACPI_PACKED(struct acpi_fadt { + struct acpi_sdt_hdr hdr; + uacpi_u32 firmware_ctrl; + uacpi_u32 dsdt; + uacpi_u8 int_model; + uacpi_u8 preferred_pm_profile; + uacpi_u16 sci_int; + uacpi_u32 smi_cmd; + uacpi_u8 acpi_enable; + uacpi_u8 acpi_disable; + uacpi_u8 s4bios_req; + uacpi_u8 pstate_cnt; + uacpi_u32 pm1a_evt_blk; + uacpi_u32 pm1b_evt_blk; + uacpi_u32 pm1a_cnt_blk; + uacpi_u32 pm1b_cnt_blk; + uacpi_u32 pm2_cnt_blk; + uacpi_u32 pm_tmr_blk; + uacpi_u32 gpe0_blk; + uacpi_u32 gpe1_blk; + uacpi_u8 pm1_evt_len; + uacpi_u8 pm1_cnt_len; + uacpi_u8 pm2_cnt_len; + uacpi_u8 pm_tmr_len; + uacpi_u8 gpe0_blk_len; + uacpi_u8 gpe1_blk_len; + uacpi_u8 gpe1_base; + uacpi_u8 cst_cnt; + uacpi_u16 p_lvl2_lat; + uacpi_u16 p_lvl3_lat; + uacpi_u16 flush_size; + uacpi_u16 flush_stride; + uacpi_u8 duty_offset; + uacpi_u8 duty_width; + uacpi_u8 day_alrm; + uacpi_u8 mon_alrm; + uacpi_u8 century; + uacpi_u16 iapc_boot_arch; + uacpi_u8 rsvd; + uacpi_u32 flags; + struct acpi_gas reset_reg; + uacpi_u8 reset_value; + uacpi_u16 arm_boot_arch; + uacpi_u8 fadt_minor_verison; + uacpi_u64 x_firmware_ctrl; + uacpi_u64 x_dsdt; + struct acpi_gas x_pm1a_evt_blk; + struct acpi_gas x_pm1b_evt_blk; + struct acpi_gas x_pm1a_cnt_blk; + struct acpi_gas x_pm1b_cnt_blk; + struct acpi_gas x_pm2_cnt_blk; + struct acpi_gas x_pm_tmr_blk; + struct acpi_gas x_gpe0_blk; + struct acpi_gas x_gpe1_blk; + struct acpi_gas sleep_control_reg; + struct acpi_gas sleep_status_reg; + uacpi_u64 hypervisor_vendor_identity; +}) +UACPI_EXPECT_SIZEOF(struct acpi_fadt, 276); + +// acpi_facs->flags +#define ACPI_S4BIOS_F (1 << 0) +#define ACPI_64BIT_WAKE_SUPPORTED_F (1 << 1) + +// acpi_facs->ospm_flags +#define ACPI_64BIT_WAKE_F (1 << 0) + +struct acpi_facs { + uacpi_char signature[4]; + uacpi_u32 length; + uacpi_u32 hardware_signature; + 
uacpi_u32 firmware_waking_vector; + uacpi_u32 global_lock; + uacpi_u32 flags; + uacpi_u64 x_firmware_waking_vector; + uacpi_u8 version; + uacpi_char rsvd0[3]; + uacpi_u32 ospm_flags; + uacpi_char rsvd1[24]; +}; +UACPI_EXPECT_SIZEOF(struct acpi_facs, 64); + +UACPI_PACKED(struct acpi_mcfg_allocation { + uacpi_u64 address; + uacpi_u16 segment; + uacpi_u8 start_bus; + uacpi_u8 end_bus; + uacpi_u32 rsvd; +}) +UACPI_EXPECT_SIZEOF(struct acpi_mcfg_allocation, 16); + +UACPI_PACKED(struct acpi_mcfg { + struct acpi_sdt_hdr hdr; + uacpi_u64 rsvd; + struct acpi_mcfg_allocation entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_mcfg, 44); + +// acpi_hpet->block_id +#define ACPI_HPET_PCI_VENDOR_ID_SHIFT 16 +#define ACPI_HPET_LEGACY_REPLACEMENT_IRQ_ROUTING_CAPABLE (1 << 15) +#define ACPI_HPET_COUNT_SIZE_CAP (1 << 13) +#define ACPI_HPET_NUMBER_OF_COMPARATORS_SHIFT 8 +#define ACPI_HPET_NUMBER_OF_COMPARATORS_MASK 0b11111 +#define ACPI_HPET_HARDWARE_REV_ID_MASK 0b11111111 + +// acpi_hpet->flags +#define ACPI_HPET_PAGE_PROTECTION_MASK 0b11 +#define ACPI_HPET_PAGE_NO_PROTECTION 0 +#define ACPI_HPET_PAGE_4K_PROTECTED 1 +#define ACPI_HPET_PAGE_64K_PROTECTED 2 + +UACPI_PACKED(struct acpi_hpet { + struct acpi_sdt_hdr hdr; + uacpi_u32 block_id; + struct acpi_gas address; + uacpi_u8 number; + uacpi_u16 min_clock_tick; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_hpet, 56); + +// PM1{a,b}_STS +#define ACPI_PM1_STS_TMR_STS_IDX 0 +#define ACPI_PM1_STS_BM_STS_IDX 4 +#define ACPI_PM1_STS_GBL_STS_IDX 5 +#define ACPI_PM1_STS_PWRBTN_STS_IDX 8 +#define ACPI_PM1_STS_SLPBTN_STS_IDX 9 +#define ACPI_PM1_STS_RTC_STS_IDX 10 +#define ACPI_PM1_STS_IGN0_IDX 11 +#define ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX 14 +#define ACPI_PM1_STS_WAKE_STS_IDX 15 + +#define ACPI_PM1_STS_TMR_STS_MASK (1 << ACPI_PM1_STS_TMR_STS_IDX) +#define ACPI_PM1_STS_BM_STS_MASK (1 << ACPI_PM1_STS_BM_STS_IDX) +#define ACPI_PM1_STS_GBL_STS_MASK (1 << ACPI_PM1_STS_GBL_STS_IDX) +#define ACPI_PM1_STS_PWRBTN_STS_MASK (1 << ACPI_PM1_STS_PWRBTN_STS_IDX) +#define ACPI_PM1_STS_SLPBTN_STS_MASK (1 << ACPI_PM1_STS_SLPBTN_STS_IDX) +#define ACPI_PM1_STS_RTC_STS_MASK (1 << ACPI_PM1_STS_RTC_STS_IDX) +#define ACPI_PM1_STS_IGN0_MASK (1 << ACPI_PM1_STS_IGN0_IDX) +#define ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK (1 << ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX) +#define ACPI_PM1_STS_WAKE_STS_MASK (1 << ACPI_PM1_STS_WAKE_STS_IDX) + +#define ACPI_PM1_STS_CLEAR 1 + +// PM1{a,b}_EN +#define ACPI_PM1_EN_TMR_EN_IDX 0 +#define ACPI_PM1_EN_GBL_EN_IDX 5 +#define ACPI_PM1_EN_PWRBTN_EN_IDX 8 +#define ACPI_PM1_EN_SLPBTN_EN_IDX 9 +#define ACPI_PM1_EN_RTC_EN_IDX 10 +#define ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX 14 + +#define ACPI_PM1_EN_TMR_EN_MASK (1 << ACPI_PM1_EN_TMR_EN_IDX) +#define ACPI_PM1_EN_GBL_EN_MASK (1 << ACPI_PM1_EN_GBL_EN_IDX) +#define ACPI_PM1_EN_PWRBTN_EN_MASK (1 << ACPI_PM1_EN_PWRBTN_EN_IDX) +#define ACPI_PM1_EN_SLPBTN_EN_MASK (1 << ACPI_PM1_EN_SLPBTN_EN_IDX) +#define ACPI_PM1_EN_RTC_EN_MASK (1 << ACPI_PM1_EN_RTC_EN_IDX) +#define ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK (1 << ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX) + +// PM1{a,b}_CNT_BLK +#define ACPI_PM1_CNT_SCI_EN_IDX 0 +#define ACPI_PM1_CNT_BM_RLD_IDX 1 +#define ACPI_PM1_CNT_GBL_RLS_IDX 2 +#define ACPI_PM1_CNT_RSVD0_IDX 3 +#define ACPI_PM1_CNT_RSVD1_IDX 4 +#define ACPI_PM1_CNT_RSVD2_IDX 5 +#define ACPI_PM1_CNT_RSVD3_IDX 6 +#define ACPI_PM1_CNT_RSVD4_IDX 7 +#define ACPI_PM1_CNT_RSVD5_IDX 8 +#define ACPI_PM1_CNT_IGN0_IDX 9 +#define ACPI_PM1_CNT_SLP_TYP_IDX 10 +#define ACPI_PM1_CNT_SLP_EN_IDX 13 +#define ACPI_PM1_CNT_RSVD6_IDX 14 +#define ACPI_PM1_CNT_RSVD7_IDX 15 + 
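+/*
+ * Illustrative sketch (not part of uACPI): the PM1 status bits above are
+ * write-one-to-clear, so a host acknowledging and re-arming the power
+ * button event might do the following, where read_pm1_sts/write_pm1_sts/
+ * write_pm1_en are hypothetical register accessors:
+ *
+ *     sts = read_pm1_sts();
+ *     if (sts & ACPI_PM1_STS_PWRBTN_STS_MASK) {
+ *         write_pm1_sts(ACPI_PM1_STS_PWRBTN_STS_MASK);  // W1C acknowledge
+ *         write_pm1_en(ACPI_PM1_EN_PWRBTN_EN_MASK);     // re-arm
+ *     }
+ */
+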
+#define ACPI_SLP_TYP_MAX 0x7 + +#define ACPI_PM1_CNT_SCI_EN_MASK (1 << ACPI_PM1_CNT_SCI_EN_IDX) +#define ACPI_PM1_CNT_BM_RLD_MASK (1 << ACPI_PM1_CNT_BM_RLD_IDX) +#define ACPI_PM1_CNT_GBL_RLS_MASK (1 << ACPI_PM1_CNT_GBL_RLS_IDX) +#define ACPI_PM1_CNT_SLP_TYP_MASK (ACPI_SLP_TYP_MAX << ACPI_PM1_CNT_SLP_TYP_IDX) +#define ACPI_PM1_CNT_SLP_EN_MASK (1 << ACPI_PM1_CNT_SLP_EN_IDX) + +/* + * SCI_EN is not in this mask even though the spec says it must be preserved. + * This is because it's known to be bugged on some hardware that relies on + * software writing 1 to it after resume (as indicated by a similar comment in + * ACPICA) + */ +#define ACPI_PM1_CNT_PRESERVE_MASK ( \ + (1 << ACPI_PM1_CNT_RSVD0_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD1_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD2_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD3_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD4_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD5_IDX) | \ + (1 << ACPI_PM1_CNT_IGN0_IDX ) | \ + (1 << ACPI_PM1_CNT_RSVD6_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD7_IDX) \ +) + +// PM2_CNT +#define ACPI_PM2_CNT_ARB_DIS_IDX 0 +#define ACPI_PM2_CNT_ARB_DIS_MASK (1 << ACPI_PM2_CNT_ARB_DIS_IDX) + +// All bits are reserved but this first one +#define ACPI_PM2_CNT_PRESERVE_MASK (~((uacpi_u64)ACPI_PM2_CNT_ARB_DIS_MASK)) + +// SLEEP_CONTROL_REG +#define ACPI_SLP_CNT_RSVD0_IDX 0 +#define ACPI_SLP_CNT_IGN0_IDX 1 +#define ACPI_SLP_CNT_SLP_TYP_IDX 2 +#define ACPI_SLP_CNT_SLP_EN_IDX 5 +#define ACPI_SLP_CNT_RSVD1_IDX 6 +#define ACPI_SLP_CNT_RSVD2_IDX 7 + +#define ACPI_SLP_CNT_SLP_TYP_MASK (ACPI_SLP_TYP_MAX << ACPI_SLP_CNT_SLP_TYP_IDX) +#define ACPI_SLP_CNT_SLP_EN_MASK (1 << ACPI_SLP_CNT_SLP_EN_IDX) + +#define ACPI_SLP_CNT_PRESERVE_MASK ( \ + (1 << ACPI_SLP_CNT_RSVD0_IDX) | \ + (1 << ACPI_SLP_CNT_IGN0_IDX) | \ + (1 << ACPI_SLP_CNT_RSVD1_IDX) | \ + (1 << ACPI_SLP_CNT_RSVD2_IDX) \ +) + +// SLEEP_STATUS_REG +#define ACPI_SLP_STS_WAK_STS_IDX 7 + +#define ACPI_SLP_STS_WAK_STS_MASK (1 << ACPI_SLP_STS_WAK_STS_IDX) + +// All bits are reserved but this last one +#define ACPI_SLP_STS_PRESERVE_MASK (~((uacpi_u64)ACPI_SLP_STS_WAK_STS_MASK)) + +#define ACPI_SLP_STS_CLEAR 1 + +UACPI_PACKED(struct acpi_dsdt { + struct acpi_sdt_hdr hdr; + uacpi_u8 definition_block[]; +}) + +UACPI_PACKED(struct acpi_ssdt { + struct acpi_sdt_hdr hdr; + uacpi_u8 definition_block[]; +}) + +/* + * ACPI 6.5 specification: + * Bit [0] - Set if the device is present. + * Bit [1] - Set if the device is enabled and decoding its resources. + * Bit [2] - Set if the device should be shown in the UI. + * Bit [3] - Set if the device is functioning properly (cleared if device + * failed its diagnostics). + * Bit [4] - Set if the battery is present. 
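+ *
+ * For example (illustrative), a present, enabled, visible and functioning
+ * device reports _STA = 0x0F, i.e. the bottom four of the masks defined
+ * below:
+ *
+ *     ok = (sta & ACPI_STA_RESULT_DEVICE_PRESENT) &&
+ *          (sta & ACPI_STA_RESULT_DEVICE_FUNCTIONING);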
+ */ +#define ACPI_STA_RESULT_DEVICE_PRESENT (1 << 0) +#define ACPI_STA_RESULT_DEVICE_ENABLED (1 << 1) +#define ACPI_STA_RESULT_DEVICE_SHOWN_IN_UI (1 << 2) +#define ACPI_STA_RESULT_DEVICE_FUNCTIONING (1 << 3) +#define ACPI_STA_RESULT_DEVICE_BATTERY_PRESENT (1 << 4) + +#define ACPI_REG_DISCONNECT 0 +#define ACPI_REG_CONNECT 1 + +UACPI_PACKED(struct acpi_ecdt { + struct acpi_sdt_hdr hdr; + struct acpi_gas ec_control; + struct acpi_gas ec_data; + uacpi_u32 uid; + uacpi_u8 gpe_bit; + uacpi_char ec_id[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_ecdt, 65); + +UACPI_PACKED(struct acpi_rhct_hdr { + uacpi_u16 type; + uacpi_u16 length; + uacpi_u16 revision; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_hdr, 6); + +// acpi_rhct->flags +#define ACPI_TIMER_CANNOT_WAKE_CPU (1 << 0) + +UACPI_PACKED(struct acpi_rhct { + struct acpi_sdt_hdr hdr; + uacpi_u32 flags; + uacpi_u64 timebase_frequency; + uacpi_u32 node_count; + uacpi_u32 nodes_offset; + struct acpi_rhct_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct, 56); + +enum acpi_rhct_entry_type { + ACPI_RHCT_ENTRY_TYPE_ISA_STRING = 0, + ACPI_RHCT_ENTRY_TYPE_CMO = 1, + ACPI_RHCT_ENTRY_TYPE_MMU = 2, + ACPI_RHCT_ENTRY_TYPE_HART_INFO = 65535, +}; + +UACPI_PACKED(struct acpi_rhct_isa_string { + struct acpi_rhct_hdr hdr; + uacpi_u16 length; + uacpi_u8 isa[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_isa_string, 8); + +UACPI_PACKED(struct acpi_rhct_cmo { + struct acpi_rhct_hdr hdr; + uacpi_u8 rsvd; + uacpi_u8 cbom_size; + uacpi_u8 cbop_size; + uacpi_u8 cboz_size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_cmo, 10); + +enum acpi_rhct_mmu_type { + ACPI_RHCT_MMU_TYPE_SV39 = 0, + ACPI_RHCT_MMU_TYPE_SV48 = 1, + ACPI_RHCT_MMU_TYPE_SV57 = 2, +}; + +UACPI_PACKED(struct acpi_rhct_mmu { + struct acpi_rhct_hdr hdr; + uacpi_u8 rsvd; + uacpi_u8 type; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_mmu, 8); + +UACPI_PACKED(struct acpi_rhct_hart_info { + struct acpi_rhct_hdr hdr; + uacpi_u16 offset_count; + uacpi_u32 uid; + uacpi_u32 offsets[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_hart_info, 12); + +#define ACPI_LARGE_ITEM (1 << 7) + +#define ACPI_SMALL_ITEM_NAME_IDX 3 +#define ACPI_SMALL_ITEM_NAME_MASK 0xF +#define ACPI_SMALL_ITEM_LENGTH_MASK 0x7 + +#define ACPI_LARGE_ITEM_NAME_MASK 0x7F + +// Small items +#define ACPI_RESOURCE_IRQ 0x04 +#define ACPI_RESOURCE_DMA 0x05 +#define ACPI_RESOURCE_START_DEPENDENT 0x06 +#define ACPI_RESOURCE_END_DEPENDENT 0x07 +#define ACPI_RESOURCE_IO 0x08 +#define ACPI_RESOURCE_FIXED_IO 0x09 +#define ACPI_RESOURCE_FIXED_DMA 0x0A +#define ACPI_RESOURCE_VENDOR_TYPE0 0x0E +#define ACPI_RESOURCE_END_TAG 0x0F + +// Large items +#define ACPI_RESOURCE_MEMORY24 0x01 +#define ACPI_RESOURCE_GENERIC_REGISTER 0x02 +#define ACPI_RESOURCE_VENDOR_TYPE1 0x04 +#define ACPI_RESOURCE_MEMORY32 0x05 +#define ACPI_RESOURCE_FIXED_MEMORY32 0x06 +#define ACPI_RESOURCE_ADDRESS32 0x07 +#define ACPI_RESOURCE_ADDRESS16 0x08 +#define ACPI_RESOURCE_EXTENDED_IRQ 0x09 +#define ACPI_RESOURCE_ADDRESS64 0x0A +#define ACPI_RESOURCE_ADDRESS64_EXTENDED 0x0B +#define ACPI_RESOURCE_GPIO_CONNECTION 0x0C +#define ACPI_RESOURCE_PIN_FUNCTION 0x0D +#define ACPI_RESOURCE_SERIAL_CONNECTION 0x0E +#define ACPI_RESOURCE_PIN_CONFIGURATION 0x0F +#define ACPI_RESOURCE_PIN_GROUP 0x10 +#define ACPI_RESOURCE_PIN_GROUP_FUNCTION 0x11 +#define ACPI_RESOURCE_PIN_GROUP_CONFIGURATION 0x12 +#define ACPI_RESOURCE_CLOCK_INPUT 0x13 + +/* + * Resources as encoded by the raw AML byte stream. 
+ * For decode API & human usable structures refer to uacpi/resources.h + */ +UACPI_PACKED(struct acpi_small_item { + uacpi_u8 type_and_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_small_item, 1); + +UACPI_PACKED(struct acpi_resource_irq { + struct acpi_small_item common; + uacpi_u16 irq_mask; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_irq, 4); + +UACPI_PACKED(struct acpi_resource_dma { + struct acpi_small_item common; + uacpi_u8 channel_mask; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_dma, 3); + +UACPI_PACKED(struct acpi_resource_start_dependent { + struct acpi_small_item common; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_start_dependent, 2); + +UACPI_PACKED(struct acpi_resource_end_dependent { + struct acpi_small_item common; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_end_dependent, 1); + +UACPI_PACKED(struct acpi_resource_io { + struct acpi_small_item common; + uacpi_u8 information; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u8 alignment; + uacpi_u8 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_io, 8); + +UACPI_PACKED(struct acpi_resource_fixed_io { + struct acpi_small_item common; + uacpi_u16 address; + uacpi_u8 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_fixed_io, 4); + +UACPI_PACKED(struct acpi_resource_fixed_dma { + struct acpi_small_item common; + uacpi_u16 request_line; + uacpi_u16 channel; + uacpi_u8 transfer_width; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_fixed_dma, 6); + +UACPI_PACKED(struct acpi_resource_vendor_defined_type0 { + struct acpi_small_item common; + uacpi_u8 byte_data[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_vendor_defined_type0, 1); + +UACPI_PACKED(struct acpi_resource_end_tag { + struct acpi_small_item common; + uacpi_u8 checksum; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_end_tag, 2); + +UACPI_PACKED(struct acpi_large_item { + uacpi_u8 type; + uacpi_u16 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_large_item, 3); + +UACPI_PACKED(struct acpi_resource_memory24 { + struct acpi_large_item common; + uacpi_u8 information; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 alignment; + uacpi_u16 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_memory24, 12); + +UACPI_PACKED(struct acpi_resource_vendor_defined_type1 { + struct acpi_large_item common; + uacpi_u8 byte_data[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_vendor_defined_type1, 3); + +UACPI_PACKED(struct acpi_resource_memory32 { + struct acpi_large_item common; + uacpi_u8 information; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 alignment; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_memory32, 20); + +UACPI_PACKED(struct acpi_resource_fixed_memory32 { + struct acpi_large_item common; + uacpi_u8 information; + uacpi_u32 address; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_fixed_memory32, 12); + +UACPI_PACKED(struct acpi_resource_address { + struct acpi_large_item common; + uacpi_u8 type; + uacpi_u8 flags; + uacpi_u8 type_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address, 6); + +UACPI_PACKED(struct acpi_resource_address64 { + struct acpi_resource_address common; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address64, 46); + +UACPI_PACKED(struct acpi_resource_address32 { + struct acpi_resource_address common; + uacpi_u32 granularity; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 translation_offset; + uacpi_u32 
length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address32, 26); + +UACPI_PACKED(struct acpi_resource_address16 { + struct acpi_resource_address common; + uacpi_u16 granularity; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 translation_offset; + uacpi_u16 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address16, 16); + +UACPI_PACKED(struct acpi_resource_address64_extended { + struct acpi_resource_address common; + uacpi_u8 revision_id; + uacpi_u8 rsvd; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 length; + uacpi_u64 attributes; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address64_extended, 56); + +UACPI_PACKED(struct acpi_resource_extended_irq { + struct acpi_large_item common; + uacpi_u8 flags; + uacpi_u8 num_irqs; + uacpi_u32 irqs[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_extended_irq, 5); + +UACPI_PACKED(struct acpi_resource_generic_register { + struct acpi_large_item common; + uacpi_u8 address_space_id; + uacpi_u8 bit_width; + uacpi_u8 bit_offset; + uacpi_u8 access_size; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_generic_register, 15); + +UACPI_PACKED(struct acpi_resource_gpio_connection { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u8 type; + uacpi_u16 general_flags; + uacpi_u16 connection_flags; + uacpi_u8 pull_configuration; + uacpi_u16 drive_strength; + uacpi_u16 debounce_timeout; + uacpi_u16 pin_table_offset; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_gpio_connection, 23); + +#define ACPI_SERIAL_TYPE_I2C 1 +#define ACPI_SERIAL_TYPE_SPI 2 +#define ACPI_SERIAL_TYPE_UART 3 +#define ACPI_SERIAL_TYPE_CSI2 4 +#define ACPI_SERIAL_TYPE_MAX ACPI_SERIAL_TYPE_CSI2 + +UACPI_PACKED(struct acpi_resource_serial { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u8 source_index; + uacpi_u8 type; + uacpi_u8 flags; + uacpi_u16 type_specific_flags; + uacpi_u8 type_specific_revision_id; + uacpi_u16 type_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial, 12); + +UACPI_PACKED(struct acpi_resource_serial_i2c { + struct acpi_resource_serial common; + uacpi_u32 connection_speed; + uacpi_u16 slave_address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_i2c, 18); + +UACPI_PACKED(struct acpi_resource_serial_spi { + struct acpi_resource_serial common; + uacpi_u32 connection_speed; + uacpi_u8 data_bit_length; + uacpi_u8 phase; + uacpi_u8 polarity; + uacpi_u16 device_selection; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_spi, 21); + +UACPI_PACKED(struct acpi_resource_serial_uart { + struct acpi_resource_serial common; + uacpi_u32 baud_rate; + uacpi_u16 rx_fifo; + uacpi_u16 tx_fifo; + uacpi_u8 parity; + uacpi_u8 lines_enabled; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_uart, 22); + +UACPI_PACKED(struct acpi_resource_serial_csi2 { + struct acpi_resource_serial common; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_csi2, 12); + +UACPI_PACKED(struct acpi_resource_pin_function { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u8 pull_configuration; + uacpi_u16 function_number; + uacpi_u16 pin_table_offset; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_function, 18); + +UACPI_PACKED(struct acpi_resource_pin_configuration { + struct acpi_large_item 
common;
+    uacpi_u8 revision_id;
+    uacpi_u16 flags;
+    uacpi_u8 type;
+    uacpi_u32 value;
+    uacpi_u16 pin_table_offset;
+    uacpi_u8 source_index;
+    uacpi_u16 source_offset;
+    uacpi_u16 vendor_data_offset;
+    uacpi_u16 vendor_data_length;
+})
+UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_configuration, 20);
+
+UACPI_PACKED(struct acpi_resource_pin_group {
+    struct acpi_large_item common;
+    uacpi_u8 revision_id;
+    uacpi_u16 flags;
+    uacpi_u16 pin_table_offset;
+    uacpi_u16 source_lable_offset;
+    uacpi_u16 vendor_data_offset;
+    uacpi_u16 vendor_data_length;
+})
+UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_group, 14);
+
+UACPI_PACKED(struct acpi_resource_pin_group_function {
+    struct acpi_large_item common;
+    uacpi_u8 revision_id;
+    uacpi_u16 flags;
+    uacpi_u16 function;
+    uacpi_u8 source_index;
+    uacpi_u16 source_offset;
+    uacpi_u16 source_lable_offset;
+    uacpi_u16 vendor_data_offset;
+    uacpi_u16 vendor_data_length;
+})
+UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_group_function, 17);
+
+UACPI_PACKED(struct acpi_resource_pin_group_configuration {
+    struct acpi_large_item common;
+    uacpi_u8 revision_id;
+    uacpi_u16 flags;
+    uacpi_u8 type;
+    uacpi_u32 value;
+    uacpi_u8 source_index;
+    uacpi_u16 source_offset;
+    uacpi_u16 source_lable_offset;
+    uacpi_u16 vendor_data_offset;
+    uacpi_u16 vendor_data_length;
+})
+UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_group_configuration, 20);
+
+UACPI_PACKED(struct acpi_resource_clock_input {
+    struct acpi_large_item common;
+    uacpi_u8 revision_id;
+    uacpi_u16 flags;
+    uacpi_u16 divisor;
+    uacpi_u32 numerator;
+    uacpi_u8 source_index;
+})
+UACPI_EXPECT_SIZEOF(struct acpi_resource_clock_input, 13);
diff --git a/sys/include/dev/acpi/uacpi/uacpi/context.h b/sys/include/dev/acpi/uacpi/uacpi/context.h
new file mode 100644
index 0000000..d5a46e5
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/context.h
@@ -0,0 +1,53 @@
+#pragma once
+
+#include <uacpi/types.h>
+#include <uacpi/log.h>
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+/*
+ * Set the minimum log level to be accepted by the logging facilities. Any logs
+ * below this level are discarded and not passed to uacpi_kernel_log, etc.
+ *
+ * 0 is treated as a special value that resets the setting to the default value.
+ *
+ * E.g. for a log level of UACPI_LOG_INFO:
+ *     UACPI_LOG_DEBUG -> discarded
+ *     UACPI_LOG_TRACE -> discarded
+ *     UACPI_LOG_INFO  -> allowed
+ *     UACPI_LOG_WARN  -> allowed
+ *     UACPI_LOG_ERROR -> allowed
+ */
+void uacpi_context_set_log_level(uacpi_log_level);
+
+/*
+ * Enables table checksum validation at installation time instead of first use.
+ * Note that this makes uACPI map the entire table at once, which not all
+ * hosts are able to handle at early init.
+ */
+void uacpi_context_set_proactive_table_checksum(uacpi_bool);
+
+#ifndef UACPI_BAREBONES_MODE
+/*
+ * Set the maximum number of seconds a While loop is allowed to run for before
+ * getting timed out.
+ *
+ * 0 is treated as a special value that resets the setting to the default value.
+ */
+void uacpi_context_set_loop_timeout(uacpi_u32 seconds);
+
+/*
+ * Set the maximum call stack depth AML can reach before getting aborted.
+ *
+ * 0 is treated as a special value that resets the setting to the default value.
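+ *
+ * For example (illustrative values only), a host with small kernel stacks
+ * might tighten both limits during early bring-up:
+ *
+ *     uacpi_context_set_max_call_stack_depth(64);
+ *     uacpi_context_set_loop_timeout(5);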
+ */ +void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth); + +uacpi_u32 uacpi_context_get_loop_timeout(void); +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/event.h b/sys/include/dev/acpi/uacpi/uacpi/event.h new file mode 100644 index 0000000..a21fe6e --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/event.h @@ -0,0 +1,286 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/uacpi.h> +#include <uacpi/acpi.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_fixed_event { + UACPI_FIXED_EVENT_TIMER_STATUS = 1, + UACPI_FIXED_EVENT_POWER_BUTTON, + UACPI_FIXED_EVENT_SLEEP_BUTTON, + UACPI_FIXED_EVENT_RTC, + UACPI_FIXED_EVENT_MAX = UACPI_FIXED_EVENT_RTC, +} uacpi_fixed_event; + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_install_fixed_event_handler( + uacpi_fixed_event event, uacpi_interrupt_handler handler, uacpi_handle user +)) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_uninstall_fixed_event_handler( + uacpi_fixed_event event +)) + +/* + * Enable/disable a fixed event. Note that the event is automatically enabled + * upon installing a handler to it. + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event) +) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event) +) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event) +) + +typedef enum uacpi_event_info { + // Event is enabled in software + UACPI_EVENT_INFO_ENABLED = (1 << 0), + + // Event is enabled in software (only for wake) + UACPI_EVENT_INFO_ENABLED_FOR_WAKE = (1 << 1), + + // Event is masked + UACPI_EVENT_INFO_MASKED = (1 << 2), + + // Event has a handler attached + UACPI_EVENT_INFO_HAS_HANDLER = (1 << 3), + + // Hardware enable bit is set + UACPI_EVENT_INFO_HW_ENABLED = (1 << 4), + + // Hardware status bit is set + UACPI_EVENT_INFO_HW_STATUS = (1 << 5), +} uacpi_event_info; + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_fixed_event_info( + uacpi_fixed_event event, uacpi_event_info *out_info +)) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_gpe_info( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_event_info *out_info +)) + +// Set if the handler wishes to reenable the GPE it just handled +#define UACPI_GPE_REENABLE (1 << 7) + +typedef uacpi_interrupt_ret (*uacpi_gpe_handler)( + uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx +); + +typedef enum uacpi_gpe_triggering { + UACPI_GPE_TRIGGERING_LEVEL = 0, + UACPI_GPE_TRIGGERING_EDGE = 1, + UACPI_GPE_TRIGGERING_MAX = UACPI_GPE_TRIGGERING_EDGE, +} uacpi_gpe_triggering; + +const uacpi_char *uacpi_gpe_triggering_to_string( + uacpi_gpe_triggering triggering +); + +/* + * Installs a handler to the provided GPE at 'idx' controlled by device + * 'gpe_device'. The GPE is automatically disabled & cleared according to the + * configured triggering upon invoking the handler. 
The event is optionally
+ * re-enabled (by returning UACPI_GPE_REENABLE from the handler).
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_install_gpe_handler(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx
+))
+
+/*
+ * Installs a raw handler to the provided GPE at 'idx' controlled by device
+ * 'gpe_device'. The handler is dispatched immediately after the event is
+ * received; status & enable bits are untouched.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_install_gpe_handler_raw(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx
+))
+
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_uninstall_gpe_handler(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_handler handler
+))
+
+/*
+ * Marks the GPE 'idx' managed by 'gpe_device' as wake-capable. 'wake_device' is
+ * optional and configures the GPE to generate an implicit notification whenever
+ * an event occurs.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_setup_gpe_for_wake(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+    uacpi_namespace_node *wake_device
+))
+
+/*
+ * Mark a GPE managed by 'gpe_device' as enabled/disabled for wake. The GPE must
+ * have previously been marked by calling uacpi_setup_gpe_for_wake. This
+ * function only affects the GPE enable register state following the call to
+ * uacpi_enable_all_wake_gpes.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_enable_gpe_for_wake(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_disable_gpe_for_wake(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+
+/*
+ * Finalize GPE initialization by enabling all GPEs that are not configured
+ * for wake and have a matching AML handler detected.
+ *
+ * This should be called after the kernel power management subsystem has
+ * enumerated all of the devices, executing their _PRW methods etc., and
+ * marking those it wishes to use for wake by calling uacpi_setup_gpe_for_wake
+ * or uacpi_mark_gpe_for_wake.
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+    uacpi_status uacpi_finalize_gpe_initialization(void)
+)
+
+/*
+ * Enable/disable a general purpose event managed by 'gpe_device'. Internally
+ * this uses reference counting to make sure a GPE is not disabled until all
+ * possible users of it do so. GPEs not marked for wake are enabled
+ * automatically, so this API is only needed for wake events or those that
+ * don't have a corresponding AML handler.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_enable_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_disable_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+
+/*
+ * Clear the status bit of the event 'idx' managed by 'gpe_device'.
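+ * This is useful, for example, from a raw handler (installed via
+ * uacpi_install_gpe_handler_raw) that manages the status bits itself
+ * (illustrative):
+ *
+ *     uacpi_clear_gpe(UACPI_NULL, idx);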
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_clear_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+
+/*
+ * Suspend/resume a general purpose event managed by 'gpe_device'. This
+ * bypasses the reference counting mechanism and unconditionally clears/sets
+ * the corresponding bit in the enable registers. This is used for switching
+ * the GPE to poll mode.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_suspend_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_resume_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+
+/*
+ * Finish handling the GPE managed by 'gpe_device' at 'idx'. This clears the
+ * status register if it hasn't been cleared yet and re-enables the event if
+ * it was enabled before.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_finish_handling_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+
+/*
+ * Hard mask/unmask a general purpose event at 'idx' managed by 'gpe_device'.
+ * This is used to permanently silence an event so that further calls to
+ * enable/disable as well as suspend/resume get ignored. This might be
+ * necessary for GPEs that cause an event storm due to the kernel's inability
+ * to properly handle them. The only way to enable a masked event is by a
+ * call to unmask.
+ *
+ * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_mask_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_unmask_gpe(
+    uacpi_namespace_node *gpe_device, uacpi_u16 idx
+))
+
+/*
+ * Disable all GPEs currently set up on the system.
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_disable_all_gpes(void)
+)
+
+/*
+ * Enable all GPEs not marked as wake. This is only needed after the system
+ * wakes from a shallow sleep state and is called automatically by wake code.
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_enable_all_runtime_gpes(void)
+)
+
+/*
+ * Enable all GPEs marked as wake. This is only needed before the system goes
+ * to sleep and is called automatically by sleep code.
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_enable_all_wake_gpes(void)
+)
+
+/*
+ * Install/uninstall a new GPE block, usually defined by a device in the
+ * namespace with a _HID of ACPI0006.
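+ * A host that has enumerated such a device might register its block as
+ * follows (illustrative values; the address-space constant is assumed to
+ * come from uacpi/types.h):
+ *
+ *     uacpi_install_gpe_block(node, 0x680, UACPI_ADDRESS_SPACE_SYSTEM_IO,
+ *                             2, 9);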
+ */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_install_gpe_block( + uacpi_namespace_node *gpe_device, uacpi_u64 address, + uacpi_address_space address_space, uacpi_u16 num_registers, + uacpi_u32 irq +)) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_uninstall_gpe_block( + uacpi_namespace_node *gpe_device +)) + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/helpers.h b/sys/include/dev/acpi/uacpi/uacpi/helpers.h new file mode 100644 index 0000000..520359e --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/helpers.h @@ -0,0 +1,12 @@ +#pragma once + +#include <uacpi/platform/compiler.h> + +#define UACPI_BUILD_BUG_ON_WITH_MSG(expr, msg) UACPI_STATIC_ASSERT(!(expr), msg) + +#define UACPI_BUILD_BUG_ON(expr) \ + UACPI_BUILD_BUG_ON_WITH_MSG(expr, "BUILD BUG: " #expr " evaluated to true") + +#define UACPI_EXPECT_SIZEOF(type, size) \ + UACPI_BUILD_BUG_ON_WITH_MSG(sizeof(type) != size, \ + "BUILD BUG: invalid type size") diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/compiler.h b/sys/include/dev/acpi/uacpi/uacpi/internal/compiler.h new file mode 100644 index 0000000..68033fd --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/compiler.h @@ -0,0 +1,3 @@ +#pragma once + +#include <uacpi/platform/compiler.h> diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/context.h b/sys/include/dev/acpi/uacpi/uacpi/internal/context.h new file mode 100644 index 0000000..ca587f6 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/context.h @@ -0,0 +1,155 @@ +#pragma once + +#include <uacpi/acpi.h> +#include <uacpi/types.h> +#include <uacpi/uacpi.h> +#include <uacpi/internal/dynamic_array.h> +#include <uacpi/internal/shareable.h> +#include <uacpi/context.h> + +struct uacpi_runtime_context { + /* + * A local copy of FADT that has been verified & converted to most optimal + * format for faster access to the registers. + */ + struct acpi_fadt fadt; + + uacpi_u64 flags; + +#ifndef UACPI_BAREBONES_MODE + /* + * A cached pointer to FACS so that we don't have to look it up in interrupt + * contexts as we can't take mutexes. + */ + struct acpi_facs *facs; + + /* + * pm1{a,b}_evt_blk split into two registers for convenience + */ + struct acpi_gas pm1a_status_blk; + struct acpi_gas pm1b_status_blk; + struct acpi_gas pm1a_enable_blk; + struct acpi_gas pm1b_enable_blk; + +#define UACPI_SLEEP_TYP_INVALID 0xFF + uacpi_u8 last_sleep_typ_a; + uacpi_u8 last_sleep_typ_b; + + uacpi_u8 s0_sleep_typ_a; + uacpi_u8 s0_sleep_typ_b; + + uacpi_bool global_lock_acquired; + +#ifndef UACPI_REDUCED_HARDWARE + uacpi_bool was_in_legacy_mode; + uacpi_bool has_global_lock; + uacpi_bool sci_handle_valid; + uacpi_handle sci_handle; +#endif + uacpi_u64 opcodes_executed; + + uacpi_u32 loop_timeout_seconds; + uacpi_u32 max_call_stack_depth; + + uacpi_u32 global_lock_seq_num; + + /* + * These are stored here to protect against stuff like: + * - CopyObject(JUNK, \) + * - CopyObject(JUNK, \_GL) + */ + uacpi_mutex *global_lock_mutex; + uacpi_object *root_object; + +#ifndef UACPI_REDUCED_HARDWARE + uacpi_handle *global_lock_event; + uacpi_handle *global_lock_spinlock; + uacpi_bool global_lock_pending; +#endif + + uacpi_bool bad_timesource; + uacpi_u8 init_level; +#endif // !UACPI_BAREBONES_MODE + +#ifndef UACPI_REDUCED_HARDWARE + uacpi_bool is_hardware_reduced; +#endif + + /* + * This is a per-table value but we mimic the NT implementation: + * treat all other definition blocks as if they were the same revision + * as DSDT. 
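+ * (Per the ACPI specification, definition blocks with a revision below 2
+ * use 32-bit integer arithmetic, which is what this flag ultimately
+ * selects.)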
+ */ + uacpi_bool is_rev1; + + uacpi_u8 log_level; +}; + +extern struct uacpi_runtime_context g_uacpi_rt_ctx; + +static inline uacpi_bool uacpi_check_flag(uacpi_u64 flag) +{ + return (g_uacpi_rt_ctx.flags & flag) == flag; +} + +static inline uacpi_bool uacpi_should_log(enum uacpi_log_level lvl) +{ + return lvl <= g_uacpi_rt_ctx.log_level; +} + +static inline uacpi_bool uacpi_is_hardware_reduced(void) +{ +#ifndef UACPI_REDUCED_HARDWARE + return g_uacpi_rt_ctx.is_hardware_reduced; +#else + return UACPI_TRUE; +#endif +} + +#ifndef UACPI_BAREBONES_MODE + +static inline const uacpi_char *uacpi_init_level_to_string(uacpi_u8 lvl) +{ + switch (lvl) { + case UACPI_INIT_LEVEL_EARLY: + return "early"; + case UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED: + return "subsystem initialized"; + case UACPI_INIT_LEVEL_NAMESPACE_LOADED: + return "namespace loaded"; + case UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED: + return "namespace initialized"; + default: + return "<invalid>"; + } +} + +#define UACPI_ENSURE_INIT_LEVEL_AT_LEAST(lvl) \ + do { \ + if (uacpi_unlikely(g_uacpi_rt_ctx.init_level < lvl)) { \ + uacpi_error( \ + "while evaluating %s: init level %d (%s) is too low, " \ + "expected at least %d (%s)\n", __FUNCTION__, \ + g_uacpi_rt_ctx.init_level, \ + uacpi_init_level_to_string(g_uacpi_rt_ctx.init_level), lvl, \ + uacpi_init_level_to_string(lvl) \ + ); \ + return UACPI_STATUS_INIT_LEVEL_MISMATCH; \ + } \ + } while (0) + +#define UACPI_ENSURE_INIT_LEVEL_IS(lvl) \ + do { \ + if (uacpi_unlikely(g_uacpi_rt_ctx.init_level != lvl)) { \ + uacpi_error( \ + "while evaluating %s: invalid init level %d (%s), " \ + "expected %d (%s)\n", __FUNCTION__, \ + g_uacpi_rt_ctx.init_level, \ + uacpi_init_level_to_string(g_uacpi_rt_ctx.init_level), lvl, \ + uacpi_init_level_to_string(lvl) \ + ); \ + return UACPI_STATUS_INIT_LEVEL_MISMATCH; \ + } \ + } while (0) + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/dynamic_array.h b/sys/include/dev/acpi/uacpi/uacpi/internal/dynamic_array.h new file mode 100644 index 0000000..4adc00f --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/dynamic_array.h @@ -0,0 +1,185 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/internal/stdlib.h> +#include <uacpi/kernel_api.h> + +#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE(name, type, inline_capacity) \ + struct name { \ + type inline_storage[inline_capacity]; \ + type *dynamic_storage; \ + uacpi_size dynamic_capacity; \ + uacpi_size size_including_inline; \ + }; \ + +#define DYNAMIC_ARRAY_SIZE(arr) ((arr)->size_including_inline) + +#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE_EXPORTS(name, type, prefix) \ + prefix uacpi_size name##_inline_capacity(struct name *arr); \ + prefix type *name##_at(struct name *arr, uacpi_size idx); \ + prefix type *name##_alloc(struct name *arr); \ + prefix type *name##_calloc(struct name *arr); \ + prefix void name##_pop(struct name *arr); \ + prefix uacpi_size name##_size(struct name *arr); \ + prefix type *name##_last(struct name *arr) \ + prefix void name##_clear(struct name *arr); + +#ifndef UACPI_BAREBONES_MODE +#define DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix) \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_alloc(struct name *arr) \ + { \ + uacpi_size inline_cap; \ + type *out_ptr; \ + \ + inline_cap = name##_inline_capacity(arr); \ + \ + if (arr->size_including_inline >= inline_cap) { \ + uacpi_size dynamic_size; \ + \ + dynamic_size = arr->size_including_inline - inline_cap; \ + if (dynamic_size == arr->dynamic_capacity) { \ + uacpi_size bytes, type_size; \ 
+ void *new_buf; \ + \ + type_size = sizeof(*arr->dynamic_storage); \ + \ + if (arr->dynamic_capacity == 0) { \ + bytes = type_size * inline_cap; \ + } else { \ + bytes = (arr->dynamic_capacity / 2) * type_size; \ + if (bytes == 0) \ + bytes += type_size; \ + \ + bytes += arr->dynamic_capacity * type_size; \ + } \ + \ + new_buf = uacpi_kernel_alloc(bytes); \ + if (uacpi_unlikely(new_buf == UACPI_NULL)) \ + return UACPI_NULL; \ + \ + arr->dynamic_capacity = bytes / type_size; \ + \ + if (arr->dynamic_storage) { \ + uacpi_memcpy(new_buf, arr->dynamic_storage, \ + dynamic_size * type_size); \ + } \ + uacpi_free(arr->dynamic_storage, dynamic_size * type_size); \ + arr->dynamic_storage = new_buf; \ + } \ + \ + out_ptr = &arr->dynamic_storage[dynamic_size]; \ + goto ret; \ + } \ + out_ptr = &arr->inline_storage[arr->size_including_inline]; \ + ret: \ + arr->size_including_inline++; \ + return out_ptr; \ + } + +#define DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) \ + prefix void name##_clear(struct name *arr) \ + { \ + uacpi_free( \ + arr->dynamic_storage, \ + arr->dynamic_capacity * sizeof(*arr->dynamic_storage) \ + ); \ + arr->size_including_inline = 0; \ + arr->dynamic_capacity = 0; \ + arr->dynamic_storage = UACPI_NULL; \ + } +#else +#define DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix) \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_alloc(struct name *arr) \ + { \ + uacpi_size inline_cap; \ + type *out_ptr; \ + \ + inline_cap = name##_inline_capacity(arr); \ + \ + if (arr->size_including_inline >= inline_cap) { \ + uacpi_size dynamic_size; \ + \ + dynamic_size = arr->size_including_inline - inline_cap; \ + if (uacpi_unlikely(dynamic_size == arr->dynamic_capacity)) \ + return UACPI_NULL; \ + \ + out_ptr = &arr->dynamic_storage[dynamic_size]; \ + goto ret; \ + } \ + out_ptr = &arr->inline_storage[arr->size_including_inline]; \ + ret: \ + arr->size_including_inline++; \ + return out_ptr; \ + } + +#define DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) \ + prefix void name##_clear(struct name *arr) \ + { \ + arr->size_including_inline = 0; \ + arr->dynamic_capacity = 0; \ + arr->dynamic_storage = UACPI_NULL; \ + } +#endif + +#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(name, type, prefix) \ + UACPI_MAYBE_UNUSED \ + prefix uacpi_size name##_inline_capacity(struct name *arr) \ + { \ + return sizeof(arr->inline_storage) / sizeof(arr->inline_storage[0]); \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix uacpi_size name##_capacity(struct name *arr) \ + { \ + return name##_inline_capacity(arr) + arr->dynamic_capacity; \ + } \ + \ + prefix type *name##_at(struct name *arr, uacpi_size idx) \ + { \ + if (idx >= arr->size_including_inline) \ + return UACPI_NULL; \ + \ + if (idx < name##_inline_capacity(arr)) \ + return &arr->inline_storage[idx]; \ + \ + return &arr->dynamic_storage[idx - name##_inline_capacity(arr)]; \ + } \ + \ + DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix) \ + \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_calloc(struct name *arr) \ + { \ + type *ret; \ + \ + ret = name##_alloc(arr); \ + if (ret) \ + uacpi_memzero(ret, sizeof(*ret)); \ + \ + return ret; \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix void name##_pop(struct name *arr) \ + { \ + if (arr->size_including_inline == 0) \ + return; \ + \ + arr->size_including_inline--; \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix uacpi_size name##_size(struct name *arr) \ + { \ + return arr->size_including_inline; \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_last(struct name *arr) \ + { \ + return name##_at(arr, arr->size_including_inline - 1); \ + } \ 
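+    /* Note: calling name##_last on an empty array is safe: the index \
+     * wraps around and fails the bounds check in name##_at, yielding \
+     * UACPI_NULL.                                                    \
+     */                                                               \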
+ \ + DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/event.h b/sys/include/dev/acpi/uacpi/uacpi/internal/event.h new file mode 100644 index 0000000..40ced0d --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/event.h @@ -0,0 +1,25 @@ +#pragma once + +#include <uacpi/event.h> + +// This fixed event is internal-only, and we don't expose it in the enum +#define UACPI_FIXED_EVENT_GLOBAL_LOCK 0 + +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_initialize_events_early(void) +) + +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_initialize_events(void) +) +UACPI_STUB_IF_REDUCED_HARDWARE( + void uacpi_deinitialize_events(void) +) + +UACPI_STUB_IF_REDUCED_HARDWARE( + void uacpi_events_match_post_dynamic_table_load(void) +) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_clear_all_events(void) +) diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/helpers.h b/sys/include/dev/acpi/uacpi/uacpi/internal/helpers.h new file mode 100644 index 0000000..f02b589 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/helpers.h @@ -0,0 +1,7 @@ +#pragma once + +#include <uacpi/helpers.h> + +#define UACPI_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +#define UACPI_UNUSED(x) (void)(x) diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/interpreter.h b/sys/include/dev/acpi/uacpi/uacpi/internal/interpreter.h new file mode 100644 index 0000000..410c379 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/interpreter.h @@ -0,0 +1,24 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/status.h> +#include <uacpi/internal/namespace.h> + +#ifndef UACPI_BAREBONES_MODE + +enum uacpi_table_load_cause { + UACPI_TABLE_LOAD_CAUSE_LOAD_OP, + UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP, + UACPI_TABLE_LOAD_CAUSE_INIT, + UACPI_TABLE_LOAD_CAUSE_HOST, +}; + +uacpi_status uacpi_execute_table(void*, enum uacpi_table_load_cause cause); +uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval); + +uacpi_status uacpi_execute_control_method( + uacpi_namespace_node *scope, uacpi_control_method *method, + const uacpi_object_array *args, uacpi_object **ret +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/io.h b/sys/include/dev/acpi/uacpi/uacpi/internal/io.h new file mode 100644 index 0000000..839489a --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/io.h @@ -0,0 +1,77 @@ +#pragma once + +#include <uacpi/internal/types.h> +#include <uacpi/acpi.h> +#include <uacpi/io.h> + +#ifndef UACPI_BAREBONES_MODE + +typedef struct uacpi_mapped_gas { + uacpi_handle mapping; + uacpi_u8 access_bit_width; + uacpi_u8 total_bit_width; + uacpi_u8 bit_offset; + + uacpi_status (*read)( + uacpi_handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out + ); + uacpi_status (*write)( + uacpi_handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in + ); + + void (*unmap)(uacpi_handle, uacpi_size); +} uacpi_mapped_gas; + +uacpi_status uacpi_map_gas_noalloc( + const struct acpi_gas *gas, uacpi_mapped_gas *out_mapped +); +void uacpi_unmap_gas_nofree(uacpi_mapped_gas *gas); + +uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length); + +void uacpi_read_buffer_field( + const uacpi_buffer_field *field, void *dst +); +void uacpi_write_buffer_field( + uacpi_buffer_field *field, const void *src, uacpi_size size +); + +uacpi_status uacpi_field_unit_get_read_type( + struct uacpi_field_unit *field, uacpi_object_type *out_type +); + +uacpi_status uacpi_field_unit_get_bit_length( + 
struct uacpi_field_unit *field, uacpi_size *out_length +); + +uacpi_status uacpi_read_field_unit( + uacpi_field_unit *field, void *dst, uacpi_size size, + uacpi_data_view *wtr_response +); +uacpi_status uacpi_write_field_unit( + uacpi_field_unit *field, const void *src, uacpi_size size, + uacpi_data_view *wtr_response +); + +uacpi_status uacpi_system_memory_read( + void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_system_memory_write( + void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +); + +uacpi_status uacpi_system_io_read( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_system_io_write( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +); + +uacpi_status uacpi_pci_read( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_pci_write( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/log.h b/sys/include/dev/acpi/uacpi/uacpi/internal/log.h new file mode 100644 index 0000000..e8b0451 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/log.h @@ -0,0 +1,23 @@ +#pragma once + +#include <uacpi/kernel_api.h> +#include <uacpi/internal/context.h> +#include <uacpi/log.h> + +#ifdef UACPI_FORMATTED_LOGGING +#define uacpi_log uacpi_kernel_log +#else +UACPI_PRINTF_DECL(2, 3) +void uacpi_log(uacpi_log_level, const uacpi_char*, ...); +#endif + +#define uacpi_log_lvl(lvl, ...) \ + do { if (uacpi_should_log(lvl)) uacpi_log(lvl, __VA_ARGS__); } while (0) + +#define uacpi_debug(...) uacpi_log_lvl(UACPI_LOG_DEBUG, __VA_ARGS__) +#define uacpi_trace(...) uacpi_log_lvl(UACPI_LOG_TRACE, __VA_ARGS__) +#define uacpi_info(...) uacpi_log_lvl(UACPI_LOG_INFO, __VA_ARGS__) +#define uacpi_warn(...) uacpi_log_lvl(UACPI_LOG_WARN, __VA_ARGS__) +#define uacpi_error(...) 
uacpi_log_lvl(UACPI_LOG_ERROR, __VA_ARGS__) + +void uacpi_logger_initialize(void); diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/mutex.h b/sys/include/dev/acpi/uacpi/uacpi/internal/mutex.h new file mode 100644 index 0000000..4fa2c9b --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/mutex.h @@ -0,0 +1,82 @@ +#pragma once + +#include <uacpi/internal/types.h> +#include <uacpi/kernel_api.h> + +#ifndef UACPI_BAREBONES_MODE + +uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex*); + +uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex*, uacpi_u16 timeout); +uacpi_status uacpi_release_aml_mutex(uacpi_mutex*); + +static inline uacpi_status uacpi_acquire_native_mutex(uacpi_handle mtx) +{ + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + return uacpi_kernel_acquire_mutex(mtx, 0xFFFF); +} + +uacpi_status uacpi_acquire_native_mutex_with_timeout( + uacpi_handle mtx, uacpi_u16 timeout +); + +static inline uacpi_status uacpi_release_native_mutex(uacpi_handle mtx) +{ + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + uacpi_kernel_release_mutex(mtx); + return UACPI_STATUS_OK; +} + +static inline uacpi_status uacpi_acquire_native_mutex_may_be_null( + uacpi_handle mtx +) +{ + if (mtx == UACPI_NULL) + return UACPI_STATUS_OK; + + return uacpi_kernel_acquire_mutex(mtx, 0xFFFF); +} + +static inline uacpi_status uacpi_release_native_mutex_may_be_null( + uacpi_handle mtx +) +{ + if (mtx == UACPI_NULL) + return UACPI_STATUS_OK; + + uacpi_kernel_release_mutex(mtx); + return UACPI_STATUS_OK; +} + +struct uacpi_recursive_lock { + uacpi_handle mutex; + uacpi_size depth; + uacpi_thread_id owner; +}; + +uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock); +uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock); + +uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock); +uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock); + +struct uacpi_rw_lock { + uacpi_handle read_mutex; + uacpi_handle write_mutex; + uacpi_size num_readers; +}; + +uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock); + +uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock); + +uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/namespace.h b/sys/include/dev/acpi/uacpi/uacpi/internal/namespace.h new file mode 100644 index 0000000..369c5a4 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/namespace.h @@ -0,0 +1,123 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/internal/shareable.h> +#include <uacpi/status.h> +#include <uacpi/namespace.h> + +#ifndef UACPI_BAREBONES_MODE + +#define UACPI_NAMESPACE_NODE_FLAG_ALIAS (1 << 0) + +/* + * This node has been uninstalled and has no object associated with it. + * + * This is used to handle edge cases where an object needs to reference + * a namespace node, where the node might end up going out of scope before + * the object lifetime ends. + */ +#define UACPI_NAMESPACE_NODE_FLAG_DANGLING (1u << 1) + +/* + * This node is method-local and must not be exposed via public API as its + * lifetime is limited. 
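+ * (For example, named objects created while a control method executes are
+ * method-local and are torn down when the method returns.)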
+ */ +#define UACPI_NAMESPACE_NODE_FLAG_TEMPORARY (1u << 2) + +#define UACPI_NAMESPACE_NODE_PREDEFINED (1u << 31) + +typedef struct uacpi_namespace_node { + struct uacpi_shareable shareable; + uacpi_object_name name; + uacpi_u32 flags; + uacpi_object *object; + struct uacpi_namespace_node *parent; + struct uacpi_namespace_node *child; + struct uacpi_namespace_node *next; +} uacpi_namespace_node; + +uacpi_status uacpi_initialize_namespace(void); +void uacpi_deinitialize_namespace(void); + +uacpi_namespace_node *uacpi_namespace_node_alloc(uacpi_object_name name); +void uacpi_namespace_node_unref(uacpi_namespace_node *node); + + +uacpi_status uacpi_namespace_node_type_unlocked( + const uacpi_namespace_node *node, uacpi_object_type *out_type +); +uacpi_status uacpi_namespace_node_is_one_of_unlocked( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_bool *out +); + +uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node); + +uacpi_object *uacpi_namespace_node_get_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask +); + +uacpi_status uacpi_namespace_node_acquire_object( + const uacpi_namespace_node *node, uacpi_object **out_obj +); +uacpi_status uacpi_namespace_node_acquire_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits, + uacpi_object **out_obj +); + +uacpi_status uacpi_namespace_node_reacquire_object( + uacpi_object *obj +); +uacpi_status uacpi_namespace_node_release_object( + uacpi_object *obj +); + +uacpi_status uacpi_namespace_node_install( + uacpi_namespace_node *parent, uacpi_namespace_node *node +); +uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node); + +uacpi_namespace_node *uacpi_namespace_node_find_sub_node( + uacpi_namespace_node *parent, + uacpi_object_name name +); + +enum uacpi_may_search_above_parent { + UACPI_MAY_SEARCH_ABOVE_PARENT_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, +}; + +enum uacpi_permanent_only { + UACPI_PERMANENT_ONLY_NO, + UACPI_PERMANENT_ONLY_YES, +}; + +enum uacpi_should_lock { + UACPI_SHOULD_LOCK_NO, + UACPI_SHOULD_LOCK_YES, +}; + +uacpi_status uacpi_namespace_node_resolve( + uacpi_namespace_node *scope, const uacpi_char *path, enum uacpi_should_lock, + enum uacpi_may_search_above_parent, enum uacpi_permanent_only, + uacpi_namespace_node **out_node +); + +uacpi_status uacpi_namespace_do_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits, uacpi_u32 max_depth, enum uacpi_should_lock, + enum uacpi_permanent_only, void *user +); + +uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node); +uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node); +uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node); + +uacpi_status uacpi_namespace_read_lock(void); +uacpi_status uacpi_namespace_read_unlock(void); + +uacpi_status uacpi_namespace_write_lock(void); +uacpi_status uacpi_namespace_write_unlock(void); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/notify.h b/sys/include/dev/acpi/uacpi/uacpi/internal/notify.h new file mode 100644 index 0000000..c1fa8bb --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/notify.h @@ -0,0 +1,13 @@ +#pragma once + +#include <uacpi/internal/types.h> +#include <uacpi/notify.h> + +#ifndef UACPI_BAREBONES_MODE + +uacpi_status uacpi_initialize_notify(void); +void uacpi_deinitialize_notify(void); + 
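+/*
+ * Dispatch 'value' to the notify handlers installed for 'node', as if the
+ * interpreter had executed Notify(node, value). Common values defined by
+ * the ACPI specification include 0x00 (Bus Check), 0x01 (Device Check)
+ * and 0x80 and up (device specific).
+ */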
+uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/opcodes.h b/sys/include/dev/acpi/uacpi/uacpi/internal/opcodes.h new file mode 100644 index 0000000..53ef334 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/opcodes.h @@ -0,0 +1,1390 @@ +#pragma once + +#include <uacpi/types.h> + +typedef uacpi_u16 uacpi_aml_op; + +#define UACPI_EXT_PREFIX 0x5B +#define UACPI_EXT_OP(op) ((UACPI_EXT_PREFIX << 8) | (op)) + +#define UACPI_DUAL_NAME_PREFIX 0x2E +#define UACPI_MULTI_NAME_PREFIX 0x2F +#define UACPI_NULL_NAME 0x00 + +/* + * Opcodes that tell the parser VM how to take apart every AML instruction. + * Every AML opcode has a list of these that is executed by the parser. + */ +enum uacpi_parse_op { + UACPI_PARSE_OP_END = 0, + + /* + * End the execution of the current instruction with a warning if the item + * at decode_ops[pc + 1] is NULL. + */ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, + + // Emit a warning as if the current opcode is being skipped + UACPI_PARSE_OP_EMIT_SKIP_WARN, + + // SimpleName := NameString | ArgObj | LocalObj + UACPI_PARSE_OP_SIMPLE_NAME, + + // SuperName := SimpleName | DebugObj | ReferenceTypeOpcode + UACPI_PARSE_OP_SUPERNAME, + // The resulting item will be set to null if name couldn't be resolved + UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED, + + // TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj + UACPI_PARSE_OP_TERM_ARG, + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, + + /* + * Same as TERM_ARG, but named references are passed as-is. + * This means methods are not invoked, fields are not read, etc. + */ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT, + + /* + * Same as UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT but allows unresolved + * name strings. + */ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED, + + // Operand := TermArg => Integer + UACPI_PARSE_OP_OPERAND, + + // TermArg => String + UACPI_PARSE_OP_STRING, + + /* + * ComputationalData := ByteConst | WordConst | DWordConst | QWordConst | + * String | ConstObj | RevisionOp | DefBuffer + */ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, + + // Target := SuperName | NullName + UACPI_PARSE_OP_TARGET, + + // Parses a pkglen + UACPI_PARSE_OP_PKGLEN, + + /* + * Parses a pkglen and records it, the end of this pkglen is considered + * the end of the instruction. The PC is always set to the end of this + * package once parser reaches UACPI_PARSE_OP_END. + */ + UACPI_PARSE_OP_TRACKED_PKGLEN, + + /* + * Parse a NameString and create the last nameseg. + * Note that this errors out if last nameseg already exists. + */ + UACPI_PARSE_OP_CREATE_NAMESTRING, + + /* + * same as UACPI_PARSE_OP_CREATE_NAMESTRING, but attempting to create an + * already existing object is not fatal if currently loading a table. + */ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, + + /* + * Parse a NameString and put the node into the ready parts array. + * Note that this errors out if the referenced node doesn't exist. + */ + UACPI_PARSE_OP_EXISTING_NAMESTRING, + + /* + * Same as UACPI_PARSE_OP_EXISTING_NAMESTRING except the op doesn't error + * out if namestring couldn't be resolved. + */ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL, + + /* + * Same as UACPI_PARSE_OP_EXISTING_NAMESTRING, but undefined references + * are not fatal if currently loading a table. 
+     */
+    UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD,
+
+    // Invoke a handler at op_handlers[spec->code]
+    UACPI_PARSE_OP_INVOKE_HANDLER,
+
+    // Allocate an object and put it at the front of the item list
+    UACPI_PARSE_OP_OBJECT_ALLOC,
+
+    UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC,
+
+    // Convert last item into a shallow/deep copy of itself
+    UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY,
+    UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY,
+
+    /*
+     * Same as UACPI_PARSE_OP_OBJECT_ALLOC except the type of the allocated
+     * object is specified at decode_ops[pc + 1]
+     */
+    UACPI_PARSE_OP_OBJECT_ALLOC_TYPED,
+
+    // Record current AML program counter as a QWORD immediate
+    UACPI_PARSE_OP_RECORD_AML_PC,
+
+    // Load a QWORD immediate located at decode_ops[pc + 1]
+    UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT,
+
+    // Load a decode_ops[pc + 1] byte imm at decode_ops[pc + 2]
+    UACPI_PARSE_OP_LOAD_INLINE_IMM,
+
+    // Load a QWORD zero immediate
+    UACPI_PARSE_OP_LOAD_ZERO_IMM,
+
+    // Load a decode_ops[pc + 1] byte imm from the instruction stream
+    UACPI_PARSE_OP_LOAD_IMM,
+
+    // Same as UACPI_PARSE_OP_LOAD_IMM, except the resulting value is an object
+    UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT,
+
+    // Create & load an integer constant representing either true or false
+    UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
+    UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
+
+    // Truncate the last item in the list if needed
+    UACPI_PARSE_OP_TRUNCATE_NUMBER,
+
+    // Ensure the type of item is decode_ops[pc + 1]
+    UACPI_PARSE_OP_TYPECHECK,
+
+    // Install the namespace node specified in items[decode_ops[pc + 1]]
+    UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE,
+
+    // Move item to the previous (preempted) op
+    UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
+
+    /*
+     * Same as UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, but the object
+     * is copied instead. (Useful when dealing with multiple targets)
+     * TODO: optimize this so that we can optionally move the object
+     * if target was a null target.
+     */
+    UACPI_PARSE_OP_OBJECT_COPY_TO_PREV,
+
+    // Store the last item to the target at items[decode_ops[pc + 1]]
+    UACPI_PARSE_OP_STORE_TO_TARGET,
+
+    /*
+     * Store the item at items[decode_ops[pc + 2]] to the target
+     * at items[decode_ops[pc + 1]]
+     */
+    UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT,
+
+    /*
+     * Error if reached. Should be used for opcodes that are supposed to be
+     * converted at op parse time, e.g. invoking a method or referring to
+     * a named object.
+     */
+    UACPI_PARSE_OP_UNREACHABLE,
+
+    // Invalid opcode, should never be encountered in the stream
+    UACPI_PARSE_OP_BAD_OPCODE,
+
+    // Decrement the current AML instruction pointer
+    UACPI_PARSE_OP_AML_PC_DECREMENT,
+
+    // Decrement the immediate at decode_ops[pc + 1]
+    UACPI_PARSE_OP_IMM_DECREMENT,
+
+    // Remove the last item off the item stack
+    UACPI_PARSE_OP_ITEM_POP,
+
+    // Dispatch the method call from items[0] and return from current op_exec
+    UACPI_PARSE_OP_DISPATCH_METHOD_CALL,
+
+    /*
+     * Dispatch a table load with scope node at items[0] and method at items[1].
+     * The last item is expected to be an integer object that is set to 0 in
+     * case the load fails.
+     */
+    UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,
+
+    /*
+     * Convert the current resolved namestring to either a method call
+     * or a named object reference.
+     */
+    UACPI_PARSE_OP_CONVERT_NAMESTRING,
+
+    /*
+     * Execute the next instruction only if the currently tracked package still
+     * has data left, otherwise skip decode_ops[pc + 1] bytes.
+     */
+    UACPI_PARSE_OP_IF_HAS_DATA,
+
+    /*
+     * Execute the next instruction only if the handle at
+     * items[decode_ops[pc + 1]] is null.
Otherwise skip + * decode_ops[pc + 2] bytes. + */ + UACPI_PARSE_OP_IF_NULL, + + /* + * Execute the next instruction only if the handle at + * items[-1] is null. Otherwise skip decode_ops[pc + 1] bytes. + */ + UACPI_PARSE_OP_IF_LAST_NULL, + + // The inverse of UACPI_PARSE_OP_IF_NULL + UACPI_PARSE_OP_IF_NOT_NULL, + + // The inverse of UACPI_PARSE_OP_IF_LAST_NULL + UACPI_PARSE_OP_IF_LAST_NOT_NULL, + + /* + * Execute the next instruction only if the last immediate is equal to + * decode_ops[pc + 1], otherwise skip decode_ops[pc + 2] bytes. + */ + UACPI_PARSE_OP_IF_LAST_EQUALS, + + /* + * Execute the next instruction only if the last object is a false value + * (has a value of 0), otherwise skip decode_ops[pc + 1] bytes. + */ + UACPI_PARSE_OP_IF_LAST_FALSE, + + // The inverse of UACPI_PARSE_OP_IF_LAST_FALSE + UACPI_PARSE_OP_IF_LAST_TRUE, + + /* + * Switch to opcode at decode_ops[pc + 1] only if the next AML instruction + * in the stream is equal to it. Note that this looks ahead of the tracked + * package if one is active. Switching to the next op also applies the + * currently tracked package. + */ + UACPI_PARSE_OP_SWITCH_TO_NEXT_IF_EQUALS, + + /* + * Execute the next instruction only if this op was switched to from op at + * (decode_ops[pc + 1] | decode_ops[pc + 2] << 8), otherwise skip + * decode_ops[pc + 3] bytes. + */ + UACPI_PARSE_OP_IF_SWITCHED_FROM, + + /* + * pc = decode_ops[pc + 1] + */ + UACPI_PARSE_OP_JMP, + UACPI_PARSE_OP_MAX = UACPI_PARSE_OP_JMP, +}; +const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op); + +/* + * A few notes about op properties: + * Technically the spec says that RefOfOp is considered a SuperName, but NT + * disagrees about this. For example Store(..., RefOf) fails with + * "Invalid SuperName". MethodInvocation could also technically be considered + * a SuperName, but NT doesn't allow that either: Store(..., MethodInvocation) + * fails with "Invalid Target Method, expected a DataObject" error. 
+ */
+
+enum uacpi_op_property {
+    UACPI_OP_PROPERTY_TERM_ARG = 1,
+    UACPI_OP_PROPERTY_SUPERNAME = 2,
+    UACPI_OP_PROPERTY_SIMPLE_NAME = 4,
+    UACPI_OP_PROPERTY_TARGET = 8,
+
+    // The ops to execute are pointed to by indirect_decode_ops
+    UACPI_OP_PROPERTY_OUT_OF_LINE = 16,
+
+    // Error if encountered in the AML byte stream
+    UACPI_OP_PROPERTY_RESERVED = 128,
+};
+
+struct uacpi_op_spec {
+    uacpi_char *name;
+    union {
+        uacpi_u8 decode_ops[16];
+        uacpi_u8 *indirect_decode_ops;
+    };
+    uacpi_u8 properties;
+    uacpi_aml_op code;
+};
+
+const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op);
+
+#define UACPI_INTERNAL_OP(code) \
+    UACPI_OP(Internal_##code, code, 0, { UACPI_PARSE_OP_UNREACHABLE })
+
+#define UACPI_BAD_OPCODE(code) \
+    UACPI_OP(Reserved_##code, code, 0, { UACPI_PARSE_OP_BAD_OPCODE })
+
+#define UACPI_METHOD_CALL_OPCODE(nargs) \
+    UACPI_OP( \
+        InternalOpMethodCall##nargs##Args, 0xF7 + nargs, \
+        UACPI_OP_PROPERTY_TERM_ARG | \
+        UACPI_OP_PROPERTY_RESERVED, \
+        { \
+            UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, nargs, \
+            UACPI_PARSE_OP_IF_NOT_NULL, 1, 6, \
+            UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \
+            UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY, \
+            UACPI_PARSE_OP_IMM_DECREMENT, 1, \
+            UACPI_PARSE_OP_JMP, 3, \
+            UACPI_PARSE_OP_OBJECT_ALLOC, \
+            UACPI_PARSE_OP_DISPATCH_METHOD_CALL, \
+            UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \
+        } \
+    )
+
+/*
+ * -------------------------------------------------------------
+ * RootChar := ‘\’
+ * ParentPrefixChar := ‘^’
+ * ‘\’ := 0x5C
+ * ‘^’ := 0x5E
+ * MultiNamePrefix := 0x2F
+ * DualNamePrefix := 0x2E
+ * ------------------------------------------------------------
+ * ‘A’-‘Z’ := 0x41 - 0x5A
+ * ‘_’ := 0x5F
+ * LeadNameChar := ‘A’-‘Z’ | ‘_’
+ * NameSeg := <leadnamechar namechar namechar namechar>
+ * NameString := <rootchar namepath> | <prefixpath namepath>
+ * PrefixPath := Nothing | <’^’ prefixpath>
+ * DualNamePath := DualNamePrefix NameSeg NameSeg
+ * MultiNamePath := MultiNamePrefix SegCount NameSeg(SegCount)
+ */
+#define UACPI_UNRESOLVED_NAME_STRING_OP(character, code) \
+    UACPI_OP( \
+        UACPI_InternalOpUnresolvedNameString_##character, code, \
+        UACPI_OP_PROPERTY_SIMPLE_NAME | \
+        UACPI_OP_PROPERTY_SUPERNAME | \
+        UACPI_OP_PROPERTY_TERM_ARG, \
+        { \
+            UACPI_PARSE_OP_AML_PC_DECREMENT, \
+            UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL, \
+            UACPI_PARSE_OP_CONVERT_NAMESTRING, \
+        } \
+    )
+
+#define UACPI_BUILD_LOCAL_OR_ARG_OP(prefix, base, offset) \
+UACPI_OP( \
+    prefix##offset##Op, base + offset, \
+    UACPI_OP_PROPERTY_SUPERNAME | \
+    UACPI_OP_PROPERTY_TERM_ARG | \
+    UACPI_OP_PROPERTY_SIMPLE_NAME, \
+    { \
+        UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC, \
+        UACPI_PARSE_OP_INVOKE_HANDLER, \
+        UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \
+    } \
+) \
+
+#define UACPI_LOCALX_OP(idx) UACPI_BUILD_LOCAL_OR_ARG_OP(Local, 0x60, idx)
+#define UACPI_ARGX_OP(idx) UACPI_BUILD_LOCAL_OR_ARG_OP(Arg, 0x68, idx)
+
+#define UACPI_BUILD_PACKAGE_OP(name, code, jmp_off, ...)
\ +UACPI_OP( \ + name##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + ##__VA_ARGS__, \ + UACPI_PARSE_OP_IF_HAS_DATA, 4, \ + UACPI_PARSE_OP_RECORD_AML_PC, \ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED, \ + UACPI_PARSE_OP_JMP, jmp_off, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_PACKAGE, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_BINARY_MATH_OP(prefix, code) \ +UACPI_OP( \ + prefix##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_TRUNCATE_NUMBER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_UNARY_MATH_OP(type, code) \ +UACPI_OP( \ + type##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_DO_BUILD_BUFFER_FIELD_OP(type, code, node_idx, ...) \ +UACPI_OP( \ + type##FieldOp, code, 0, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_OPERAND, \ + ##__VA_ARGS__, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, node_idx, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER_FIELD, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, node_idx, \ + } \ +) + +#define UACPI_BUILD_BUFFER_FIELD_OP(type, code) \ + UACPI_DO_BUILD_BUFFER_FIELD_OP(Create##type, code, 2) + +#define UACPI_INTEGER_LITERAL_OP(type, code, bytes) \ +UACPI_OP( \ + type##Prefix, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT, bytes, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ + +#define UACPI_BUILD_BINARY_LOGIC_OP(type, code) \ +UACPI_OP( \ + type##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_TO_OP(kind, code, dst_type) \ +UACPI_OP( \ + To##kind##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, dst_type, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_INC_DEC_OP(prefix, code) \ +UACPI_OP( \ + prefix##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_TRUNCATE_NUMBER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 0, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ + +#define UACPI_ENUMERATE_OPCODES \ +UACPI_OP( \ + ZeroOp, 0x00, \ + UACPI_OP_PROPERTY_TARGET | \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + OneOp, 0x01, \ + UACPI_OP_PROPERTY_TERM_ARG, 
\ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BAD_OPCODE(0x02) \ +UACPI_BAD_OPCODE(0x03) \ +UACPI_BAD_OPCODE(0x04) \ +UACPI_BAD_OPCODE(0x05) \ +UACPI_OP( \ + AliasOp, 0x06, 0, \ + { \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 1, \ + } \ +) \ +UACPI_BAD_OPCODE(0x07) \ +UACPI_OP( \ + NameOp, 0x08, 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) \ +UACPI_BAD_OPCODE(0x09) \ +UACPI_INTEGER_LITERAL_OP(Byte, 0x0A, 1) \ +UACPI_INTEGER_LITERAL_OP(Word, 0x0B, 2) \ +UACPI_INTEGER_LITERAL_OP(DWord, 0x0C, 4) \ +UACPI_OP( \ + StringPrefix, 0x0D, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_STRING, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_INTEGER_LITERAL_OP(QWord, 0x0E, 8) \ +UACPI_BAD_OPCODE(0x0F) \ +UACPI_OP( \ + ScopeOp, 0x10, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + BufferOp, 0x11, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_RECORD_AML_PC, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_PACKAGE_OP( \ + Package, 0x12, 3, \ + UACPI_PARSE_OP_LOAD_IMM, 1 \ +) \ +UACPI_BUILD_PACKAGE_OP( \ + VarPackage, 0x13, 2, \ + UACPI_PARSE_OP_OPERAND \ +) \ +UACPI_OP( \ + MethodOp, 0x14, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_RECORD_AML_PC, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 1, \ + } \ +) \ +UACPI_OP( \ + ExternalOp, 0x15, 0, \ + { \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + } \ +) \ +UACPI_BAD_OPCODE(0x16) \ +UACPI_BAD_OPCODE(0x17) \ +UACPI_BAD_OPCODE(0x18) \ +UACPI_BAD_OPCODE(0x19) \ +UACPI_BAD_OPCODE(0x1A) \ +UACPI_BAD_OPCODE(0x1B) \ +UACPI_BAD_OPCODE(0x1C) \ +UACPI_BAD_OPCODE(0x1D) \ +UACPI_BAD_OPCODE(0x1E) \ +UACPI_BAD_OPCODE(0x1F) \ +UACPI_BAD_OPCODE(0x20) \ +UACPI_BAD_OPCODE(0x21) \ +UACPI_BAD_OPCODE(0x22) \ +UACPI_BAD_OPCODE(0x23) \ +UACPI_BAD_OPCODE(0x24) \ +UACPI_BAD_OPCODE(0x25) \ +UACPI_BAD_OPCODE(0x26) \ +UACPI_BAD_OPCODE(0x27) \ +UACPI_BAD_OPCODE(0x28) \ +UACPI_BAD_OPCODE(0x29) \ +UACPI_BAD_OPCODE(0x2A) \ +UACPI_BAD_OPCODE(0x2B) \ +UACPI_BAD_OPCODE(0x2C) \ +UACPI_BAD_OPCODE(0x2D) \ +UACPI_UNRESOLVED_NAME_STRING_OP(DualNamePrefix, 0x2E) \ +UACPI_UNRESOLVED_NAME_STRING_OP(MultiNamePrefix, 0x2F) \ +UACPI_INTERNAL_OP(0x30) \ +UACPI_INTERNAL_OP(0x31) \ +UACPI_INTERNAL_OP(0x32) \ +UACPI_INTERNAL_OP(0x33) \ +UACPI_INTERNAL_OP(0x34) \ 
+UACPI_INTERNAL_OP(0x35) \ +UACPI_INTERNAL_OP(0x36) \ +UACPI_INTERNAL_OP(0x37) \ +UACPI_INTERNAL_OP(0x38) \ +UACPI_INTERNAL_OP(0x39) \ +UACPI_BAD_OPCODE(0x3A) \ +UACPI_BAD_OPCODE(0x3B) \ +UACPI_BAD_OPCODE(0x3C) \ +UACPI_BAD_OPCODE(0x3D) \ +UACPI_BAD_OPCODE(0x3E) \ +UACPI_BAD_OPCODE(0x3F) \ +UACPI_BAD_OPCODE(0x40) \ +UACPI_UNRESOLVED_NAME_STRING_OP(A, 0x41) \ +UACPI_UNRESOLVED_NAME_STRING_OP(B, 0x42) \ +UACPI_UNRESOLVED_NAME_STRING_OP(C, 0x43) \ +UACPI_UNRESOLVED_NAME_STRING_OP(D, 0x44) \ +UACPI_UNRESOLVED_NAME_STRING_OP(E, 0x45) \ +UACPI_UNRESOLVED_NAME_STRING_OP(F, 0x46) \ +UACPI_UNRESOLVED_NAME_STRING_OP(G, 0x47) \ +UACPI_UNRESOLVED_NAME_STRING_OP(H, 0x48) \ +UACPI_UNRESOLVED_NAME_STRING_OP(I, 0x49) \ +UACPI_UNRESOLVED_NAME_STRING_OP(J, 0x4A) \ +UACPI_UNRESOLVED_NAME_STRING_OP(K, 0x4B) \ +UACPI_UNRESOLVED_NAME_STRING_OP(L, 0x4C) \ +UACPI_UNRESOLVED_NAME_STRING_OP(M, 0x4D) \ +UACPI_UNRESOLVED_NAME_STRING_OP(N, 0x4E) \ +UACPI_UNRESOLVED_NAME_STRING_OP(O, 0x4F) \ +UACPI_UNRESOLVED_NAME_STRING_OP(P, 0x50) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Q, 0x51) \ +UACPI_UNRESOLVED_NAME_STRING_OP(R, 0x52) \ +UACPI_UNRESOLVED_NAME_STRING_OP(S, 0x53) \ +UACPI_UNRESOLVED_NAME_STRING_OP(T, 0x54) \ +UACPI_UNRESOLVED_NAME_STRING_OP(U, 0x55) \ +UACPI_UNRESOLVED_NAME_STRING_OP(V, 0x56) \ +UACPI_UNRESOLVED_NAME_STRING_OP(W, 0x57) \ +UACPI_UNRESOLVED_NAME_STRING_OP(X, 0x58) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Y, 0x59) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Z, 0x5A) \ +UACPI_INTERNAL_OP(0x5B) \ +UACPI_UNRESOLVED_NAME_STRING_OP(RootChar, 0x5C) \ +UACPI_BAD_OPCODE(0x5D) \ +UACPI_UNRESOLVED_NAME_STRING_OP(ParentPrefixChar, 0x5E) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Underscore, 0x5F) \ +UACPI_LOCALX_OP(0) \ +UACPI_LOCALX_OP(1) \ +UACPI_LOCALX_OP(2) \ +UACPI_LOCALX_OP(3) \ +UACPI_LOCALX_OP(4) \ +UACPI_LOCALX_OP(5) \ +UACPI_LOCALX_OP(6) \ +UACPI_LOCALX_OP(7) \ +UACPI_ARGX_OP(0) \ +UACPI_ARGX_OP(1) \ +UACPI_ARGX_OP(2) \ +UACPI_ARGX_OP(3) \ +UACPI_ARGX_OP(4) \ +UACPI_ARGX_OP(5) \ +UACPI_ARGX_OP(6) \ +UACPI_BAD_OPCODE(0x6F) \ +UACPI_OP( \ + StoreOp, 0x70, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG, \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + RefOfOp, 0x71, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(Add, 0x72) \ +UACPI_OP( \ + ConcatOp, 0x73, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(Subtract, 0x74) \ +UACPI_BUILD_INC_DEC_OP(Increment, 0x75) \ +UACPI_BUILD_INC_DEC_OP(Decrement, 0x76) \ +UACPI_BUILD_BINARY_MATH_OP(Multiply, 0x77) \ +UACPI_OP( \ + DivideOp, 0x78, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 3, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 2, 4, \ + } \ +) \ 
+UACPI_BUILD_BINARY_MATH_OP(ShiftLeft, 0x79) \ +UACPI_BUILD_BINARY_MATH_OP(ShiftRight, 0x7A) \ +UACPI_BUILD_BINARY_MATH_OP(And, 0x7B) \ +UACPI_BUILD_BINARY_MATH_OP(Nand, 0x7C) \ +UACPI_BUILD_BINARY_MATH_OP(Or, 0x7D) \ +UACPI_BUILD_BINARY_MATH_OP(Nor, 0x7E) \ +UACPI_BUILD_BINARY_MATH_OP(Xor, 0x7F) \ +UACPI_BUILD_UNARY_MATH_OP(Not, 0x80) \ +UACPI_BUILD_UNARY_MATH_OP(FindSetLeftBit, 0x81) \ +UACPI_BUILD_UNARY_MATH_OP(FindSetRightBit, 0x82) \ +UACPI_OP( \ + DerefOfOp, 0x83, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + ConcatResOp, 0x84, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(Mod, 0x85) \ +UACPI_OP( \ + NotifyOp, 0x86, 0, \ + { \ + /* This is technically wrong according to spec but I was */ \ + /* unable to find any examples of anything else after */ \ + /* inspecting about 500 AML dumps. Spec says this is a */ \ + /* SuperName that must evaluate to Device/ThermalZone or */ \ + /* Processor, just ignore for now. */ \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + SizeOfOp, 0x87, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + IndexOp, 0x88, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_SUPERNAME | \ + UACPI_OP_PROPERTY_SIMPLE_NAME, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + MatchOp, 0x89, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_PACKAGE, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BUFFER_FIELD_OP(DWord, 0x8A) \ +UACPI_BUILD_BUFFER_FIELD_OP(Word, 0x8B) \ +UACPI_BUILD_BUFFER_FIELD_OP(Byte, 0x8C) \ +UACPI_BUILD_BUFFER_FIELD_OP(Bit, 0x8D) \ +UACPI_OP( \ + ObjectTypeOp, 0x8E, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BUFFER_FIELD_OP(QWord, 0x8F) \ +UACPI_BUILD_BINARY_LOGIC_OP(Land, 0x90) \ +UACPI_BUILD_BINARY_LOGIC_OP(Lor, 0x91) \ +UACPI_OP( \ + LnotOp, 0x92, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + 
UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_LOGIC_OP(LEqual, 0x93) \ +UACPI_BUILD_BINARY_LOGIC_OP(LGreater, 0x94) \ +UACPI_BUILD_BINARY_LOGIC_OP(LLess, 0x95) \ +UACPI_BUILD_TO_OP(Buffer, 0x96, UACPI_OBJECT_BUFFER) \ +UACPI_BUILD_TO_OP(DecimalString, 0x97, UACPI_OBJECT_STRING) \ +UACPI_BUILD_TO_OP(HexString, 0x98, UACPI_OBJECT_STRING) \ +UACPI_BUILD_TO_OP(Integer, 0x99, UACPI_OBJECT_INTEGER) \ +UACPI_BAD_OPCODE(0x9A) \ +UACPI_BAD_OPCODE(0x9B) \ +UACPI_OP( \ + ToStringOp, 0x9C, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_STRING, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + CopyObjectOp, 0x9D, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + UACPI_PARSE_OP_SIMPLE_NAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + MidOp, 0x9E, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 3, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + ContinueOp, 0x9F, 0, \ + { \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + IfOp, 0xA0, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_IF_LAST_NULL, 3, \ + UACPI_PARSE_OP_EMIT_SKIP_WARN, \ + UACPI_PARSE_OP_JMP, 9, \ + UACPI_PARSE_OP_IF_LAST_FALSE, 4, \ + UACPI_PARSE_OP_SWITCH_TO_NEXT_IF_EQUALS, 0xA1, 0x00, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + ElseOp, 0xA1, 0, \ + { \ + UACPI_PARSE_OP_IF_SWITCHED_FROM, 0xA0, 0x00, 10, \ + UACPI_PARSE_OP_IF_LAST_NULL, 3, \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_EMIT_SKIP_WARN, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_PKGLEN, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + } \ +) \ +UACPI_OP( \ + WhileOp, 0xA2, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_IF_LAST_TRUE, 1, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + NoopOp, 0xA3, 0, \ + { \ + UACPI_PARSE_OP_END, \ + } \ +) \ +UACPI_OP( \ + ReturnOp, 0xA4, 0, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + BreakOp, 0xA5, 0, \ + { \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_BAD_OPCODE(0xA6) \ +UACPI_BAD_OPCODE(0xA7) \ +UACPI_BAD_OPCODE(0xA8) \ +UACPI_BAD_OPCODE(0xA9) \ +UACPI_BAD_OPCODE(0xAA) \ +UACPI_BAD_OPCODE(0xAB) \ +UACPI_BAD_OPCODE(0xAC) \ +UACPI_BAD_OPCODE(0xAD) \ +UACPI_BAD_OPCODE(0xAE) \ +UACPI_BAD_OPCODE(0xAF) \ +UACPI_BAD_OPCODE(0xB0) \ +UACPI_BAD_OPCODE(0xB1) \ +UACPI_BAD_OPCODE(0xB2) \ +UACPI_BAD_OPCODE(0xB3) \ +UACPI_BAD_OPCODE(0xB4) \ +UACPI_BAD_OPCODE(0xB5) \ +UACPI_BAD_OPCODE(0xB6) \ +UACPI_BAD_OPCODE(0xB7) \ +UACPI_BAD_OPCODE(0xB8) \ +UACPI_BAD_OPCODE(0xB9) \ +UACPI_BAD_OPCODE(0xBA) \ +UACPI_BAD_OPCODE(0xBB) \ +UACPI_BAD_OPCODE(0xBC) \ +UACPI_BAD_OPCODE(0xBD) \ 
+UACPI_BAD_OPCODE(0xBE) \ +UACPI_BAD_OPCODE(0xBF) \ +UACPI_BAD_OPCODE(0xC0) \ +UACPI_BAD_OPCODE(0xC1) \ +UACPI_BAD_OPCODE(0xC2) \ +UACPI_BAD_OPCODE(0xC3) \ +UACPI_BAD_OPCODE(0xC4) \ +UACPI_BAD_OPCODE(0xC5) \ +UACPI_BAD_OPCODE(0xC6) \ +UACPI_BAD_OPCODE(0xC7) \ +UACPI_BAD_OPCODE(0xC8) \ +UACPI_BAD_OPCODE(0xC9) \ +UACPI_BAD_OPCODE(0xCA) \ +UACPI_BAD_OPCODE(0xCB) \ +UACPI_OP( \ + BreakPointOp, 0xCC, 0, \ + { \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_BAD_OPCODE(0xCD) \ +UACPI_BAD_OPCODE(0xCE) \ +UACPI_BAD_OPCODE(0xCF) \ +UACPI_BAD_OPCODE(0xD0) \ +UACPI_BAD_OPCODE(0xD1) \ +UACPI_BAD_OPCODE(0xD2) \ +UACPI_BAD_OPCODE(0xD3) \ +UACPI_BAD_OPCODE(0xD4) \ +UACPI_BAD_OPCODE(0xD5) \ +UACPI_BAD_OPCODE(0xD6) \ +UACPI_BAD_OPCODE(0xD7) \ +UACPI_BAD_OPCODE(0xD8) \ +UACPI_BAD_OPCODE(0xD9) \ +UACPI_BAD_OPCODE(0xDA) \ +UACPI_BAD_OPCODE(0xDB) \ +UACPI_BAD_OPCODE(0xDC) \ +UACPI_BAD_OPCODE(0xDD) \ +UACPI_BAD_OPCODE(0xDE) \ +UACPI_BAD_OPCODE(0xDF) \ +UACPI_BAD_OPCODE(0xE0) \ +UACPI_BAD_OPCODE(0xE1) \ +UACPI_BAD_OPCODE(0xE2) \ +UACPI_BAD_OPCODE(0xE3) \ +UACPI_BAD_OPCODE(0xE4) \ +UACPI_BAD_OPCODE(0xE5) \ +UACPI_BAD_OPCODE(0xE6) \ +UACPI_BAD_OPCODE(0xE7) \ +UACPI_BAD_OPCODE(0xE8) \ +UACPI_BAD_OPCODE(0xE9) \ +UACPI_BAD_OPCODE(0xEA) \ +UACPI_BAD_OPCODE(0xEB) \ +UACPI_BAD_OPCODE(0xEC) \ +UACPI_BAD_OPCODE(0xED) \ +UACPI_BAD_OPCODE(0xEE) \ +UACPI_BAD_OPCODE(0xEF) \ +UACPI_BAD_OPCODE(0xF0) \ +UACPI_BAD_OPCODE(0xF1) \ +UACPI_BAD_OPCODE(0xF2) \ +UACPI_BAD_OPCODE(0xF3) \ +UACPI_OP( \ + InternalOpReadFieldAsBuffer, 0xF4, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_RESERVED, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + InternalOpReadFieldAsInteger, 0xF5, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_RESERVED, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + InternalOpNamedObject, 0xF6, \ + UACPI_OP_PROPERTY_SIMPLE_NAME | \ + UACPI_OP_PROPERTY_SUPERNAME | \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_RESERVED, \ + { \ + UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_METHOD_CALL_OPCODE(0) \ +UACPI_METHOD_CALL_OPCODE(1) \ +UACPI_METHOD_CALL_OPCODE(2) \ +UACPI_METHOD_CALL_OPCODE(3) \ +UACPI_METHOD_CALL_OPCODE(4) \ +UACPI_METHOD_CALL_OPCODE(5) \ +UACPI_METHOD_CALL_OPCODE(6) \ +UACPI_METHOD_CALL_OPCODE(7) \ +UACPI_OP( \ + OnesOp, 0xFF, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, \ + UACPI_PARSE_OP_TRUNCATE_NUMBER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +extern uacpi_u8 uacpi_field_op_decode_ops[]; +extern uacpi_u8 uacpi_index_field_op_decode_ops[]; +extern uacpi_u8 uacpi_bank_field_op_decode_ops[]; +extern uacpi_u8 uacpi_load_op_decode_ops[]; +extern uacpi_u8 uacpi_load_table_op_decode_ops[]; + +#define UACPI_BUILD_NAMED_SCOPE_OBJECT_OP(name, code, type, ...) 
\ +UACPI_OP( \ + name##Op, UACPI_EXT_OP(code), 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + ##__VA_ARGS__, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, type, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 1, \ + } \ +) + +#define UACPI_BUILD_TO_FROM_BCD(type, code) \ +UACPI_OP( \ + type##BCDOp, UACPI_EXT_OP(code), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_ENUMERATE_EXT_OPCODES \ +UACPI_OP( \ + ReservedExtOp, UACPI_EXT_OP(0x00), 0, \ + { \ + UACPI_PARSE_OP_BAD_OPCODE, \ + } \ +) \ +UACPI_OP( \ + MutexOp, UACPI_EXT_OP(0x01), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_MUTEX, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) \ +UACPI_OP( \ + EventOp, UACPI_EXT_OP(0x02), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_EVENT, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) \ +UACPI_OP( \ + CondRefOfOp, UACPI_EXT_OP(0x12), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_IF_NULL, 0, 3, \ + UACPI_PARSE_OP_LOAD_FALSE_OBJECT, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_DO_BUILD_BUFFER_FIELD_OP( \ + Create, UACPI_EXT_OP(0x13), 3, \ + UACPI_PARSE_OP_OPERAND \ +) \ +UACPI_OUT_OF_LINE_OP( \ + LoadTableOp, UACPI_EXT_OP(0x1F), \ + uacpi_load_table_op_decode_ops, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OUT_OF_LINE_OP( \ + LoadOp, UACPI_EXT_OP(0x20), \ + uacpi_load_op_decode_ops, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OP( \ + StallOp, UACPI_EXT_OP(0x21), 0, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + SleepOp, UACPI_EXT_OP(0x22), 0, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + AcquireOp, UACPI_EXT_OP(0x23), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_LOAD_IMM, 2, \ + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + SignalOp, UACPI_EXT_OP(0x24), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + WaitOp, UACPI_EXT_OP(0x25), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + ResetOp, UACPI_EXT_OP(0x26), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + ReleaseOp, 
UACPI_EXT_OP(0x27), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_BUILD_TO_FROM_BCD(From, 0x28) \ +UACPI_BUILD_TO_FROM_BCD(To, 0x29) \ +UACPI_OP( \ + UnloadOp, UACPI_EXT_OP(0x2A), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + RevisionOp, UACPI_EXT_OP(0x30), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + DebugOp, UACPI_EXT_OP(0x31), \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_SUPERNAME | \ + UACPI_OP_PROPERTY_TARGET, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_DEBUG, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + FatalOp, UACPI_EXT_OP(0x32), 0, \ + { \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 4, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + TimerOp, UACPI_EXT_OP(0x33), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + OpRegionOp, UACPI_EXT_OP(0x80), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_OPERATION_REGION, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) \ +UACPI_OUT_OF_LINE_OP( \ + FieldOp, UACPI_EXT_OP(0x81), \ + uacpi_field_op_decode_ops, \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + Device, 0x82, UACPI_OBJECT_DEVICE \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + Processor, 0x83, UACPI_OBJECT_PROCESSOR, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 4, \ + UACPI_PARSE_OP_LOAD_IMM, 1 \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + PowerRes, 0x84, UACPI_OBJECT_POWER_RESOURCE, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 2 \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + ThermalZone, 0x85, UACPI_OBJECT_THERMAL_ZONE \ +) \ +UACPI_OUT_OF_LINE_OP( \ + IndexFieldOp, UACPI_EXT_OP(0x86), \ + uacpi_index_field_op_decode_ops, \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OUT_OF_LINE_OP( \ + BankFieldOp, UACPI_EXT_OP(0x87), \ + uacpi_bank_field_op_decode_ops, \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OP( \ + DataRegionOp, UACPI_EXT_OP(0x88), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_STRING, \ + UACPI_PARSE_OP_STRING, \ + UACPI_PARSE_OP_STRING, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_OPERATION_REGION, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) + +enum uacpi_aml_op { +#define UACPI_OP(name, code, ...) UACPI_AML_OP_##name = code, +#define UACPI_OUT_OF_LINE_OP(name, code, ...) 
UACPI_AML_OP_##name = code, + UACPI_ENUMERATE_OPCODES + UACPI_ENUMERATE_EXT_OPCODES +#undef UACPI_OP +#undef UACPI_OUT_OF_LINE_OP +}; diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/opregion.h b/sys/include/dev/acpi/uacpi/uacpi/internal/opregion.h new file mode 100644 index 0000000..a1173f4 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/opregion.h @@ -0,0 +1,49 @@ +#pragma once + +#include <uacpi/internal/types.h> +#include <uacpi/opregion.h> + +#ifndef UACPI_BAREBONES_MODE + +uacpi_status uacpi_initialize_opregion(void); +void uacpi_deinitialize_opregion(void); + +void uacpi_trace_region_error( + uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret +); + +uacpi_status uacpi_install_address_space_handler_with_flags( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context, + uacpi_u16 flags +); + +void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node); + +uacpi_bool uacpi_address_space_handler_is_default( + uacpi_address_space_handler *handler +); + +uacpi_address_space_handlers *uacpi_node_get_address_space_handlers( + uacpi_namespace_node *node +); + +uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node); + +uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node); + +void uacpi_install_default_address_space_handlers(void); + +uacpi_bool uacpi_is_buffer_access_address_space(uacpi_address_space space); + +union uacpi_opregion_io_data { + uacpi_u64 *integer; + uacpi_data_view buffer; +}; + +uacpi_status uacpi_dispatch_opregion_io( + uacpi_field_unit *field, uacpi_u32 offset, + uacpi_region_op op, union uacpi_opregion_io_data data +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/osi.h b/sys/include/dev/acpi/uacpi/uacpi/internal/osi.h new file mode 100644 index 0000000..6d7b0db --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/osi.h @@ -0,0 +1,8 @@ +#pragma once + +#include <uacpi/osi.h> + +uacpi_status uacpi_initialize_interfaces(void); +void uacpi_deinitialize_interfaces(void); + +uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value); diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/registers.h b/sys/include/dev/acpi/uacpi/uacpi/internal/registers.h new file mode 100644 index 0000000..84694ac --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/registers.h @@ -0,0 +1,7 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/registers.h> + +uacpi_status uacpi_initialize_registers(void); +void uacpi_deinitialize_registers(void); diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/resources.h b/sys/include/dev/acpi/uacpi/uacpi/internal/resources.h new file mode 100644 index 0000000..4c4a1ff --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/resources.h @@ -0,0 +1,327 @@ +#pragma once + +#include <uacpi/internal/types.h> +#include <uacpi/resources.h> + +#ifndef UACPI_BAREBONES_MODE + +enum uacpi_aml_resource { + UACPI_AML_RESOURCE_TYPE_INVALID = 0, + + // Small resources + UACPI_AML_RESOURCE_IRQ, + UACPI_AML_RESOURCE_DMA, + UACPI_AML_RESOURCE_START_DEPENDENT, + UACPI_AML_RESOURCE_END_DEPENDENT, + UACPI_AML_RESOURCE_IO, + UACPI_AML_RESOURCE_FIXED_IO, + UACPI_AML_RESOURCE_FIXED_DMA, + UACPI_AML_RESOURCE_VENDOR_TYPE0, + UACPI_AML_RESOURCE_END_TAG, + + // Large resources + UACPI_AML_RESOURCE_MEMORY24, + UACPI_AML_RESOURCE_GENERIC_REGISTER, + UACPI_AML_RESOURCE_VENDOR_TYPE1, + UACPI_AML_RESOURCE_MEMORY32, + UACPI_AML_RESOURCE_FIXED_MEMORY32, + 
UACPI_AML_RESOURCE_ADDRESS32,
+    UACPI_AML_RESOURCE_ADDRESS16,
+    UACPI_AML_RESOURCE_EXTENDED_IRQ,
+    UACPI_AML_RESOURCE_ADDRESS64,
+    UACPI_AML_RESOURCE_ADDRESS64_EXTENDED,
+    UACPI_AML_RESOURCE_GPIO_CONNECTION,
+    UACPI_AML_RESOURCE_PIN_FUNCTION,
+    UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+    UACPI_AML_RESOURCE_PIN_CONFIGURATION,
+    UACPI_AML_RESOURCE_PIN_GROUP,
+    UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION,
+    UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION,
+    UACPI_AML_RESOURCE_CLOCK_INPUT,
+    UACPI_AML_RESOURCE_MAX = UACPI_AML_RESOURCE_CLOCK_INPUT,
+};
+
+enum uacpi_aml_resource_size_kind {
+    UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+    UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS,
+    UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+};
+
+enum uacpi_aml_resource_kind {
+    UACPI_AML_RESOURCE_KIND_SMALL = 0,
+    UACPI_AML_RESOURCE_KIND_LARGE,
+};
+
+enum uacpi_resource_convert_opcode {
+    UACPI_RESOURCE_CONVERT_OPCODE_END = 0,
+
+    /*
+     * AML -> native:
+     * Take the mask at 'aml_offset' and convert to an array of uacpi_u8
+     * at 'native_offset' with the value corresponding to the bit index.
+     * The array size is written to the byte at offset 'arg2'.
+     *
+     * native -> AML:
+     * Walk each element of the array at 'native_offset' and set the
+     * corresponding bit in the mask at 'aml_offset' to 1. The array size is
+     * read from the byte at offset 'arg2'.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8,
+    UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16,
+
+    /*
+     * AML -> native:
+     * Grab the bits at the byte at 'aml_offset' + 'bit_index', and copy its
+     * value into the byte at 'native_offset'.
+     *
+     * native -> AML:
+     * Grab the first N bits at 'native_offset' and copy to 'aml_offset'
+     * starting at the 'bit_index'.
+     *
+     * NOTE:
+     * These must be contiguous in this order.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1,
+    UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2,
+    UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3,
+    UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6 =
+        UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3 + 3,
+
+    /*
+     * AML -> native:
+     * Copy N bytes at 'aml_offset' to 'native_offset'.
+     *
+     * native -> AML:
+     * Copy N bytes at 'native_offset' to 'aml_offset'.
+     *
+     * 'imm' is added to the accumulator.
+     *
+     * NOTE: These are affected by the current value in the accumulator. If it's
+     *       set to 0 at the time of evaluation, this is executed once, otherwise
+     *       N times. 0xFF is considered a special value, which resets the
+     *       accumulator to 0 unconditionally.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8,
+    UACPI_RESOURCE_CONVERT_OPCODE_FIELD_16,
+    UACPI_RESOURCE_CONVERT_OPCODE_FIELD_32,
+    UACPI_RESOURCE_CONVERT_OPCODE_FIELD_64,
+
+    /*
+     * If the length of the current resource is less than 'arg0', then skip
+     * 'imm' instructions.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_AML_SIZE_LESS_THAN,
+
+    /*
+     * Skip 'imm' instructions if 'arg0' is not equal to the value in the
+     * accumulator.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_NOT_EQUALS,
+
+    /*
+     * AML -> native:
+     * Set the byte at 'native_offset' to 'imm'.
+     *
+     * native -> AML:
+     * Set the byte at 'aml_offset' to 'imm'.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_SET_TO_IMM,
+
+    /*
+     * AML -> native:
+     * Load the AML resource length into the accumulator as well as the field at
+     * 'native_offset' of width N.
+     *
+     * native -> AML:
+     * Load the resource length into the accumulator.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32,
+
+    /*
+     * AML -> native:
+     * Load the 8 bit field at 'aml_offset' into the accumulator and store at
+     * 'native_offset'.
+ * + * native -> AML: + * Load the 8 bit field at 'native_offset' into the accumulator and store + * at 'aml_offset'. + * + * The accumulator is multiplied by 'imm' unless it's set to zero. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_STORE, + + /* + * Load the N bit field at 'native_offset' into the accumulator + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE, + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_16_NATIVE, + + /* + * Load 'imm' into the accumulator. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_IMM, + + /* + * AML -> native: + * Load the resource source at offset = aml size + accumulator into the + * uacpi_resource_source struct at 'native_offset'. The string bytes are + * written to the offset at resource size + accumulator. The presence is + * detected by comparing the length of the resource to the offset, + * 'arg2' optionally specifies the offset to the upper bound of the string. + * + * native -> AML: + * Load the resource source from the uacpi_resource_source struct at + * 'native_offset' to aml_size + accumulator. aml_size + accumulator is + * optionally written to 'aml_offset' if it's specified. + */ + UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE, + UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX, + UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL, + + /* + * AML -> native: + * Load the pin table with upper bound specified at 'aml_offset'. + * The table length is calculated by subtracting the upper bound from + * aml_size and is written into the accumulator. + * + * native -> AML: + * Load the pin table length from 'native_offset' and multiply by 2, store + * the result in the accumulator. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH, + + /* + * AML -> native: + * Store the accumulator divided by 2 at 'native_offset'. + * The table is copied to the offset at resource size from offset at + * aml_size with the pointer written to the offset at 'arg2'. + * + * native -> AML: + * Read the pin table from resource size offset, write aml_size to + * 'aml_offset'. Copy accumulator bytes to the offset at aml_size. + */ + UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE, + + /* + * AML -> native: + * Load vendor data with offset stored at 'aml_offset'. The length is + * calculated as aml_size - aml_offset and is written to 'native_offset'. + * The data is written to offset - aml_size with the pointer written back + * to the offset at 'arg2'. + * + * native -> AML: + * Read vendor data from the pointer at offset 'arg2' and size at + * 'native_offset', the offset to write to is calculated as the difference + * between the data pointer and the native resource end pointer. + * offset + aml_size is written to 'aml_offset' and the data is copied + * there as well. + */ + UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA, + + /* + * AML -> native: + * Read the serial type from the byte at 'aml_offset' and write it to the + * type field of the uacpi_resource_serial_bus_common structure. Convert + * the serial type to native and set the resource type to it. Copy the + * vendor data to the offset at native size, the length is calculated + * as type_data_length - extra-type-specific-size, and is written to + * vendor_data_length, as well as the accumulator. The data pointer is + * written to vendor_data. + * + * native -> AML: + * Set the serial type at 'aml_offset' to the value stored at + * 'native_offset'. Load the vendor data to the offset at aml_size, + * the length is read from 'vendor_data_length', and the data is copied from + * 'vendor_data'. 
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC,
+
+    /*
+     * Produces an error if encountered in the instruction stream.
+     * Used to trap invalid/unexpected code flow.
+     */
+    UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE,
+};
+
+struct uacpi_resource_convert_instruction {
+    uacpi_u8 code;
+
+    union {
+        uacpi_u8 aml_offset;
+        uacpi_u8 arg0;
+    } f1;
+
+    union {
+        uacpi_u8 native_offset;
+        uacpi_u8 arg1;
+    } f2;
+
+    union {
+        uacpi_u8 imm;
+        uacpi_u8 bit_index;
+        uacpi_u8 arg2;
+    } f3;
+};
+
+struct uacpi_resource_spec {
+    uacpi_u8 type : 5;
+    uacpi_u8 native_type : 5;
+    uacpi_u8 resource_kind : 1;
+    uacpi_u8 size_kind : 2;
+
+    /*
+     * Size of the resource as it appears in the AML byte stream; for variable
+     * length resources this is the minimum.
+     */
+    uacpi_u16 aml_size;
+
+    /*
+     * Size of the native human-readable uacpi resource; for variable length
+     * resources this is the minimum. The final length is this field plus the
+     * result of extra_size_for_native().
+     */
+    uacpi_u16 native_size;
+
+    /*
+     * Calculate the number of extra bytes that must be allocated for a specific
+     * native resource given the AML counterpart. If this is NULL, no extra
+     * bytes are needed, i.e. the native resource is always the same size.
+     */
+    uacpi_size (*extra_size_for_native)(
+        const struct uacpi_resource_spec*, void*, uacpi_size
+    );
+
+    /*
+     * Calculate the number of bytes needed to represent a native resource as
+     * AML. The 'aml_size' field is used if this is NULL.
+     */
+    uacpi_size (*size_for_aml)(
+        const struct uacpi_resource_spec*, uacpi_resource*
+    );
+
+    const struct uacpi_resource_convert_instruction *to_native;
+    const struct uacpi_resource_convert_instruction *to_aml;
+};
+
+typedef uacpi_iteration_decision (*uacpi_aml_resource_iteration_callback)(
+    void*, uacpi_u8 *data, uacpi_u16 resource_size,
+    const struct uacpi_resource_spec*
+);
+
+uacpi_status uacpi_for_each_aml_resource(
+    uacpi_data_view, uacpi_aml_resource_iteration_callback cb, void *user
+);
+
+uacpi_status uacpi_find_aml_resource_end_tag(
+    uacpi_data_view, uacpi_size *out_offset
+);
+
+uacpi_status uacpi_native_resources_from_aml(
+    uacpi_data_view, uacpi_resources **out_resources
+);
+
+uacpi_status uacpi_native_resources_to_aml(
+    uacpi_resources *resources, uacpi_object **out_template
+);
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/shareable.h b/sys/include/dev/acpi/uacpi/uacpi/internal/shareable.h
new file mode 100644
index 0000000..e00d850
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/internal/shareable.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include <uacpi/types.h>
+
+struct uacpi_shareable {
+    uacpi_u32 reference_count;
+};
+
+void uacpi_shareable_init(uacpi_handle);
+
+uacpi_bool uacpi_bugged_shareable(uacpi_handle);
+void uacpi_make_shareable_bugged(uacpi_handle);
+
+uacpi_u32 uacpi_shareable_ref(uacpi_handle);
+uacpi_u32 uacpi_shareable_unref(uacpi_handle);
+
+void uacpi_shareable_unref_and_delete_if_last(
+    uacpi_handle, void (*do_free)(uacpi_handle)
+);
+
+uacpi_u32 uacpi_shareable_refcount(uacpi_handle);
diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/stdlib.h b/sys/include/dev/acpi/uacpi/uacpi/internal/stdlib.h
new file mode 100644
index 0000000..853c1bc
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/internal/stdlib.h
@@ -0,0 +1,131 @@
+#pragma once
+
+#include <uacpi/internal/types.h>
+#include <uacpi/internal/helpers.h>
+#include <uacpi/platform/libc.h>
+#include <uacpi/platform/config.h>
+#include <uacpi/kernel_api.h>
+
+#define offsetof(st, m) \
+ ((size_t)&(((st *)0)->m)) + +#ifdef UACPI_USE_BUILTIN_STRING + +#ifndef uacpi_memcpy +void *uacpi_memcpy(void *dest, const void *src, uacpi_size count); +#endif + +#ifndef uacpi_memmove +void *uacpi_memmove(void *dest, const void *src, uacpi_size count); +#endif + +#ifndef uacpi_memset +void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count); +#endif + +#ifndef uacpi_memcmp +uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count); +#endif + +#else + +#ifndef uacpi_memcpy + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMCPY + #define uacpi_memcpy __builtin_memcpy + #else + extern void *memcpy(void *dest, const void *src, uacpi_size count); + #define uacpi_memcpy memcpy + #endif +#endif + +#ifndef uacpi_memmove + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMMOVE + #define uacpi_memmove __builtin_memmove + #else + extern void *memmove(void *dest, const void *src, uacpi_size count); + #define uacpi_memmove memmove + #endif +#endif + +#ifndef uacpi_memset + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMSET + #define uacpi_memset __builtin_memset + #else + extern void *memset(void *dest, int ch, uacpi_size count); + #define uacpi_memset memset + #endif +#endif + +#ifndef uacpi_memcmp + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMCMP + #define uacpi_memcmp __builtin_memcmp + #else + extern int memcmp(const void *lhs, const void *rhs, uacpi_size count); + #define uacpi_memcmp memcmp + #endif +#endif + +#endif + +#ifndef uacpi_strlen +uacpi_size uacpi_strlen(const uacpi_char *str); +#endif + +#ifndef uacpi_strnlen +uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max); +#endif + +#ifndef uacpi_strcmp +uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs); +#endif + +#ifndef uacpi_snprintf +UACPI_PRINTF_DECL(3, 4) +uacpi_i32 uacpi_snprintf( + uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ... +); +#endif + +#ifndef uacpi_vsnprintf +uacpi_i32 uacpi_vsnprintf( + uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, + uacpi_va_list vlist +); +#endif + +#ifdef UACPI_SIZED_FREES +#define uacpi_free(mem, size) uacpi_kernel_free(mem, size) +#else +#define uacpi_free(mem, _) uacpi_kernel_free(mem) +#endif + +#define uacpi_memzero(ptr, size) uacpi_memset(ptr, 0, size) + +#define UACPI_COMPARE(x, y, op) ((x) op (y) ? 
(x) : (y)) +#define UACPI_MIN(x, y) UACPI_COMPARE(x, y, <) +#define UACPI_MAX(x, y) UACPI_COMPARE(x, y, >) + +#define UACPI_ALIGN_UP_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define UACPI_ALIGN_UP(x, val, type) UACPI_ALIGN_UP_MASK(x, (type)(val) - 1) + +#define UACPI_ALIGN_DOWN_MASK(x, mask) ((x) & ~(mask)) +#define UACPI_ALIGN_DOWN(x, val, type) UACPI_ALIGN_DOWN_MASK(x, (type)(val) - 1) + +#define UACPI_IS_ALIGNED_MASK(x, mask) (((x) & (mask)) == 0) +#define UACPI_IS_ALIGNED(x, val, type) UACPI_IS_ALIGNED_MASK(x, (type)(val) - 1) + +#define UACPI_IS_POWER_OF_TWO(x, type) UACPI_IS_ALIGNED(x, x, type) + +void uacpi_memcpy_zerout(void *dst, const void *src, + uacpi_size dst_size, uacpi_size src_size); + +// Returns the one-based bit location of LSb or 0 +uacpi_u8 uacpi_bit_scan_forward(uacpi_u64); + +// Returns the one-based bit location of MSb or 0 +uacpi_u8 uacpi_bit_scan_backward(uacpi_u64); + +#ifndef UACPI_NATIVE_ALLOC_ZEROED +void *uacpi_builtin_alloc_zeroed(uacpi_size size); +#define uacpi_kernel_alloc_zeroed uacpi_builtin_alloc_zeroed +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/tables.h b/sys/include/dev/acpi/uacpi/uacpi/internal/tables.h new file mode 100644 index 0000000..8a5345f --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/tables.h @@ -0,0 +1,70 @@ +#pragma once + +#include <uacpi/internal/context.h> +#include <uacpi/internal/interpreter.h> +#include <uacpi/types.h> +#include <uacpi/status.h> +#include <uacpi/tables.h> + +enum uacpi_table_origin { +#ifndef UACPI_BAREBONES_MODE + UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL = 0, +#endif + UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL = 1, + + UACPI_TABLE_ORIGIN_HOST_VIRTUAL, + UACPI_TABLE_ORIGIN_HOST_PHYSICAL, +}; + +struct uacpi_installed_table { + uacpi_phys_addr phys_addr; + struct acpi_sdt_hdr hdr; + void *ptr; + + uacpi_u16 reference_count; + +#define UACPI_TABLE_LOADED (1 << 0) +#define UACPI_TABLE_CSUM_VERIFIED (1 << 1) +#define UACPI_TABLE_INVALID (1 << 2) + uacpi_u8 flags; + uacpi_u8 origin; +}; + +uacpi_status uacpi_initialize_tables(void); +void uacpi_deinitialize_tables(void); + +uacpi_bool uacpi_signatures_match(const void *const lhs, const void *const rhs); +uacpi_status uacpi_check_table_signature(void *table, const uacpi_char *expect); +uacpi_status uacpi_verify_table_checksum(void *table, uacpi_size size); + +uacpi_status uacpi_table_install_physical_with_origin( + uacpi_phys_addr phys, enum uacpi_table_origin origin, uacpi_table *out_table +); +uacpi_status uacpi_table_install_with_origin( + void *virt, enum uacpi_table_origin origin, uacpi_table *out_table +); + +#ifndef UACPI_BAREBONES_MODE +void uacpi_table_mark_as_loaded(uacpi_size idx); + +uacpi_status uacpi_table_load_with_cause( + uacpi_size idx, enum uacpi_table_load_cause cause +); +#endif // !UACPI_BAREBONES_MODE + +typedef uacpi_iteration_decision (*uacpi_table_iteration_callback) + (void *user, struct uacpi_installed_table *tbl, uacpi_size idx); + +uacpi_status uacpi_for_each_table( + uacpi_size base_idx, uacpi_table_iteration_callback, void *user +); + +typedef uacpi_bool (*uacpi_table_match_callback) + (struct uacpi_installed_table *tbl); + +uacpi_status uacpi_table_match( + uacpi_size base_idx, uacpi_table_match_callback, uacpi_table *out_table +); + +#define UACPI_PRI_TBL_HDR "'%.4s' (OEM ID '%.6s' OEM Table ID '%.8s')" +#define UACPI_FMT_TBL_HDR(hdr) (hdr)->signature, (hdr)->oemid, (hdr)->oem_table_id diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/types.h b/sys/include/dev/acpi/uacpi/uacpi/internal/types.h new file mode 
100644 index 0000000..b994a27 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/types.h @@ -0,0 +1,310 @@ +#pragma once + +#include <uacpi/status.h> +#include <uacpi/types.h> +#include <uacpi/internal/shareable.h> + +#ifndef UACPI_BAREBONES_MODE + +// object->flags field if object->type == UACPI_OBJECT_REFERENCE +enum uacpi_reference_kind { + UACPI_REFERENCE_KIND_REFOF = 0, + UACPI_REFERENCE_KIND_LOCAL = 1, + UACPI_REFERENCE_KIND_ARG = 2, + UACPI_REFERENCE_KIND_NAMED = 3, + UACPI_REFERENCE_KIND_PKG_INDEX = 4, +}; + +// object->flags field if object->type == UACPI_OBJECT_STRING +enum uacpi_string_kind { + UACPI_STRING_KIND_NORMAL = 0, + UACPI_STRING_KIND_PATH, +}; + +typedef struct uacpi_buffer { + struct uacpi_shareable shareable; + union { + void *data; + uacpi_u8 *byte_data; + uacpi_char *text; + }; + uacpi_size size; +} uacpi_buffer; + +typedef struct uacpi_package { + struct uacpi_shareable shareable; + uacpi_object **objects; + uacpi_size count; +} uacpi_package; + +typedef struct uacpi_buffer_field { + uacpi_buffer *backing; + uacpi_size bit_index; + uacpi_u32 bit_length; + uacpi_bool force_buffer; +} uacpi_buffer_field; + +typedef struct uacpi_buffer_index { + uacpi_size idx; + uacpi_buffer *buffer; +} uacpi_buffer_index; + +typedef struct uacpi_mutex { + struct uacpi_shareable shareable; + uacpi_handle handle; + uacpi_thread_id owner; + uacpi_u16 depth; + uacpi_u8 sync_level; +} uacpi_mutex; + +typedef struct uacpi_event { + struct uacpi_shareable shareable; + uacpi_handle handle; +} uacpi_event; + +typedef struct uacpi_address_space_handler { + struct uacpi_shareable shareable; + uacpi_region_handler callback; + uacpi_handle user_context; + struct uacpi_address_space_handler *next; + struct uacpi_operation_region *regions; + uacpi_u16 space; + +#define UACPI_ADDRESS_SPACE_HANDLER_DEFAULT (1 << 0) + uacpi_u16 flags; +} uacpi_address_space_handler; + +/* + * NOTE: These are common object headers. + * Any changes to these structs must be propagated to all objects. 
+ * ============================================================== + * Common for the following objects: + * - UACPI_OBJECT_OPERATION_REGION + * - UACPI_OBJECT_PROCESSOR + * - UACPI_OBJECT_DEVICE + * - UACPI_OBJECT_THERMAL_ZONE + */ +typedef struct uacpi_address_space_handlers { + struct uacpi_shareable shareable; + uacpi_address_space_handler *head; +} uacpi_address_space_handlers; + +typedef struct uacpi_device_notify_handler { + uacpi_notify_handler callback; + uacpi_handle user_context; + struct uacpi_device_notify_handler *next; +} uacpi_device_notify_handler; + +/* + * Common for the following objects: + * - UACPI_OBJECT_PROCESSOR + * - UACPI_OBJECT_DEVICE + * - UACPI_OBJECT_THERMAL_ZONE + */ +typedef struct uacpi_handlers { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_head; + uacpi_device_notify_handler *notify_head; +} uacpi_handlers; + +// This region has a corresponding _REG method that was successfully executed +#define UACPI_OP_REGION_STATE_REG_EXECUTED (1 << 0) + +// This region was successfully attached to a handler +#define UACPI_OP_REGION_STATE_ATTACHED (1 << 1) + +typedef struct uacpi_operation_region { + struct uacpi_shareable shareable; + uacpi_address_space_handler *handler; + uacpi_handle user_context; + uacpi_u16 space; + uacpi_u8 state_flags; + uacpi_u64 offset; + uacpi_u64 length; + + union { + // If space == TABLE_DATA + uacpi_u64 table_idx; + + // If space == PCC + uacpi_u8 *internal_buffer; + }; + + // Used to link regions sharing the same handler + struct uacpi_operation_region *next; +} uacpi_operation_region; + +typedef struct uacpi_device { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; +} uacpi_device; + +typedef struct uacpi_processor { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; + uacpi_u8 id; + uacpi_u32 block_address; + uacpi_u8 block_length; +} uacpi_processor; + +typedef struct uacpi_thermal_zone { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; +} uacpi_thermal_zone; + +typedef struct uacpi_power_resource { + uacpi_u8 system_level; + uacpi_u16 resource_order; +} uacpi_power_resource; + +typedef uacpi_status (*uacpi_native_call_handler)( + uacpi_handle ctx, uacpi_object *retval +); + +typedef struct uacpi_control_method { + struct uacpi_shareable shareable; + union { + uacpi_u8 *code; + uacpi_native_call_handler handler; + }; + uacpi_mutex *mutex; + uacpi_u32 size; + uacpi_u8 sync_level : 4; + uacpi_u8 args : 3; + uacpi_u8 is_serialized : 1; + uacpi_u8 named_objects_persist: 1; + uacpi_u8 native_call : 1; + uacpi_u8 owns_code : 1; +} uacpi_control_method; + +typedef enum uacpi_access_type { + UACPI_ACCESS_TYPE_ANY = 0, + UACPI_ACCESS_TYPE_BYTE = 1, + UACPI_ACCESS_TYPE_WORD = 2, + UACPI_ACCESS_TYPE_DWORD = 3, + UACPI_ACCESS_TYPE_QWORD = 4, + UACPI_ACCESS_TYPE_BUFFER = 5, +} uacpi_access_type; + +typedef enum uacpi_lock_rule { + UACPI_LOCK_RULE_NO_LOCK = 0, + UACPI_LOCK_RULE_LOCK = 1, +} uacpi_lock_rule; + +typedef enum uacpi_update_rule { + UACPI_UPDATE_RULE_PRESERVE = 0, + UACPI_UPDATE_RULE_WRITE_AS_ONES = 1, + UACPI_UPDATE_RULE_WRITE_AS_ZEROES = 2, +} uacpi_update_rule; + +typedef enum uacpi_field_unit_kind { + UACPI_FIELD_UNIT_KIND_NORMAL = 0, + UACPI_FIELD_UNIT_KIND_INDEX = 1, + UACPI_FIELD_UNIT_KIND_BANK = 2, +} uacpi_field_unit_kind; +
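+/* + * Illustrative sketch, not part of the uACPI API: uacpi_access_type, + * uacpi_lock_rule and uacpi_update_rule above mirror the AML FieldFlags + * byte which, per the ACPI specification, packs AccessType into bits 0-3, + * LockRule into bit 4 and UpdateRule into bits 5-6. Assuming that layout, + * a decoder could look like: + * + *     static void decode_field_flags(uacpi_u8 flags, + *                                    uacpi_access_type *access, + *                                    uacpi_lock_rule *lock, + *                                    uacpi_update_rule *update) + *     { + *         *access = (uacpi_access_type)(flags & 0xF); + *         *lock = (uacpi_lock_rule)((flags >> 4) & 1); + *         *update = (uacpi_update_rule)((flags >> 5) & 3); + *     } + */ +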
+typedef struct uacpi_field_unit { + struct uacpi_shareable shareable; + + union { + // UACPI_FIELD_UNIT_KIND_NORMAL + struct { + uacpi_namespace_node *region; + }; + + // UACPI_FIELD_UNIT_KIND_INDEX + struct { + struct uacpi_field_unit *index; + struct uacpi_field_unit *data; + }; + + // UACPI_FIELD_UNIT_KIND_BANK + struct { + uacpi_namespace_node *bank_region; + struct uacpi_field_unit *bank_selection; + uacpi_u64 bank_value; + }; + }; + + uacpi_object *connection; + + uacpi_u32 byte_offset; + uacpi_u32 bit_length; + uacpi_u32 pin_offset; + uacpi_u8 bit_offset_within_first_byte; + uacpi_u8 access_width_bytes; + uacpi_u8 access_length; + + uacpi_u8 attributes : 4; + uacpi_u8 update_rule : 2; + uacpi_u8 kind : 2; + uacpi_u8 lock_rule : 1; +} uacpi_field_unit; + +typedef struct uacpi_object { + struct uacpi_shareable shareable; + uacpi_u8 type; + uacpi_u8 flags; + + union { + uacpi_u64 integer; + uacpi_package *package; + uacpi_buffer_field buffer_field; + uacpi_object *inner_object; + uacpi_control_method *method; + uacpi_buffer *buffer; + uacpi_mutex *mutex; + uacpi_event *event; + uacpi_buffer_index buffer_index; + uacpi_operation_region *op_region; + uacpi_device *device; + uacpi_processor *processor; + uacpi_thermal_zone *thermal_zone; + uacpi_address_space_handlers *address_space_handlers; + uacpi_handlers *handlers; + uacpi_power_resource power_resource; + uacpi_field_unit *field_unit; + }; +} uacpi_object; + +uacpi_object *uacpi_create_object(uacpi_object_type type); + +enum uacpi_assign_behavior { + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY, + UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY, +}; + +uacpi_status uacpi_object_assign(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior); + +void uacpi_object_attach_child(uacpi_object *parent, uacpi_object *child); +void uacpi_object_detach_child(uacpi_object *parent); + +struct uacpi_object *uacpi_create_internal_reference( + enum uacpi_reference_kind kind, uacpi_object *child +); +uacpi_object *uacpi_unwrap_internal_reference(uacpi_object *object); + +enum uacpi_prealloc_objects { + UACPI_PREALLOC_OBJECTS_NO, + UACPI_PREALLOC_OBJECTS_YES, +}; + +uacpi_bool uacpi_package_fill( + uacpi_package *pkg, uacpi_size num_elements, + enum uacpi_prealloc_objects prealloc_objects +); + +uacpi_mutex *uacpi_create_mutex(void); +void uacpi_mutex_unref(uacpi_mutex*); + +void uacpi_method_unref(uacpi_control_method*); + +void uacpi_address_space_handler_unref(uacpi_address_space_handler *handler); + +void uacpi_buffer_to_view(uacpi_buffer*, uacpi_data_view*); + +#endif // !UACPI_BAREBONES_MODE diff --git a/sys/include/dev/acpi/uacpi/uacpi/internal/utilities.h b/sys/include/dev/acpi/uacpi/uacpi/internal/utilities.h new file mode 100644 index 0000000..606ec92 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/internal/utilities.h @@ -0,0 +1,45 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/utilities.h> +#include <uacpi/internal/log.h> +#include <uacpi/internal/stdlib.h> + +static inline uacpi_phys_addr uacpi_truncate_phys_addr_with_warn(uacpi_u64 large_addr) +{ + if (sizeof(uacpi_phys_addr) < 8 && large_addr > 0xFFFFFFFF) { + uacpi_warn( + "truncating a physical address 0x%"UACPI_PRIX64 + " outside of address space\n", UACPI_FMT64(large_addr) + ); + } + + return (uacpi_phys_addr)large_addr; +} + +#define UACPI_PTR_TO_VIRT_ADDR(ptr) ((uacpi_virt_addr)(ptr)) +#define UACPI_VIRT_ADDR_TO_PTR(vaddr) ((void*)(vaddr)) + +#define UACPI_PTR_ADD(ptr, value) ((void*)(((uacpi_u8*)(ptr)) + value)) + +/* + * Target buffer must have a length of at least 8 
bytes. + */ +void uacpi_eisa_id_to_string(uacpi_u32, uacpi_char *out_string); + +enum uacpi_base { + UACPI_BASE_AUTO, + UACPI_BASE_OCT = 8, + UACPI_BASE_DEC = 10, + UACPI_BASE_HEX = 16, +}; +uacpi_status uacpi_string_to_integer( + const uacpi_char *str, uacpi_size max_chars, enum uacpi_base base, + uacpi_u64 *out_value +); + +uacpi_bool uacpi_is_valid_nameseg(uacpi_u8 *nameseg); + +void uacpi_free_dynamic_string(const uacpi_char *str); + +#define UACPI_NANOSECONDS_PER_SEC (1000ull * 1000ull * 1000ull) diff --git a/sys/include/dev/acpi/uacpi/uacpi/io.h b/sys/include/dev/acpi/uacpi/uacpi/io.h new file mode 100644 index 0000000..6535a06 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/io.h @@ -0,0 +1,36 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/acpi.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *value); +uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 value); + +typedef struct uacpi_mapped_gas uacpi_mapped_gas; + +/* + * Map a GAS for faster access in the future. The handle returned via + * 'out_mapped' must be freed & unmapped using uacpi_unmap_gas() when + * no longer needed. + */ +uacpi_status uacpi_map_gas(const struct acpi_gas *gas, uacpi_mapped_gas **out_mapped); +void uacpi_unmap_gas(uacpi_mapped_gas*); + +/* + * Same as uacpi_gas_{read,write} but operates on a pre-mapped handle for faster + * access and/or the ability to use in critical sections/irq contexts. + */ +uacpi_status uacpi_gas_read_mapped(const uacpi_mapped_gas *gas, uacpi_u64 *value); +uacpi_status uacpi_gas_write_mapped(const uacpi_mapped_gas *gas, uacpi_u64 value); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/kernel_api.h b/sys/include/dev/acpi/uacpi/uacpi/kernel_api.h new file mode 100644 index 0000000..2a370de --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/kernel_api.h @@ -0,0 +1,375 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/platform/arch_helpers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +// Returns the PHYSICAL address of the RSDP structure via *out_rsdp_address. +uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address); + +/* + * Map a physical memory range starting at 'addr' with length 'len', and return + * a virtual address that can be used to access it. + * + * NOTE: 'addr' may be misaligned; in this case the host is expected to round it + * down to the nearest page-aligned boundary and map that, while making + * sure that at least 'len' bytes are still mapped starting at 'addr'. The + * return value preserves the misaligned offset. + * + * Example for uacpi_kernel_map(0x1ABC, 0xF00): + * 1. Round down the 'addr' we got to the nearest page boundary. + * Considering a PAGE_SIZE of 4096 (or 0x1000), 0x1ABC rounded down + * is 0x1000, offset within the page is 0x1ABC - 0x1000 => 0xABC + * 2. Requested 'len' is 0xF00 bytes, but we just rounded the address + * down by 0xABC bytes, so add those on top. 0xF00 + 0xABC => 0x19BC + * 3. Round up the final 'len' to the nearest PAGE_SIZE boundary, in + * this case 0x19BC is 0x2000 bytes (2 pages if PAGE_SIZE is 4096) + * 4. Call the VMM to map the aligned address 0x1000 (from step 1) + * with length 0x2000 (from step 3). Let's assume the returned + * virtual address for the mapping is 0xF000. + * 5. Add the original offset within page 0xABC (from step 1) to the + * resulting virtual address 0xF000 + 0xABC => 0xFABC. Return it to uACPI.
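+ * + * As a minimal sketch of the steps above (illustrative only; PAGE_SIZE and + * vmm_map() stand in for whatever the host VMM actually provides): + * + *     void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len) + *     { + *         uacpi_phys_addr base = addr & ~(uacpi_phys_addr)(PAGE_SIZE - 1); + *         uacpi_size offset = addr - base;       // step 1 + *         uacpi_size full_len = offset + len;    // step 2 + * + *         // step 3 + *         full_len = (full_len + PAGE_SIZE - 1) & ~(uacpi_size)(PAGE_SIZE - 1); + * + *         // steps 4 & 5 + *         return (uacpi_u8*)vmm_map(base, full_len) + offset; + *     }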
Return it + * to uACPI. + */ +void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len); + +/* + * Unmap a virtual memory range at 'addr' with a length of 'len' bytes. + * + * NOTE: 'addr' may be misaligned, see the comment above 'uacpi_kernel_map'. + * Similar steps to uacpi_kernel_map can be taken to retrieve the + * virtual address originally returned by the VMM for this mapping + * as well as its true length. + */ +void uacpi_kernel_unmap(void *addr, uacpi_size len); + +#ifndef UACPI_FORMATTED_LOGGING +void uacpi_kernel_log(uacpi_log_level, const uacpi_char*); +#else +UACPI_PRINTF_DECL(2, 3) +void uacpi_kernel_log(uacpi_log_level, const uacpi_char*, ...); +void uacpi_kernel_vlog(uacpi_log_level, const uacpi_char*, uacpi_va_list); +#endif + +/* + * Only the above ^^^ API may be used by early table access and + * UACPI_BAREBONES_MODE. + */ +#ifndef UACPI_BAREBONES_MODE + +/* + * Convenience initialization/deinitialization hooks that will be called by + * uACPI automatically when appropriate if compiled-in. + */ +#ifdef UACPI_KERNEL_INITIALIZATION +/* + * This API is invoked for each initialization level so that appropriate parts + * of the host kernel and/or glue code can be initialized at different stages. + * + * uACPI API that triggers calls to uacpi_kernel_initialize and the respective + * 'current_init_lvl' passed to the hook at that stage: + * 1. uacpi_initialize() -> UACPI_INIT_LEVEL_EARLY + * 2. uacpi_namespace_load() -> UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED + * 3. (start of) uacpi_namespace_initialize() -> UACPI_INIT_LEVEL_NAMESPACE_LOADED + * 4. (end of) uacpi_namespace_initialize() -> UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED + */ +uacpi_status uacpi_kernel_initialize(uacpi_init_level current_init_lvl); +void uacpi_kernel_deinitialize(void); +#endif + +/* + * Open a PCI device at 'address' for reading & writing. + * + * Note that this must be able to open any arbitrary PCI device, not just those + * detected during kernel PCI enumeration, since the following pattern is + * relatively common in AML firmware: + * Device (THC0) + * { + * // Device at 00:10.06 + * Name (_ADR, 0x00100006) // _ADR: Address + * + * OperationRegion (THCR, PCI_Config, Zero, 0x0100) + * Field (THCR, ByteAcc, NoLock, Preserve) + * { + * // Vendor ID field in the PCI configuration space + * VDID, 32 + * } + * + * // Check if the device at 00:10.06 actually exists, that is reading + * // from its configuration space returns something other than 0xFFs. + * If ((VDID != 0xFFFFFFFF)) + * { + * // Actually create the rest of the device's body if it's present + * // in the system, otherwise skip it. + * } + * } + * + * The handle returned via 'out_handle' is used to perform IO on the + * configuration space of the device. + */ +uacpi_status uacpi_kernel_pci_device_open( + uacpi_pci_address address, uacpi_handle *out_handle +); +void uacpi_kernel_pci_device_close(uacpi_handle); + +/* + * Read & write the configuration space of a previously open PCI device. 
+ */ +uacpi_status uacpi_kernel_pci_read8( + uacpi_handle device, uacpi_size offset, uacpi_u8 *value +); +uacpi_status uacpi_kernel_pci_read16( + uacpi_handle device, uacpi_size offset, uacpi_u16 *value +); +uacpi_status uacpi_kernel_pci_read32( + uacpi_handle device, uacpi_size offset, uacpi_u32 *value +); + +uacpi_status uacpi_kernel_pci_write8( + uacpi_handle device, uacpi_size offset, uacpi_u8 value +); +uacpi_status uacpi_kernel_pci_write16( + uacpi_handle device, uacpi_size offset, uacpi_u16 value +); +uacpi_status uacpi_kernel_pci_write32( + uacpi_handle device, uacpi_size offset, uacpi_u32 value +); + +/* + * Map a SystemIO address at [base, base + len) and return a kernel-implemented + * handle that can be used for reading and writing the IO range. + * + * NOTE: The x86 architecture uses the in/out family of instructions + * to access the SystemIO address space. + */ +uacpi_status uacpi_kernel_io_map( + uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle +); +void uacpi_kernel_io_unmap(uacpi_handle handle); + +/* + * Read/Write the IO range mapped via uacpi_kernel_io_map + * at a 0-based 'offset' within the range. + * + * NOTE: + * The x86 architecture uses the in/out family of instructions + * to access the SystemIO address space. + * + * You are NOT allowed to break e.g. a 4-byte access into four 1-byte accesses. + * Hardware ALWAYS expects accesses to be of the exact width. + */ +uacpi_status uacpi_kernel_io_read8( + uacpi_handle, uacpi_size offset, uacpi_u8 *out_value +); +uacpi_status uacpi_kernel_io_read16( + uacpi_handle, uacpi_size offset, uacpi_u16 *out_value +); +uacpi_status uacpi_kernel_io_read32( + uacpi_handle, uacpi_size offset, uacpi_u32 *out_value +); + +uacpi_status uacpi_kernel_io_write8( + uacpi_handle, uacpi_size offset, uacpi_u8 in_value +); +uacpi_status uacpi_kernel_io_write16( + uacpi_handle, uacpi_size offset, uacpi_u16 in_value +); +uacpi_status uacpi_kernel_io_write32( + uacpi_handle, uacpi_size offset, uacpi_u32 in_value +); + +/* + * Allocate a block of memory of 'size' bytes. + * The contents of the allocated memory are unspecified. + */ +void *uacpi_kernel_alloc(uacpi_size size); + +#ifdef UACPI_NATIVE_ALLOC_ZEROED +/* + * Allocate a block of memory of 'size' bytes. + * The returned memory block is expected to be zero-filled. + */ +void *uacpi_kernel_alloc_zeroed(uacpi_size size); +#endif + +/* + * Free a previously allocated memory block. + * + * 'mem' might be a NULL pointer. In this case, the call is assumed to be a + * no-op. + * + * An optionally enabled 'size_hint' parameter contains the size of the original + * allocation. Note that in some scenarios this incurs additional cost to + * calculate the object size. + */ +#ifndef UACPI_SIZED_FREES +void uacpi_kernel_free(void *mem); +#else +void uacpi_kernel_free(void *mem, uacpi_size size_hint); +#endif + +/* + * Returns the number of nanosecond ticks elapsed since boot, + * strictly monotonic. + */ +uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void); + +/* + * Spin for N microseconds. + */ +void uacpi_kernel_stall(uacpi_u8 usec); + +/* + * Sleep for N milliseconds. + */ +void uacpi_kernel_sleep(uacpi_u64 msec); + +/* + * Create/free an opaque non-recursive kernel mutex object. + */ +uacpi_handle uacpi_kernel_create_mutex(void); +void uacpi_kernel_free_mutex(uacpi_handle); + +/* + * Create/free an opaque kernel (semaphore-like) event object. 
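+ * + * A counting semaphore with an initial count of zero matches these semantics + * directly. As an illustrative sketch (ksem_* being hypothetical host + * primitives): create maps to ksem_create(0), signal to ksem_post(), wait to + * ksem_timedwait(), and reset to draining the count back to zero.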
+ */ +uacpi_handle uacpi_kernel_create_event(void); +void uacpi_kernel_free_event(uacpi_handle); + +/* + * Returns a unique identifier of the currently executing thread. + * + * The returned thread id cannot be UACPI_THREAD_ID_NONE. + */ +uacpi_thread_id uacpi_kernel_get_thread_id(void); + +/* + * Try to acquire the mutex with a millisecond timeout. + * + * The timeout value has the following meanings: + * 0x0000 - Attempt to acquire the mutex once, in a non-blocking manner + * 0x0001...0xFFFE - Attempt to acquire the mutex for at least 'timeout' + * milliseconds + * 0xFFFF - Infinite wait, block until the mutex is acquired + * + * The following are possible return values: + * 1. UACPI_STATUS_OK - successful acquire operation + * 2. UACPI_STATUS_TIMEOUT - timeout reached while attempting to acquire (or the + * single attempt to acquire was not successful for + * calls with timeout=0) + * 3. Any other value - signifies a host internal error and is treated as such + */ +uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle, uacpi_u16); +void uacpi_kernel_release_mutex(uacpi_handle); + +/* + * Try to wait for an event (counter > 0) with a millisecond timeout. + * A timeout value of 0xFFFF implies infinite wait. + * + * The internal counter is decremented by 1 if wait was successful. + * + * A successful wait is indicated by returning UACPI_TRUE. + */ +uacpi_bool uacpi_kernel_wait_for_event(uacpi_handle, uacpi_u16); + +/* + * Signal the event object by incrementing its internal counter by 1. + * + * This function may be used in interrupt contexts. + */ +void uacpi_kernel_signal_event(uacpi_handle); + +/* + * Reset the event counter to 0. + */ +void uacpi_kernel_reset_event(uacpi_handle); + +/* + * Handle a firmware request. + * + * Currently this is either a Breakpoint or a Fatal operator. + */ +uacpi_status uacpi_kernel_handle_firmware_request(uacpi_firmware_request*); + +/* + * Install an interrupt handler at 'irq'; 'ctx' is passed to the provided + * handler for every invocation. + * + * 'out_irq_handle' is set to a kernel-implemented value that can be used to + * refer to this handler from other APIs. + */ +uacpi_status uacpi_kernel_install_interrupt_handler( + uacpi_u32 irq, uacpi_interrupt_handler, uacpi_handle ctx, + uacpi_handle *out_irq_handle +); + +/* + * Uninstall an interrupt handler. 'irq_handle' is the value returned via + * 'out_irq_handle' during installation. + */ +uacpi_status uacpi_kernel_uninstall_interrupt_handler( + uacpi_interrupt_handler, uacpi_handle irq_handle +); + +/* + * Create/free a kernel spinlock object. + * + * Unlike other types of locks, spinlocks may be used in interrupt contexts. + */ +uacpi_handle uacpi_kernel_create_spinlock(void); +void uacpi_kernel_free_spinlock(uacpi_handle); + +/* + * Lock/unlock helpers for spinlocks. + * + * These are expected to disable interrupts, returning the previous state of CPU + * flags, which can be used to re-enable interrupts if they were enabled + * before. + * + * Note that locking is infallible. + */ +uacpi_cpu_flags uacpi_kernel_lock_spinlock(uacpi_handle); +void uacpi_kernel_unlock_spinlock(uacpi_handle, uacpi_cpu_flags); + +typedef enum uacpi_work_type { + /* + * Schedule a GPE handler method for execution. + * This should be scheduled to run on CPU0 to avoid potential SMI-related + * firmware bugs. + */ + UACPI_WORK_GPE_EXECUTION, + + /* + * Schedule a Notify(device) firmware request for execution. + * This can run on any CPU.
+ */ + UACPI_WORK_NOTIFICATION, +} uacpi_work_type; + +typedef void (*uacpi_work_handler)(uacpi_handle); + +/* + * Schedules deferred work for execution. + * Might be invoked from an interrupt context. + */ +uacpi_status uacpi_kernel_schedule_work( + uacpi_work_type, uacpi_work_handler, uacpi_handle ctx +); + +/* + * Waits for two types of work to finish: + * 1. All in-flight interrupts installed via uacpi_kernel_install_interrupt_handler + * 2. All work scheduled via uacpi_kernel_schedule_work + * + * Note that the waits must be done in this order specifically. + */ +uacpi_status uacpi_kernel_wait_for_work_completion(void); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/log.h b/sys/include/dev/acpi/uacpi/uacpi/log.h new file mode 100644 index 0000000..4fb5457 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/log.h @@ -0,0 +1,40 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum uacpi_log_level { + /* + * Super verbose logging, every op & uop being processed is logged. + * Mostly useful for tracking down hangs/lockups. + */ + UACPI_LOG_DEBUG = 5, + + /* + * A little verbose, every operation region access is traced with a bit of + * extra information on top. + */ + UACPI_LOG_TRACE = 4, + + /* + * Only logs the bare minimum information about state changes and/or + * initialization progress. + */ + UACPI_LOG_INFO = 3, + + /* + * Logs recoverable errors and/or non-important aborts. + */ + UACPI_LOG_WARN = 2, + + /* + * Logs only critical errors that might affect the ability to initialize or + * prevent stable runtime. + */ + UACPI_LOG_ERROR = 1, +} uacpi_log_level; + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/namespace.h b/sys/include/dev/acpi/uacpi/uacpi/namespace.h new file mode 100644 index 0000000..5ef23af --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/namespace.h @@ -0,0 +1,186 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/status.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef struct uacpi_namespace_node uacpi_namespace_node; + +uacpi_namespace_node *uacpi_namespace_root(void); + +typedef enum uacpi_predefined_namespace { + UACPI_PREDEFINED_NAMESPACE_ROOT = 0, + UACPI_PREDEFINED_NAMESPACE_GPE, + UACPI_PREDEFINED_NAMESPACE_PR, + UACPI_PREDEFINED_NAMESPACE_SB, + UACPI_PREDEFINED_NAMESPACE_SI, + UACPI_PREDEFINED_NAMESPACE_TZ, + UACPI_PREDEFINED_NAMESPACE_GL, + UACPI_PREDEFINED_NAMESPACE_OS, + UACPI_PREDEFINED_NAMESPACE_OSI, + UACPI_PREDEFINED_NAMESPACE_REV, + UACPI_PREDEFINED_NAMESPACE_MAX = UACPI_PREDEFINED_NAMESPACE_REV, +} uacpi_predefined_namespace; +uacpi_namespace_node *uacpi_namespace_get_predefined( + uacpi_predefined_namespace +); + +/* + * Returns UACPI_TRUE if the provided 'node' is an alias. + */ +uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node); + +uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node); + +/* + * Returns the type of object stored at the namespace node. + * + * NOTE: due to the existence of the CopyObject operator in AML, the + * return value of this function is subject to TOCTOU bugs. + */ +uacpi_status uacpi_namespace_node_type( + const uacpi_namespace_node *node, uacpi_object_type *out_type +); + +/* + * Returns UACPI_TRUE via 'out' if the type of the object stored at the + * namespace node matches the provided value, UACPI_FALSE otherwise.
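+ * + * For example, a caller walking the namespace might filter for devices: + * + *     uacpi_bool is_device; + * + *     if (uacpi_namespace_node_is(node, UACPI_OBJECT_DEVICE, &is_device) + *             == UACPI_STATUS_OK && is_device) { + *         // 'node' currently holds a Device object + *     }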
+ * + * NOTE: due to the existence of the CopyObject operator in AML, the + * return value of this function is subject to TOCTOU bugs. + */ +uacpi_status uacpi_namespace_node_is( + const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out +); + +/* + * Returns UACPI_TRUE via 'out' if the type of the object stored at the + * namespace node matches any of the type bits in the provided value, + * UACPI_FALSE otherwise. + * + * NOTE: due to the existence of the CopyObject operator in AML, the + * return value of this function is subject to TOCTOU bugs. + */ +uacpi_status uacpi_namespace_node_is_one_of( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_bool *out +); + +uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node); + +uacpi_namespace_node *uacpi_namespace_node_parent( + uacpi_namespace_node *node +); + +uacpi_status uacpi_namespace_node_find( + uacpi_namespace_node *parent, + const uacpi_char *path, + uacpi_namespace_node **out_node +); + +/* + * Same as uacpi_namespace_node_find, except the search recurses upwards when + * the namepath consists of only a single nameseg. Usually, this behavior is + * only desired if resolving a namepath specified in an AML-provided object, + * such as a package element. + */ +uacpi_status uacpi_namespace_node_resolve_from_aml_namepath( + uacpi_namespace_node *scope, + const uacpi_char *path, + uacpi_namespace_node **out_node +); + +typedef uacpi_iteration_decision (*uacpi_iteration_callback) ( + void *user, uacpi_namespace_node *node, uacpi_u32 node_depth +); + +#define UACPI_MAX_DEPTH_ANY 0xFFFFFFFF + +/* + * Depth-first iterate the namespace starting at the first child of 'parent'. + */ +uacpi_status uacpi_namespace_for_each_child_simple( + uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user +); + +/* + * Depth-first iterate the namespace starting at the first child of 'parent'. + * + * 'descending_callback' is invoked the first time a node is visited when + * walking down. 'ascending_callback' is invoked the second time a node is + * visited after we reach the leaf node without children and start walking up. + * Either of the callbacks may be NULL, but not both at the same time. + * + * Only nodes matching 'type_mask' are passed to the callbacks. + * + * 'max_depth' is used to limit the maximum reachable depth from 'parent', + * where 1 is only direct children of 'parent', 2 is children of first-level + * children etc. Use UACPI_MAX_DEPTH_ANY or -1 to specify infinite depth. + */ +uacpi_status uacpi_namespace_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user +); + +/* + * Retrieve the next peer namespace node of '*iter', or, if '*iter' is + * UACPI_NULL, retrieve the first child of 'parent' instead. The resulting + * namespace node is stored at '*iter'. + * + * This API can be used to implement an "iterator" version of the + * for_each_child helpers. + * + * Example usage: + * void recurse(uacpi_namespace_node *parent) { + * uacpi_namespace_node *iter = UACPI_NULL; + * + * while (uacpi_namespace_node_next(parent, &iter) == UACPI_STATUS_OK) { + * // Do something with iter...
+ * descending_callback(iter); + * + * // Recurse down to walk over the children of iter + * recurse(iter); + * } + * } + * + * Prefer the for_each_child family of helpers if possible instead of this API + * as they avoid recursion and/or the need to use dynamic data structures + * entirely. + */ +uacpi_status uacpi_namespace_node_next( + uacpi_namespace_node *parent, uacpi_namespace_node **iter +); + +/* + * Retrieve the next peer namespace node of '*iter', or, if '*iter' is + * UACPI_NULL, retrieve the first child of 'parent' instead. The resulting + * namespace node is stored at '*iter'. Only nodes whose type matches one + * of the types set in 'type_mask' are returned. + * + * See comment above 'uacpi_namespace_node_next' for usage examples. + * + * Prefer the for_each_child family of helpers if possible instead of this API + * as they avoid recursion and/or the need to use dynamic data structures + * entirely. + */ +uacpi_status uacpi_namespace_node_next_typed( + uacpi_namespace_node *parent, uacpi_namespace_node **iter, + uacpi_object_type_bits type_mask +); + +const uacpi_char *uacpi_namespace_node_generate_absolute_path( + const uacpi_namespace_node *node +); +void uacpi_free_absolute_path(const uacpi_char *path); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/notify.h b/sys/include/dev/acpi/uacpi/uacpi/notify.h new file mode 100644 index 0000000..3b66757 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/notify.h @@ -0,0 +1,30 @@ +#pragma once + +#include <uacpi/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Install a Notify() handler to a device node. + * A handler installed to the root node will receive all notifications, even if + * a device already has a dedicated Notify handler. + * 'handler_context' is passed to the handler on every invocation. + */ +uacpi_status uacpi_install_notify_handler( + uacpi_namespace_node *node, uacpi_notify_handler handler, + uacpi_handle handler_context +); + +uacpi_status uacpi_uninstall_notify_handler( + uacpi_namespace_node *node, uacpi_notify_handler handler +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/opregion.h b/sys/include/dev/acpi/uacpi/uacpi/opregion.h new file mode 100644 index 0000000..1eee4f0 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/opregion.h @@ -0,0 +1,47 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/status.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Install an address space handler to a device node. + * The handler is recursively connected to all of the operation regions of + * type 'space' underneath 'device_node'. Note that this recursion stops as + * soon as another device node that already has an address space handler of + * this type installed is encountered. + */ +uacpi_status uacpi_install_address_space_handler( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context +); + +/* + * Uninstall the handler of type 'space' from a given device node. + */ +uacpi_status uacpi_uninstall_address_space_handler( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +); + +/* + * Execute _REG(space, ACPI_REG_CONNECT) for all of the opregions with this + * address space underneath this device.
This should only be called manually + * if you want to register an early handler that must be available before the + * call to uacpi_namespace_initialize(). + */ +uacpi_status uacpi_reg_all_opregions( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/osi.h b/sys/include/dev/acpi/uacpi/uacpi/osi.h new file mode 100644 index 0000000..5330138 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/osi.h @@ -0,0 +1,125 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/status.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_vendor_interface { + UACPI_VENDOR_INTERFACE_NONE = 0, + UACPI_VENDOR_INTERFACE_WINDOWS_2000, + UACPI_VENDOR_INTERFACE_WINDOWS_XP, + UACPI_VENDOR_INTERFACE_WINDOWS_XP_SP1, + UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2003, + UACPI_VENDOR_INTERFACE_WINDOWS_XP_SP2, + UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2003_SP1, + UACPI_VENDOR_INTERFACE_WINDOWS_VISTA, + UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2008, + UACPI_VENDOR_INTERFACE_WINDOWS_VISTA_SP1, + UACPI_VENDOR_INTERFACE_WINDOWS_VISTA_SP2, + UACPI_VENDOR_INTERFACE_WINDOWS_7, + UACPI_VENDOR_INTERFACE_WINDOWS_8, + UACPI_VENDOR_INTERFACE_WINDOWS_8_1, + UACPI_VENDOR_INTERFACE_WINDOWS_10, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS1, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS2, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS3, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS4, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS5, + UACPI_VENDOR_INTERFACE_WINDOWS_10_19H1, + UACPI_VENDOR_INTERFACE_WINDOWS_10_20H1, + UACPI_VENDOR_INTERFACE_WINDOWS_11, + UACPI_VENDOR_INTERFACE_WINDOWS_11_22H2, +} uacpi_vendor_interface; + +/* + * Returns the "latest" AML-queried _OSI vendor interface. + * + * E.g. for the following AML code: + * _OSI("Windows 2021") + * _OSI("Windows 2000") + * + * This function will return UACPI_VENDOR_INTERFACE_WINDOWS_11, since this is + * the latest version of the interface the code queried, even though the + * "Windows 2000" query came after "Windows 2021". + */ +uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void); + +typedef enum uacpi_interface_kind { + UACPI_INTERFACE_KIND_VENDOR = (1 << 0), + UACPI_INTERFACE_KIND_FEATURE = (1 << 1), + UACPI_INTERFACE_KIND_ALL = UACPI_INTERFACE_KIND_VENDOR | + UACPI_INTERFACE_KIND_FEATURE, +} uacpi_interface_kind; + +/* + * Install or uninstall an interface. + * + * The interface kind is used for matching during interface enumeration in + * uacpi_bulk_configure_interfaces(). + * + * After installing an interface, all _OSI queries report it as supported. + */ +uacpi_status uacpi_install_interface( + const uacpi_char *name, uacpi_interface_kind +); +uacpi_status uacpi_uninstall_interface(const uacpi_char *name); + +typedef enum uacpi_host_interface { + UACPI_HOST_INTERFACE_MODULE_DEVICE = 1, + UACPI_HOST_INTERFACE_PROCESSOR_DEVICE, + UACPI_HOST_INTERFACE_3_0_THERMAL_MODEL, + UACPI_HOST_INTERFACE_3_0_SCP_EXTENSIONS, + UACPI_HOST_INTERFACE_PROCESSOR_AGGREGATOR_DEVICE, +} uacpi_host_interface; + +/* + * Same as install/uninstall interface, but comes with an enum of known + * interfaces defined by the ACPI specification. These are disabled by default + * as they depend on host kernel support.
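+ * + * For example, a kernel that actually implements the Module Device + * (ACPI0004) driver could advertise that fact like so (illustrative): + * + *     uacpi_enable_host_interface(UACPI_HOST_INTERFACE_MODULE_DEVICE);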
+ */ +uacpi_status uacpi_enable_host_interface(uacpi_host_interface); +uacpi_status uacpi_disable_host_interface(uacpi_host_interface); + +typedef uacpi_bool (*uacpi_interface_handler) + (const uacpi_char *name, uacpi_bool supported); + +/* + * Set a custom interface query (_OSI) handler. + * + * This callback will be invoked for each _OSI query with the value + * passed in the _OSI, as well as whether the interface was detected as + * supported. The callback is able to override the return value dynamically + * or leave it untouched if desired (e.g. if it simply wants to log something or + * do internal bookkeeping of some kind). + */ +uacpi_status uacpi_set_interface_query_handler(uacpi_interface_handler); + +typedef enum uacpi_interface_action { + UACPI_INTERFACE_ACTION_DISABLE = 0, + UACPI_INTERFACE_ACTION_ENABLE, +} uacpi_interface_action; + +/* + * Bulk interface configuration, used to disable or enable all interfaces that + * match 'kind'. + * + * This is generally only needed to work around buggy hardware, for example if + * requested from the kernel command line. + * + * By default, all vendor strings (like "Windows 2000") are enabled, and all + * host features (like "3.0 Thermal Model") are disabled. + */ +uacpi_status uacpi_bulk_configure_interfaces( + uacpi_interface_action action, uacpi_interface_kind kind +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/platform/arch_helpers.h b/sys/include/dev/acpi/uacpi/uacpi/platform/arch_helpers.h new file mode 100644 index 0000000..2e566c4 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/platform/arch_helpers.h @@ -0,0 +1,38 @@ +#pragma once + +#ifdef UACPI_OVERRIDE_ARCH_HELPERS +#include "uacpi_arch_helpers.h" +#else + +#include <uacpi/platform/atomic.h> + +#ifndef UACPI_ARCH_FLUSH_CPU_CACHE +#define UACPI_ARCH_FLUSH_CPU_CACHE() do {} while (0) +#endif + +typedef unsigned long uacpi_cpu_flags; + +typedef void *uacpi_thread_id; + +/* + * Replace as needed depending on your platform's way to represent thread ids. + * uACPI offers a few more helpers like uacpi_atomic_{load,store}{8,16,32,64,ptr} + * (or you could provide your own helpers) + */ +#ifndef UACPI_ATOMIC_LOAD_THREAD_ID +#define UACPI_ATOMIC_LOAD_THREAD_ID(ptr) ((uacpi_thread_id)uacpi_atomic_load_ptr(ptr)) +#endif + +#ifndef UACPI_ATOMIC_STORE_THREAD_ID +#define UACPI_ATOMIC_STORE_THREAD_ID(ptr, value) uacpi_atomic_store_ptr(ptr, value) +#endif + +/* + * A sentinel value that the kernel promises to NEVER return from + * uacpi_kernel_get_thread_id, or this will break + */ +#ifndef UACPI_THREAD_ID_NONE +#define UACPI_THREAD_ID_NONE ((uacpi_thread_id)-1) +#endif + +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/platform/atomic.h b/sys/include/dev/acpi/uacpi/uacpi/platform/atomic.h new file mode 100644 index 0000000..1d1b570 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/platform/atomic.h @@ -0,0 +1,347 @@ +#pragma once + +/* + * Most of this header is a giant workaround for MSVC to make atomics into a + * somewhat unified interface with how GCC and Clang handle them. + * + * We don't use the absolutely disgusting C11 stdatomic.h header because it is + * unable to operate on non _Atomic types, which enforce implicit sequential + * consistency and alter the behavior of the standard C binary/unary operators. + * + * The strictness of the atomic helpers defined here is assumed to be at least + * acquire for loads and release for stores.
Cmpxchg uses the standard acq/rel + * for success, acq for failure, and is assumed to be strong. + */ + +#ifdef UACPI_OVERRIDE_ATOMIC +#include "uacpi_atomic.h" +#else + +#include <uacpi/platform/compiler.h> + +#if defined(_MSC_VER) && !defined(__clang__) + +#include <intrin.h> + +// mimic __atomic_compare_exchange_n that doesn't exist on MSVC +#define UACPI_MAKE_MSVC_CMPXCHG(width, type, suffix) \ + static inline int uacpi_do_atomic_cmpxchg##width( \ + type volatile *ptr, type volatile *expected, type desired \ + ) \ + { \ + type current; \ + \ + current = _InterlockedCompareExchange##suffix(ptr, *expected, desired); \ + if (current != *expected) { \ + *expected = current; \ + return 0; \ + } \ + return 1; \ + } + +#define UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, width, type) \ + uacpi_do_atomic_cmpxchg##width( \ + (type volatile*)ptr, (type volatile*)expected, desired \ + ) + +#define UACPI_MSVC_ATOMIC_STORE(ptr, value, type, width) \ + _InterlockedExchange##width((type volatile*)(ptr), (type)(value)) + +#define UACPI_MSVC_ATOMIC_LOAD(ptr, type, width) \ + _InterlockedOr##width((type volatile*)(ptr), 0) + +#define UACPI_MSVC_ATOMIC_INC(ptr, type, width) \ + _InterlockedIncrement##width((type volatile*)(ptr)) + +#define UACPI_MSVC_ATOMIC_DEC(ptr, type, width) \ + _InterlockedDecrement##width((type volatile*)(ptr)) + +UACPI_MAKE_MSVC_CMPXCHG(64, __int64, 64) +UACPI_MAKE_MSVC_CMPXCHG(32, long,) +UACPI_MAKE_MSVC_CMPXCHG(16, short, 16) + +#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \ + UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 16, short) + +#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \ + UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 32, long) + +#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \ + UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 64, __int64) + +#define uacpi_atomic_load8(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, char, 8) +#define uacpi_atomic_load16(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, short, 16) +#define uacpi_atomic_load32(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, long,) +#define uacpi_atomic_load64(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, __int64, 64) + +#define uacpi_atomic_store8(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, char, 8) +#define uacpi_atomic_store16(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, short, 16) +#define uacpi_atomic_store32(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, long,) +#define uacpi_atomic_store64(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, __int64, 64) + +#define uacpi_atomic_inc16(ptr) UACPI_MSVC_ATOMIC_INC(ptr, short, 16) +#define uacpi_atomic_inc32(ptr) UACPI_MSVC_ATOMIC_INC(ptr, long,) +#define uacpi_atomic_inc64(ptr) UACPI_MSVC_ATOMIC_INC(ptr, __int64, 64) + +#define uacpi_atomic_dec16(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, short, 16) +#define uacpi_atomic_dec32(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, long,) +#define uacpi_atomic_dec64(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, __int64, 64) +#elif defined(__WATCOMC__) + +#include <stdint.h> + +static int uacpi_do_atomic_cmpxchg16(volatile uint16_t *ptr, volatile uint16_t *expected, uint16_t desired); +#pragma aux uacpi_do_atomic_cmpxchg16 = \ + ".486" \ + "mov ax, [esi]" \ + "lock cmpxchg [edi], bx" \ + "mov [esi], ax" \ + "setz al" \ + "movzx eax, al" \ + parm [ edi ] [ esi ] [ ebx ] \ + value [ eax ] + +static int uacpi_do_atomic_cmpxchg32(volatile uint32_t *ptr, volatile uint32_t *expected, uint32_t desired); +#pragma aux uacpi_do_atomic_cmpxchg32 = \ + ".486" \ + "mov eax, [esi]" \ + "lock cmpxchg [edi], ebx" \ + "mov [esi], eax" \ + "setz al" \ + "movzx eax, al" \ + parm [ 
edi ] [ esi ] [ ebx ] \ + value [ eax ] + +static int uacpi_do_atomic_cmpxchg64_asm(volatile uint64_t *ptr, volatile uint64_t *expected, uint32_t low, uint32_t high); +#pragma aux uacpi_do_atomic_cmpxchg64_asm = \ + ".586" \ + "mov eax, [esi]" \ + "mov edx, [esi + 4]" \ + "lock cmpxchg8b [edi]" \ + "mov [esi], eax" \ + "mov [esi + 4], edx" \ + "setz al" \ + "movzx eax, al" \ + modify [ edx ] \ + parm [ edi ] [ esi ] [ ebx ] [ ecx ] \ + value [ eax ] + +static inline int uacpi_do_atomic_cmpxchg64(volatile uint64_t *ptr, volatile uint64_t *expected, uint64_t desired) { + return uacpi_do_atomic_cmpxchg64_asm(ptr, expected, desired, desired >> 32); +} + +#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \ + uacpi_do_atomic_cmpxchg16((volatile uint16_t*)ptr, (volatile uint16_t*)expected, (uint16_t)desired) +#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \ + uacpi_do_atomic_cmpxchg32((volatile uint32_t*)ptr, (volatile uint32_t*)expected, (uint32_t)desired) +#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \ + uacpi_do_atomic_cmpxchg64((volatile uint64_t*)ptr, (volatile uint64_t*)expected, (uint64_t)desired) + +static uint8_t uacpi_do_atomic_load8(volatile uint8_t *ptr); +#pragma aux uacpi_do_atomic_load8 = \ + "mov al, [esi]" \ + parm [ esi ] \ + value [ al ] + +static uint16_t uacpi_do_atomic_load16(volatile uint16_t *ptr); +#pragma aux uacpi_do_atomic_load16 = \ + "mov ax, [esi]" \ + parm [ esi ] \ + value [ ax ] + +static uint32_t uacpi_do_atomic_load32(volatile uint32_t *ptr); +#pragma aux uacpi_do_atomic_load32 = \ + "mov eax, [esi]" \ + parm [ esi ] \ + value [ eax ] + +static void uacpi_do_atomic_load64_asm(volatile uint64_t *ptr, uint64_t *out); +#pragma aux uacpi_do_atomic_load64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor ebx, ebx" \ + "xor ecx, ecx" \ + "xor edx, edx" \ + "lock cmpxchg8b [esi]" \ + "mov [edi], eax" \ + "mov [edi + 4], edx" \ + modify [ eax ebx ecx edx ] \ + parm [ esi ] [ edi ] + +static inline uint64_t uacpi_do_atomic_load64(volatile uint64_t *ptr) { + uint64_t value; + uacpi_do_atomic_load64_asm(ptr, &value); + return value; +} + +#define uacpi_atomic_load8(ptr) uacpi_do_atomic_load8((volatile uint8_t*)ptr) +#define uacpi_atomic_load16(ptr) uacpi_do_atomic_load16((volatile uint16_t*)ptr) +#define uacpi_atomic_load32(ptr) uacpi_do_atomic_load32((volatile uint32_t*)ptr) +#define uacpi_atomic_load64(ptr) uacpi_do_atomic_load64((volatile uint64_t*)ptr) + +static void uacpi_do_atomic_store8(volatile uint8_t *ptr, uint8_t value); +#pragma aux uacpi_do_atomic_store8 = \ + "mov [edi], al" \ + parm [ edi ] [ eax ] + +static void uacpi_do_atomic_store16(volatile uint16_t *ptr, uint16_t value); +#pragma aux uacpi_do_atomic_store16 = \ + "mov [edi], ax" \ + parm [ edi ] [ eax ] + +static void uacpi_do_atomic_store32(volatile uint32_t *ptr, uint32_t value); +#pragma aux uacpi_do_atomic_store32 = \ + "mov [edi], eax" \ + parm [ edi ] [ eax ] + +static void uacpi_do_atomic_store64_asm(volatile uint64_t *ptr, uint32_t low, uint32_t high); +#pragma aux uacpi_do_atomic_store64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor edx, edx" \ + "retry: lock cmpxchg8b [edi]" \ + "jnz retry" \ + modify [ eax edx ] \ + parm [ edi ] [ ebx ] [ ecx ] + +static inline void uacpi_do_atomic_store64(volatile uint64_t *ptr, uint64_t value) { + uacpi_do_atomic_store64_asm(ptr, value, value >> 32); +} + +#define uacpi_atomic_store8(ptr, value) uacpi_do_atomic_store8((volatile uint8_t*)ptr, (uint8_t)value) +#define uacpi_atomic_store16(ptr, value) uacpi_do_atomic_store16((volatile 
uint16_t*)ptr, (uint16_t)value) +#define uacpi_atomic_store32(ptr, value) uacpi_do_atomic_store32((volatile uint32_t*)ptr, (uint32_t)value) +#define uacpi_atomic_store64(ptr, value) uacpi_do_atomic_store64((volatile uint64_t*)ptr, (uint64_t)value) + +static uint16_t uacpi_do_atomic_inc16(volatile uint16_t *ptr); +#pragma aux uacpi_do_atomic_inc16 = \ + ".486" \ + "mov ax, 1" \ + "lock xadd [edi], ax" \ + "add ax, 1" \ + parm [ edi ] \ + value [ ax ] + +static uint32_t uacpi_do_atomic_inc32(volatile uint32_t *ptr); +#pragma aux uacpi_do_atomic_inc32 = \ + ".486" \ + "mov eax, 1" \ + "lock xadd [edi], eax" \ + "add eax, 1" \ + parm [ edi ] \ + value [ eax ] + +static void uacpi_do_atomic_inc64_asm(volatile uint64_t *ptr, uint64_t *out); +#pragma aux uacpi_do_atomic_inc64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor edx, edx" \ + "mov ebx, 1" \ + "mov ecx, 1" \ + "retry: lock cmpxchg8b [esi]" \ + "mov ebx, eax" \ + "mov ecx, edx" \ + "add ebx, 1" \ + "adc ecx, 0" \ + "jnz retry" \ + "mov [edi], ebx" \ + "mov [edi + 4], ecx" \ + modify [ eax ebx ecx edx ] \ + parm [ esi ] [ edi ] + +static inline uint64_t uacpi_do_atomic_inc64(volatile uint64_t *ptr) { + uint64_t value; + uacpi_do_atomic_inc64_asm(ptr, &value); + return value; +} + +#define uacpi_atomic_inc16(ptr) uacpi_do_atomic_inc16((volatile uint16_t*)ptr) +#define uacpi_atomic_inc32(ptr) uacpi_do_atomic_inc32((volatile uint32_t*)ptr) +#define uacpi_atomic_inc64(ptr) uacpi_do_atomic_inc64((volatile uint64_t*)ptr) + +static uint16_t uacpi_do_atomic_dec16(volatile uint16_t *ptr); +#pragma aux uacpi_do_atomic_dec16 = \ + ".486" \ + "mov ax, -1" \ + "lock xadd [edi], ax" \ + "add ax, -1" \ + parm [ edi ] \ + value [ ax ] + +static uint32_t uacpi_do_atomic_dec32(volatile uint32_t *ptr); +#pragma aux uacpi_do_atomic_dec32 = \ + ".486" \ + "mov eax, -1" \ + "lock xadd [edi], eax" \ + "add eax, -1" \ + parm [ edi ] \ + value [ eax ] + +static void uacpi_do_atomic_dec64_asm(volatile uint64_t *ptr, uint64_t *out); +#pragma aux uacpi_do_atomic_dec64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor edx, edx" \ + "mov ebx, -1" \ + "mov ecx, -1" \ + "retry: lock cmpxchg8b [esi]" \ + "mov ebx, eax" \ + "mov ecx, edx" \ + "sub ebx, 1" \ + "sbb ecx, 0" \ + "jnz retry" \ + "mov [edi], ebx" \ + "mov [edi + 4], ecx" \ + modify [ eax ebx ecx edx ] \ + parm [ esi ] [ edi ] + +static inline uint64_t uacpi_do_atomic_dec64(volatile uint64_t *ptr) { + uint64_t value; + uacpi_do_atomic_dec64_asm(ptr, &value); + return value; +} + +#define uacpi_atomic_dec16(ptr) uacpi_do_atomic_dec16((volatile uint16_t*)ptr) +#define uacpi_atomic_dec32(ptr) uacpi_do_atomic_dec32((volatile uint32_t*)ptr) +#define uacpi_atomic_dec64(ptr) uacpi_do_atomic_dec64((volatile uint64_t*)ptr) +#else + +#define UACPI_DO_CMPXCHG(ptr, expected, desired) \ + __atomic_compare_exchange_n(ptr, expected, desired, 0, \ + __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \ + UACPI_DO_CMPXCHG(ptr, expected, desired) +#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \ + UACPI_DO_CMPXCHG(ptr, expected, desired) +#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \ + UACPI_DO_CMPXCHG(ptr, expected, desired) + +#define uacpi_atomic_load8(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define uacpi_atomic_load16(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define uacpi_atomic_load32(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define uacpi_atomic_load64(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) + +#define uacpi_atomic_store8(ptr, value) __atomic_store_n(ptr, 
value, __ATOMIC_RELEASE) +#define uacpi_atomic_store16(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) +#define uacpi_atomic_store32(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) +#define uacpi_atomic_store64(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) + +#define uacpi_atomic_inc16(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_inc32(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_inc64(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) + +#define uacpi_atomic_dec16(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_dec32(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_dec64(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#endif + +#if UACPI_POINTER_SIZE == 4 +#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load32(ptr_to_ptr) +#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store32(ptr_to_ptr, value) +#else +#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load64(ptr_to_ptr) +#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store64(ptr_to_ptr, value) +#endif + +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/platform/compiler.h b/sys/include/dev/acpi/uacpi/uacpi/platform/compiler.h new file mode 100644 index 0000000..78aab08 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/platform/compiler.h @@ -0,0 +1,123 @@ +#pragma once + +/* + * Compiler-specific attributes/macros go here. This is the default placeholder + * that should work for MSVC/GCC/clang. + */ + +#ifdef UACPI_OVERRIDE_COMPILER +#include "uacpi_compiler.h" +#else + +#define UACPI_ALIGN(x) __declspec(align(x)) + +#if defined(__WATCOMC__) +#define UACPI_STATIC_ASSERT(expr, msg) +#elif defined(__cplusplus) +#define UACPI_STATIC_ASSERT static_assert +#else +#define UACPI_STATIC_ASSERT _Static_assert +#endif + +#ifdef _MSC_VER + #include <intrin.h> + + #define UACPI_ALWAYS_INLINE __forceinline + + #define UACPI_PACKED(decl) \ + __pragma(pack(push, 1)) \ + decl; \ + __pragma(pack(pop)) +#elif defined(__WATCOMC__) + #define UACPI_ALWAYS_INLINE inline + #define UACPI_PACKED(decl) _Packed decl; +#else + #define UACPI_ALWAYS_INLINE inline __attribute__((always_inline)) + #define UACPI_PACKED(decl) decl __attribute__((packed)); +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define uacpi_unlikely(expr) __builtin_expect(!!(expr), 0) + #define uacpi_likely(expr) __builtin_expect(!!(expr), 1) + + #if __has_attribute(__fallthrough__) + #define UACPI_FALLTHROUGH __attribute__((__fallthrough__)) + #endif + + #define UACPI_MAYBE_UNUSED __attribute__ ((unused)) + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wunused-parameter\"") + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END \ + _Pragma("GCC diagnostic pop") + + #ifdef __clang__ + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) \ + __attribute__((format(printf, fmt_idx, args_idx))) + #else + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) \ + __attribute__((format(gnu_printf, fmt_idx, args_idx))) + #endif + + #define UACPI_COMPILER_HAS_BUILTIN_MEMCPY + #define UACPI_COMPILER_HAS_BUILTIN_MEMMOVE + #define UACPI_COMPILER_HAS_BUILTIN_MEMSET + #define UACPI_COMPILER_HAS_BUILTIN_MEMCMP +#elif defined(__WATCOMC__) + #define uacpi_unlikely(expr) expr + #define uacpi_likely(expr) expr + + /* + * The OpenWatcom documentation suggests this should be done using + * _Pragma("off (unreferenced)") and _Pragma("pop (unreferenced)"), + * but 
these pragmas appear to be no-ops. Use inline as the next best thing. + * Note that OpenWatcom accepts redundant modifiers without a warning, + * so UACPI_MAYBE_UNUSED inline still works. + */ + #define UACPI_MAYBE_UNUSED inline + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END + + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) +#else + #define uacpi_unlikely(expr) expr + #define uacpi_likely(expr) expr + + #define UACPI_MAYBE_UNUSED + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END + + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) +#endif + +#ifndef UACPI_FALLTHROUGH + #define UACPI_FALLTHROUGH do {} while (0) +#endif + +#ifndef UACPI_POINTER_SIZE + #ifdef _WIN32 + #ifdef _WIN64 + #define UACPI_POINTER_SIZE 8 + #else + #define UACPI_POINTER_SIZE 4 + #endif + #elif defined(__GNUC__) + #define UACPI_POINTER_SIZE __SIZEOF_POINTER__ + #elif defined(__WATCOMC__) + #ifdef __386__ + #define UACPI_POINTER_SIZE 4 + #elif defined(__I86__) + #error uACPI does not support 16-bit mode compilation + #else + #error Unknown target architecture + #endif + #else + #error Failed to detect pointer size + #endif +#endif + +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/platform/config.h b/sys/include/dev/acpi/uacpi/uacpi/platform/config.h new file mode 100644 index 0000000..dff043f --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/platform/config.h @@ -0,0 +1,162 @@ +#pragma once + +#ifdef UACPI_OVERRIDE_CONFIG +#include "uacpi_config.h" +#else + +#include <uacpi/helpers.h> +#include <uacpi/log.h> + +/* + * ======================= + * Context-related options + * ======================= + */ +#ifndef UACPI_DEFAULT_LOG_LEVEL + #define UACPI_DEFAULT_LOG_LEVEL UACPI_LOG_INFO +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_LOG_LEVEL < UACPI_LOG_ERROR || + UACPI_DEFAULT_LOG_LEVEL > UACPI_LOG_DEBUG, + "configured default log level is invalid" +); + +#ifndef UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS + #define UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS 30 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS < 1, + "configured default loop timeout is invalid (expecting at least 1 second)" +); + +#ifndef UACPI_DEFAULT_MAX_CALL_STACK_DEPTH + #define UACPI_DEFAULT_MAX_CALL_STACK_DEPTH 256 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_MAX_CALL_STACK_DEPTH < 4, + "configured default max call stack depth is invalid " + "(expecting at least 4 frames)" +); + +/* + * =================== + * Kernel-api options + * =================== + */ + +/* + * Convenience initialization/deinitialization hooks that will be called by + * uACPI automatically when appropriate if compiled-in. + */ +// #define UACPI_KERNEL_INITIALIZATION + +/* + * Makes kernel api logging callbacks work with unformatted printf-style + * strings and va_args instead of a pre-formatted string. Can be useful if + * your native logging is implemented in terms of this format as well. + */ +// #define UACPI_FORMATTED_LOGGING + +/* + * Makes uacpi_kernel_free take in an additional 'size_hint' parameter, which + * contains the size of the original allocation. Note that this comes with a + * performance penalty in some cases. + */ +// #define UACPI_SIZED_FREES + + +/* + * Makes uacpi_kernel_alloc_zeroed mandatory to implement by the host, uACPI + * will not provide a default implementation if this is enabled. 
+ */ +// #define UACPI_NATIVE_ALLOC_ZEROED + +/* + * ========================= + * Platform-specific options + * ========================= + */ + +/* + * Makes uACPI use the internal versions of mem{cpy,move,set,cmp} instead of + * relying on the host to provide them. Note that compilers like clang and GCC + * rely on these being available by default, even in freestanding mode, so + * compiling uACPI may theoretically generate implicit dependencies on them + * even if this option is defined. + */ +// #define UACPI_USE_BUILTIN_STRING + +/* + * Turns uacpi_phys_addr and uacpi_io_addr into a 32-bit type, and adds extra + * code for address truncation. Needed for e.g. i686 platforms without PAE + * support. + */ +// #define UACPI_PHYS_ADDR_IS_32BITS + +/* + * Switches uACPI into reduced-hardware-only mode. Strips all full-hardware + * ACPI support code at compile-time, including the event subsystem, the global + * lock, and other full-hardware features. + */ +// #define UACPI_REDUCED_HARDWARE + +/* + * Switches uACPI into tables-subsystem-only mode and strips all other code. + * This means only the table API will be usable, no other subsystems are + * compiled in. In this mode, uACPI only depends on the following kernel APIs: + * - uacpi_kernel_get_rsdp + * - uacpi_kernel_{map,unmap} + * - uacpi_kernel_log + * + * Use uacpi_setup_early_table_access to initialize, uacpi_state_reset to + * deinitialize. + * + * This mode is primarily designed for these three use-cases: + * - Bootloader/pre-kernel environments that need to parse ACPI tables, but + * don't actually need a fully-featured AML interpreter or anything else + * that a full ACPI implementation entails. + * - A micro-kernel that has the full AML interpreter running in userspace, but + * still needs to parse ACPI tables to bootstrap allocators, timers, SMP etc. + * - A WIP kernel that needs to parse ACPI tables for bootstrapping SMP/timers, + * ECAM, etc., but doesn't yet have enough subsystems implemented in order + * to run a fully-featured AML interpreter. + */ +// #define UACPI_BAREBONES_MODE + +/* + * ============= + * Misc. options + * ============= + */ + +/* + * If UACPI_FORMATTED_LOGGING is not enabled, this is the maximum length of the + * pre-formatted message that is passed to the logging callback. + */ +#ifndef UACPI_PLAIN_LOG_BUFFER_SIZE + #define UACPI_PLAIN_LOG_BUFFER_SIZE 128 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_PLAIN_LOG_BUFFER_SIZE < 16, + "configured log buffer size is too small (expecting at least 16 bytes)" +); + +/* + * The size of the table descriptor inline storage. All table descriptors past + * this length will be stored in a dynamically allocated heap array. The size + * of one table descriptor is approximately 56 bytes.
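+ * + * For example, a host expecting an unusually large number of SSDTs could + * raise the inline capacity at build time, e.g. by passing + * -DUACPI_STATIC_TABLE_ARRAY_LEN=32 to the compiler; the #ifndef below is + * what makes the value overridable.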
+ */
+#ifndef UACPI_STATIC_TABLE_ARRAY_LEN
+ #define UACPI_STATIC_TABLE_ARRAY_LEN 16
+#endif
+
+UACPI_BUILD_BUG_ON_WITH_MSG(
+ UACPI_STATIC_TABLE_ARRAY_LEN < 1,
+ "configured static table array length is too small (expecting at least 1)"
+);
+
+#endif
diff --git a/sys/include/dev/acpi/uacpi/uacpi/platform/libc.h b/sys/include/dev/acpi/uacpi/uacpi/platform/libc.h
new file mode 100644
index 0000000..44c9013
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/platform/libc.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#ifdef UACPI_OVERRIDE_LIBC
+#include "uacpi_libc.h"
+#else
+/*
+ * The following libc functions are used internally by uACPI and have a default
+ * (sub-optimal) implementation:
+ * - strcmp
+ * - strnlen
+ * - strlen
+ * - snprintf
+ * - vsnprintf
+ *
+ * The following use a builtin implementation only if UACPI_USE_BUILTIN_STRING
+ * is defined (more information can be found in the config.h header):
+ * - memcpy
+ * - memmove
+ * - memset
+ * - memcmp
+ *
+ * In case your platform happens to implement optimized versions of the helpers
+ * above, you can make uACPI use those instead by overriding them like so:
+ *
+ * #define uacpi_memcpy my_fast_memcpy
+ * #define uacpi_snprintf my_fast_snprintf
+ */
+#endif
diff --git a/sys/include/dev/acpi/uacpi/uacpi/platform/types.h b/sys/include/dev/acpi/uacpi/uacpi/platform/types.h
new file mode 100644
index 0000000..f4a7cf9
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/platform/types.h
@@ -0,0 +1,64 @@
+#pragma once
+
+/*
+ * Platform-specific types go here. This is the default placeholder using
+ * types from the standard headers.
+ */
+
+#ifdef UACPI_OVERRIDE_TYPES
+#include "uacpi_types.h"
+#else
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdarg.h>
+
+#include <uacpi/helpers.h>
+
+typedef uint8_t uacpi_u8;
+typedef uint16_t uacpi_u16;
+typedef uint32_t uacpi_u32;
+typedef uint64_t uacpi_u64;
+
+typedef int8_t uacpi_i8;
+typedef int16_t uacpi_i16;
+typedef int32_t uacpi_i32;
+typedef int64_t uacpi_i64;
+
+#define UACPI_TRUE true
+#define UACPI_FALSE false
+typedef bool uacpi_bool;
+
+#define UACPI_NULL NULL
+
+typedef uintptr_t uacpi_uintptr;
+typedef uacpi_uintptr uacpi_virt_addr;
+typedef size_t uacpi_size;
+
+typedef va_list uacpi_va_list;
+#define uacpi_va_start va_start
+#define uacpi_va_end va_end
+#define uacpi_va_arg va_arg
+
+typedef char uacpi_char;
+
+#define uacpi_offsetof offsetof
+
+/*
+ * We use unsigned long long for 64-bit number formatting because 64-bit types
+ * don't have a standard way to format them. The inttypes.h header is not
+ * freestanding, so it's not practical to force the user to define the
+ * corresponding PRI macros. Moreover, unsigned long long is required to be
+ * at least 64 bits as per C99.
+ */
+UACPI_BUILD_BUG_ON_WITH_MSG(
+ sizeof(unsigned long long) < 8,
+ "unsigned long long must be at least 64 bits large as per C99"
+);
+#define UACPI_PRIu64 "llu"
+#define UACPI_PRIx64 "llx"
+#define UACPI_PRIX64 "llX"
+#define UACPI_FMT64(val) ((unsigned long long)(val))
+
+#endif
diff --git a/sys/include/dev/acpi/uacpi/uacpi/registers.h b/sys/include/dev/acpi/uacpi/uacpi/registers.h
new file mode 100644
index 0000000..cdffb97
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/registers.h
@@ -0,0 +1,105 @@
+#pragma once
+
+#include <uacpi/types.h>
+
+/*
+ * BEFORE YOU USE THIS API:
+ * uACPI manages FADT registers on its own entirely; you should only use this
+ * API directly if there's absolutely no other way for your use case, e.g.
+ * implementing a CPU idle state driver that does C state switching or similar. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_register { + UACPI_REGISTER_PM1_STS = 0, + UACPI_REGISTER_PM1_EN, + UACPI_REGISTER_PM1_CNT, + UACPI_REGISTER_PM_TMR, + UACPI_REGISTER_PM2_CNT, + UACPI_REGISTER_SLP_CNT, + UACPI_REGISTER_SLP_STS, + UACPI_REGISTER_RESET, + UACPI_REGISTER_SMI_CMD, + UACPI_REGISTER_MAX = UACPI_REGISTER_SMI_CMD, +} uacpi_register; + +/* + * Read a register from FADT + * + * NOTE: write-only bits (if any) are cleared automatically + */ +uacpi_status uacpi_read_register(uacpi_register, uacpi_u64*); + +/* + * Write a register from FADT + * + * NOTE: + * - Preserved bits (if any) are preserved automatically + * - If a register is made up of two (e.g. PM1a and PM1b) parts, the input + * is written to both at the same time + */ +uacpi_status uacpi_write_register(uacpi_register, uacpi_u64); + +/* + * Write a register from FADT + * + * NOTE: + * - Preserved bits (if any) are preserved automatically + * - For registers that are made up of two (e.g. PM1a and PM1b) parts, the + * provided values are written to their respective physical register + */ +uacpi_status uacpi_write_registers(uacpi_register, uacpi_u64, uacpi_u64); + +typedef enum uacpi_register_field { + UACPI_REGISTER_FIELD_TMR_STS = 0, + UACPI_REGISTER_FIELD_BM_STS, + UACPI_REGISTER_FIELD_GBL_STS, + UACPI_REGISTER_FIELD_PWRBTN_STS, + UACPI_REGISTER_FIELD_SLPBTN_STS, + UACPI_REGISTER_FIELD_RTC_STS, + UACPI_REGISTER_FIELD_PCIEX_WAKE_STS, + UACPI_REGISTER_FIELD_HWR_WAK_STS, + UACPI_REGISTER_FIELD_WAK_STS, + UACPI_REGISTER_FIELD_TMR_EN, + UACPI_REGISTER_FIELD_GBL_EN, + UACPI_REGISTER_FIELD_PWRBTN_EN, + UACPI_REGISTER_FIELD_SLPBTN_EN, + UACPI_REGISTER_FIELD_RTC_EN, + UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS, + UACPI_REGISTER_FIELD_SCI_EN, + UACPI_REGISTER_FIELD_BM_RLD, + UACPI_REGISTER_FIELD_GBL_RLS, + UACPI_REGISTER_FIELD_SLP_TYP, + UACPI_REGISTER_FIELD_HWR_SLP_TYP, + UACPI_REGISTER_FIELD_SLP_EN, + UACPI_REGISTER_FIELD_HWR_SLP_EN, + UACPI_REGISTER_FIELD_ARB_DIS, + UACPI_REGISTER_FIELD_MAX = UACPI_REGISTER_FIELD_ARB_DIS, +} uacpi_register_field; + +/* + * Read a field from a FADT register + * + * NOTE: The value is automatically masked and shifted down as appropriate, + * the client code doesn't have to do any bit manipulation. E.g. for + * a field at 0b???XX??? the returned value will contain just the 0bXX + */ +uacpi_status uacpi_read_register_field(uacpi_register_field, uacpi_u64*); + +/* + * Write to a field of a FADT register + * + * NOTE: The value is automatically masked and shifted up as appropriate, + * the client code doesn't have to do any bit manipulation. E.g. for + * a field at 0b???XX??? 
the passed value should be just 0bXX + */ +uacpi_status uacpi_write_register_field(uacpi_register_field, uacpi_u64); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/resources.h b/sys/include/dev/acpi/uacpi/uacpi/resources.h new file mode 100644 index 0000000..f929f1d --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/resources.h @@ -0,0 +1,740 @@ +#pragma once + +#include <uacpi/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_resource_type { + UACPI_RESOURCE_TYPE_IRQ, + UACPI_RESOURCE_TYPE_EXTENDED_IRQ, + + UACPI_RESOURCE_TYPE_DMA, + UACPI_RESOURCE_TYPE_FIXED_DMA, + + UACPI_RESOURCE_TYPE_IO, + UACPI_RESOURCE_TYPE_FIXED_IO, + + UACPI_RESOURCE_TYPE_ADDRESS16, + UACPI_RESOURCE_TYPE_ADDRESS32, + UACPI_RESOURCE_TYPE_ADDRESS64, + UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED, + + UACPI_RESOURCE_TYPE_MEMORY24, + UACPI_RESOURCE_TYPE_MEMORY32, + UACPI_RESOURCE_TYPE_FIXED_MEMORY32, + + UACPI_RESOURCE_TYPE_START_DEPENDENT, + UACPI_RESOURCE_TYPE_END_DEPENDENT, + + // Up to 7 bytes + UACPI_RESOURCE_TYPE_VENDOR_SMALL, + + // Up to 2^16 - 1 bytes + UACPI_RESOURCE_TYPE_VENDOR_LARGE, + + UACPI_RESOURCE_TYPE_GENERIC_REGISTER, + UACPI_RESOURCE_TYPE_GPIO_CONNECTION, + + // These must always be contiguous in this order + UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION, + UACPI_RESOURCE_TYPE_SERIAL_SPI_CONNECTION, + UACPI_RESOURCE_TYPE_SERIAL_UART_CONNECTION, + UACPI_RESOURCE_TYPE_SERIAL_CSI2_CONNECTION, + + UACPI_RESOURCE_TYPE_PIN_FUNCTION, + UACPI_RESOURCE_TYPE_PIN_CONFIGURATION, + UACPI_RESOURCE_TYPE_PIN_GROUP, + UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION, + UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION, + + UACPI_RESOURCE_TYPE_CLOCK_INPUT, + + UACPI_RESOURCE_TYPE_END_TAG, + UACPI_RESOURCE_TYPE_MAX = UACPI_RESOURCE_TYPE_END_TAG, +} uacpi_resource_type; + +typedef struct uacpi_resource_source { + uacpi_u8 index; + uacpi_bool index_present; + uacpi_u16 length; + uacpi_char *string; +} uacpi_resource_source; + +/* + * This applies to IRQ & StartDependent resources only. The DONT_CARE value is + * used for deserialization into the AML format to signify that the serializer + * is allowed to optimize the length down if possible. Note that this is + * generally not allowed unless the resource is generated by the caller: + * + * -- ACPI 6.5 ------------------------------------------------------------ + * The resource descriptors in the byte stream argument must be specified + * exactly as listed in the _CRS byte stream - meaning that the identical + * resource descriptors must appear in the identical order, resulting in a + * buffer of exactly the same length. Optimizations such as changing an + * IRQ descriptor to an IRQNoFlags descriptor (or vice-versa) must not be + * performed. Similarly, changing StartDependentFn to StartDependentFnNoPri + * is not allowed. 
+ * ------------------------------------------------------------------------ + */ +enum uacpi_resource_length_kind { + UACPI_RESOURCE_LENGTH_KIND_DONT_CARE = 0, + UACPI_RESOURCE_LENGTH_KIND_ONE_LESS, + UACPI_RESOURCE_LENGTH_KIND_FULL, +}; + +// triggering fields +#define UACPI_TRIGGERING_EDGE 1 +#define UACPI_TRIGGERING_LEVEL 0 + +// polarity +#define UACPI_POLARITY_ACTIVE_HIGH 0 +#define UACPI_POLARITY_ACTIVE_LOW 1 +#define UACPI_POLARITY_ACTIVE_BOTH 2 + +// sharing +#define UACPI_EXCLUSIVE 0 +#define UACPI_SHARED 1 + +// wake_capability +#define UACPI_WAKE_CAPABLE 1 +#define UACPI_NOT_WAKE_CAPABLE 0 + +typedef struct uacpi_resource_irq { + uacpi_u8 length_kind; + uacpi_u8 triggering; + uacpi_u8 polarity; + uacpi_u8 sharing; + uacpi_u8 wake_capability; + uacpi_u8 num_irqs; + uacpi_u8 irqs[]; +} uacpi_resource_irq; + +typedef struct uacpi_resource_extended_irq { + uacpi_u8 direction; + uacpi_u8 triggering; + uacpi_u8 polarity; + uacpi_u8 sharing; + uacpi_u8 wake_capability; + uacpi_u8 num_irqs; + uacpi_resource_source source; + uacpi_u32 irqs[]; +} uacpi_resource_extended_irq; + +// transfer_type +#define UACPI_TRANSFER_TYPE_8_BIT 0b00 +#define UACPI_TRANSFER_TYPE_8_AND_16_BIT 0b01 +#define UACPI_TRANSFER_TYPE_16_BIT 0b10 + +// bus_master_status +#define UACPI_BUS_MASTER 0b1 + +// channel_speed +#define UACPI_DMA_COMPATIBILITY 0b00 +#define UACPI_DMA_TYPE_A 0b01 +#define UACPI_DMA_TYPE_B 0b10 +#define UACPI_DMA_TYPE_F 0b11 + +// transfer_width +#define UACPI_TRANSFER_WIDTH_8 0x00 +#define UACPI_TRANSFER_WIDTH_16 0x01 +#define UACPI_TRANSFER_WIDTH_32 0x02 +#define UACPI_TRANSFER_WIDTH_64 0x03 +#define UACPI_TRANSFER_WIDTH_128 0x04 +#define UACPI_TRANSFER_WIDTH_256 0x05 + +typedef struct uacpi_resource_dma { + uacpi_u8 transfer_type; + uacpi_u8 bus_master_status; + uacpi_u8 channel_speed; + uacpi_u8 num_channels; + uacpi_u8 channels[]; +} uacpi_resource_dma; + +typedef struct uacpi_resource_fixed_dma { + uacpi_u16 request_line; + uacpi_u16 channel; + uacpi_u8 transfer_width; +} uacpi_resource_fixed_dma; + +// decode_type +#define UACPI_DECODE_16 0b1 +#define UACPI_DECODE_10 0b0 + +typedef struct uacpi_resource_io { + uacpi_u8 decode_type; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u8 alignment; + uacpi_u8 length; +} uacpi_resource_io; + +typedef struct uacpi_resource_fixed_io { + uacpi_u16 address; + uacpi_u8 length; +} uacpi_resource_fixed_io; + +// write_status +#define UACPI_NON_WRITABLE 0 +#define UACPI_WRITABLE 1 + +// caching +#define UACPI_NON_CACHEABLE 0 +#define UACPI_CACHEABLE 1 +#define UACPI_CACHEABLE_WRITE_COMBINING 2 +#define UACPI_PREFETCHABLE 3 + +// range_type +#define UACPI_RANGE_TYPE_MEMORY 0 +#define UACPI_RANGE_TYPE_RESERVED 1 +#define UACPI_RANGE_TYPE_ACPI 2 +#define UACPI_RANGE_TYPE_NVS 3 + +// address_common->type +#define UACPI_RANGE_MEMORY 0 +#define UACPI_RANGE_IO 1 +#define UACPI_RANGE_BUS 2 + +// translation +#define UACPI_IO_MEM_TRANSLATION 1 +#define UACPI_IO_MEM_STATIC 0 + +// translation_type +#define UACPI_TRANSLATION_DENSE 0 +#define UACPI_TRANSLATION_SPARSE 1 + +// direction +#define UACPI_PRODUCER 0 +#define UACPI_CONSUMER 1 + +// decode_type +#define UACPI_POSITIVE_DECODE 0 +#define UACPI_SUBTRACTIVE_DECODE 1 + +/* + * DO NOT USE! 
SLATED FOR REMOVAL AT 3.0 + * See the version without the typo above (UACPI_POSITIVE_DECODE) + */ +#define UACPI_POISITIVE_DECODE 0 + +// fixed_min_address & fixed_max_address +#define UACPI_ADDRESS_NOT_FIXED 0 +#define UACPI_ADDRESS_FIXED 1 + +typedef struct uacpi_memory_attribute { + uacpi_u8 write_status; + uacpi_u8 caching; + uacpi_u8 range_type; + uacpi_u8 translation; +} uacpi_memory_attribute; + +typedef struct uacpi_io_attribute { + uacpi_u8 range_type; + uacpi_u8 translation; + uacpi_u8 translation_type; +} uacpi_io_attribute; + +typedef union uacpi_address_attribute { + uacpi_memory_attribute memory; + uacpi_io_attribute io; + uacpi_u8 type_specific; +} uacpi_address_attribute; + +typedef struct uacpi_resource_address_common { + uacpi_address_attribute attribute; + uacpi_u8 type; + uacpi_u8 direction; + uacpi_u8 decode_type; + uacpi_u8 fixed_min_address; + uacpi_u8 fixed_max_address; +} uacpi_resource_address_common; + +typedef struct uacpi_resource_address16 { + uacpi_resource_address_common common; + uacpi_u16 granularity; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 translation_offset; + uacpi_u16 address_length; + uacpi_resource_source source; +} uacpi_resource_address16; + +typedef struct uacpi_resource_address32 { + uacpi_resource_address_common common; + uacpi_u32 granularity; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 translation_offset; + uacpi_u32 address_length; + uacpi_resource_source source; +} uacpi_resource_address32; + +typedef struct uacpi_resource_address64 { + uacpi_resource_address_common common; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 address_length; + uacpi_resource_source source; +} uacpi_resource_address64; + +typedef struct uacpi_resource_address64_extended { + uacpi_resource_address_common common; + uacpi_u8 revision_id; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 address_length; + uacpi_u64 attributes; +} uacpi_resource_address64_extended; + +typedef struct uacpi_resource_memory24 { + uacpi_u8 write_status; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 alignment; + uacpi_u16 length; +} uacpi_resource_memory24; + +typedef struct uacpi_resource_memory32 { + uacpi_u8 write_status; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 alignment; + uacpi_u32 length; +} uacpi_resource_memory32; + +typedef struct uacpi_resource_fixed_memory32 { + uacpi_u8 write_status; + uacpi_u32 address; + uacpi_u32 length; +} uacpi_resource_fixed_memory32; + +// compatibility & performance +#define UACPI_GOOD 0 +#define UACPI_ACCEPTABLE 1 +#define UACPI_SUB_OPTIMAL 2 + +typedef struct uacpi_resource_start_dependent { + uacpi_u8 length_kind; + uacpi_u8 compatibility; + uacpi_u8 performance; +} uacpi_resource_start_dependent; + +typedef struct uacpi_resource_vendor_defined { + uacpi_u8 length; + uacpi_u8 data[]; +} uacpi_resource_vendor; + +typedef struct uacpi_resource_vendor_typed { + uacpi_u16 length; + uacpi_u8 sub_type; + uacpi_u8 uuid[16]; + uacpi_u8 data[]; +} uacpi_resource_vendor_typed; + +typedef struct uacpi_resource_generic_register { + uacpi_u8 address_space_id; + uacpi_u8 bit_width; + uacpi_u8 bit_offset; + uacpi_u8 access_size; + uacpi_u64 address; +} uacpi_resource_generic_register; + +// type +#define UACPI_GPIO_CONNECTION_INTERRUPT 0x00 +#define UACPI_GPIO_CONNECTION_IO 0x01 + +typedef struct uacpi_interrupt_connection_flags { + uacpi_u8 triggering; + uacpi_u8 polarity; + uacpi_u8 sharing; + 
uacpi_u8 wake_capability; +} uacpi_interrupt_connection_flags; + +// restriction +#define UACPI_IO_RESTRICTION_NONE 0x0 +#define UACPI_IO_RESTRICTION_INPUT 0x1 +#define UACPI_IO_RESTRICTION_OUTPUT 0x2 +#define UACPI_IO_RESTRICTION_NONE_PRESERVE 0x3 + +typedef struct uacpi_io_connection_flags { + uacpi_u8 restriction; + uacpi_u8 sharing; +} uacpi_io_connection_flags; + +// pull_configuration +#define UACPI_PIN_CONFIG_DEFAULT 0x00 +#define UACPI_PIN_CONFIG_PULL_UP 0x01 +#define UACPI_PIN_CONFIG_PULL_DOWN 0x02 +#define UACPI_PIN_CONFIG_NO_PULL 0x03 + +typedef struct uacpi_resource_gpio_connection { + uacpi_u8 revision_id; + uacpi_u8 type; + uacpi_u8 direction; + + union { + uacpi_interrupt_connection_flags intr; + uacpi_io_connection_flags io; + uacpi_u16 type_specific; + }; + + uacpi_u8 pull_configuration; + uacpi_u16 drive_strength; + uacpi_u16 debounce_timeout; + uacpi_u16 vendor_data_length; + uacpi_u16 pin_table_length; + uacpi_resource_source source; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_gpio_connection; + +// mode +#define UACPI_MODE_CONTROLLER_INITIATED 0x0 +#define UACPI_MODE_DEVICE_INITIATED 0x1 + +typedef struct uacpi_resource_serial_bus_common { + uacpi_u8 revision_id; + uacpi_u8 type; + uacpi_u8 mode; + uacpi_u8 direction; + uacpi_u8 sharing; + uacpi_u8 type_revision_id; + uacpi_u16 type_data_length; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_u8 *vendor_data; +} uacpi_resource_serial_bus_common; + +// addressing_mode +#define UACPI_I2C_7BIT 0x0 +#define UACPI_I2C_10BIT 0x1 + +typedef struct uacpi_resource_i2c_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 addressing_mode; + uacpi_u16 slave_address; + uacpi_u32 connection_speed; +} uacpi_resource_i2c_connection; + +// wire_mode +#define UACPI_SPI_4_WIRES 0 +#define UACPI_SPI_3_WIRES 1 + +// device_polarity +#define UACPI_SPI_ACTIVE_LOW 0 +#define UACPI_SPI_ACTIVE_HIGH 1 + +// phase +#define UACPI_SPI_PHASE_FIRST 0 +#define UACPI_SPI_PHASE_SECOND 1 + +// polarity +#define UACPI_SPI_START_LOW 0 +#define UACPI_SPI_START_HIGH 1 + +typedef struct uacpi_resource_spi_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 wire_mode; + uacpi_u8 device_polarity; + uacpi_u8 data_bit_length; + uacpi_u8 phase; + uacpi_u8 polarity; + uacpi_u16 device_selection; + uacpi_u32 connection_speed; +} uacpi_resource_spi_connection; + +// stop_bits +#define UACPI_UART_STOP_BITS_NONE 0b00 +#define UACPI_UART_STOP_BITS_1 0b01 +#define UACPI_UART_STOP_BITS_1_5 0b10 +#define UACPI_UART_STOP_BITS_2 0b11 + +// data_bits +#define UACPI_UART_DATA_5BITS 0b000 +#define UACPI_UART_DATA_6BITS 0b001 +#define UACPI_UART_DATA_7BITS 0b010 +#define UACPI_UART_DATA_8BITS 0b011 +#define UACPI_UART_DATA_9BITS 0b100 + +// endianness +#define UACPI_UART_LITTLE_ENDIAN 0 +#define UACPI_UART_BIG_ENDIAN 1 + +// parity +#define UACPI_UART_PARITY_NONE 0x00 +#define UACPI_UART_PARITY_EVEN 0x01 +#define UACPI_UART_PARITY_ODD 0x02 +#define UACPI_UART_PARITY_MARK 0x03 +#define UACPI_UART_PARITY_SPACE 0x04 + +// lines_enabled +#define UACPI_UART_DATA_CARRIER_DETECT (1 << 2) +#define UACPI_UART_RING_INDICATOR (1 << 3) +#define UACPI_UART_DATA_SET_READY (1 << 4) +#define UACPI_UART_DATA_TERMINAL_READY (1 << 5) +#define UACPI_UART_CLEAR_TO_SEND (1 << 6) +#define UACPI_UART_REQUEST_TO_SEND (1 << 7) + +// flow_control +#define UACPI_UART_FLOW_CONTROL_NONE 0b00 +#define UACPI_UART_FLOW_CONTROL_HW 0b01 +#define UACPI_UART_FLOW_CONTROL_XON_XOFF 0b10 + +typedef struct uacpi_resource_uart_connection { + 
uacpi_resource_serial_bus_common common; + uacpi_u8 stop_bits; + uacpi_u8 data_bits; + uacpi_u8 endianness; + uacpi_u8 parity; + uacpi_u8 lines_enabled; + uacpi_u8 flow_control; + uacpi_u32 baud_rate; + uacpi_u16 rx_fifo; + uacpi_u16 tx_fifo; +} uacpi_resource_uart_connection; + +// phy_type +#define UACPI_CSI2_PHY_C 0b00 +#define UACPI_CSI2_PHY_D 0b01 + +typedef struct uacpi_resource_csi2_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 phy_type; + uacpi_u8 local_port; +} uacpi_resource_csi2_connection; + +typedef struct uacpi_resource_pin_function { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 pull_configuration; + uacpi_u16 function_number; + uacpi_u16 pin_table_length; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_function; + +// type +#define UACPI_PIN_CONFIG_DEFAULT 0x00 +#define UACPI_PIN_CONFIG_BIAS_PULL_UP 0x01 +#define UACPI_PIN_CONFIG_BIAS_PULL_DOWN 0x02 +#define UACPI_PIN_CONFIG_BIAS_DEFAULT 0x03 +#define UACPI_PIN_CONFIG_BIAS_DISABLE 0x04 +#define UACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE 0x05 +#define UACPI_PIN_CONFIG_BIAS_BUS_HOLD 0x06 +#define UACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN 0x07 +#define UACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE 0x08 +#define UACPI_PIN_CONFIG_DRIVE_PUSH_PULL 0x09 +#define UACPI_PIN_CONFIG_DRIVE_STRENGTH 0x0A +#define UACPI_PIN_CONFIG_SLEW_RATE 0x0B +#define UACPI_PIN_CONFIG_INPUT_DEBOUNCE 0x0C +#define UACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER 0x0D + +typedef struct uacpi_resource_pin_configuration { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 direction; + uacpi_u8 type; + uacpi_u32 value; + uacpi_u16 pin_table_length; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_configuration; + +typedef struct uacpi_resource_label { + uacpi_u16 length; + const uacpi_char *string; +} uacpi_resource_label; + +typedef struct uacpi_resource_pin_group { + uacpi_u8 revision_id; + uacpi_u8 direction; + uacpi_u16 pin_table_length; + uacpi_u16 vendor_data_length; + uacpi_resource_label label; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_group; + +typedef struct uacpi_resource_pin_group_function { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 direction; + uacpi_u16 function; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_resource_label label; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_group_function; + +typedef struct uacpi_resource_pin_group_configuration { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 direction; + uacpi_u8 type; + uacpi_u32 value; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_resource_label label; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_group_configuration; + +// scale +#define UACPI_SCALE_HZ 0b00 +#define UACPI_SCALE_KHZ 0b01 +#define UACPI_SCALE_MHZ 0b10 + +// frequency +#define UACPI_FREQUENCY_FIXED 0x0 +#define UACPI_FREQUENCY_VARIABLE 0x1 + +typedef struct uacpi_resource_clock_input { + uacpi_u8 revision_id; + uacpi_u8 frequency; + uacpi_u8 scale; + uacpi_u16 divisor; + uacpi_u32 numerator; + uacpi_resource_source source; +} uacpi_resource_clock_input; + +typedef struct uacpi_resource { + uacpi_u32 type; + uacpi_u32 length; + + union { + uacpi_resource_irq irq; + uacpi_resource_extended_irq extended_irq; + uacpi_resource_dma dma; + uacpi_resource_fixed_dma fixed_dma; + uacpi_resource_io io; + uacpi_resource_fixed_io fixed_io; + uacpi_resource_address16 
address16;
+ uacpi_resource_address32 address32;
+ uacpi_resource_address64 address64;
+ uacpi_resource_address64_extended address64_extended;
+ uacpi_resource_memory24 memory24;
+ uacpi_resource_memory32 memory32;
+ uacpi_resource_fixed_memory32 fixed_memory32;
+ uacpi_resource_start_dependent start_dependent;
+ uacpi_resource_vendor vendor;
+ uacpi_resource_vendor_typed vendor_typed;
+ uacpi_resource_generic_register generic_register;
+ uacpi_resource_gpio_connection gpio_connection;
+ uacpi_resource_serial_bus_common serial_bus_common;
+ uacpi_resource_i2c_connection i2c_connection;
+ uacpi_resource_spi_connection spi_connection;
+ uacpi_resource_uart_connection uart_connection;
+ uacpi_resource_csi2_connection csi2_connection;
+ uacpi_resource_pin_function pin_function;
+ uacpi_resource_pin_configuration pin_configuration;
+ uacpi_resource_pin_group pin_group;
+ uacpi_resource_pin_group_function pin_group_function;
+ uacpi_resource_pin_group_configuration pin_group_configuration;
+ uacpi_resource_clock_input clock_input;
+ };
+} uacpi_resource;
+
+#define UACPI_NEXT_RESOURCE(cur) \
+ ((uacpi_resource*)((uacpi_u8*)(cur) + (cur)->length))
+
+typedef struct uacpi_resources {
+ uacpi_size length;
+ uacpi_resource *entries;
+} uacpi_resources;
+void uacpi_free_resources(uacpi_resources*);
+
+typedef uacpi_iteration_decision (*uacpi_resource_iteration_callback)
+ (void *user, uacpi_resource *resource);
+
+/*
+ * Evaluate the _CRS method for a 'device' and get the returned resource list
+ * via 'out_resources'.
+ *
+ * NOTE: the returned buffer must be released via uacpi_free_resources()
+ */
+uacpi_status uacpi_get_current_resources(
+ uacpi_namespace_node *device, uacpi_resources **out_resources
+);
+
+/*
+ * Evaluate the _PRS method for a 'device' and get the returned resource list
+ * via 'out_resources'.
+ *
+ * NOTE: the returned buffer must be released via uacpi_free_resources()
+ */
+uacpi_status uacpi_get_possible_resources(
+ uacpi_namespace_node *device, uacpi_resources **out_resources
+);
+
+/*
+ * Evaluate an arbitrary method that is expected to return an AML resource
+ * buffer for a 'device' and get the returned resource list via 'out_resources'.
+ *
+ * NOTE: the returned buffer must be released via uacpi_free_resources()
+ */
+uacpi_status uacpi_get_device_resources(
+ uacpi_namespace_node *device, const uacpi_char *method,
+ uacpi_resources **out_resources
+);
+
+/*
+ * Set the configuration to be used by the 'device' by calling its _SRS method.
+ *
+ * Note that this expects 'resources' in the normal 'uacpi_resources' format,
+ * and not the raw AML resources bytestream; the conversion to the latter is
+ * done automatically by this API. If you want to _SRS a raw AML resources
+ * bytestream, use 'uacpi_execute' or similar API directly.
+ */
+uacpi_status uacpi_set_resources(
+ uacpi_namespace_node *device, uacpi_resources *resources
+);
+
+/*
+ * A convenience helper for iterating over the resource list returned by any
+ * of the uacpi_get_*_resources functions.
+ */
+uacpi_status uacpi_for_each_resource(
+ uacpi_resources *resources, uacpi_resource_iteration_callback cb, void *user
+);
+
+/*
+ * A shorthand for uacpi_get_device_resources() + uacpi_for_each_resource().
+ *
+ * Use if you don't actually want to save the 'resources' list, but simply want
+ * to iterate it once to extract the resources you care about and then free it
+ * right away.
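+ *
+ * Example usage (a sketch; 'device' is a previously obtained device node, and
+ * only the first IRQ descriptor is extracted):
+ *
+ *     static uacpi_iteration_decision find_irq(void *user, uacpi_resource *res)
+ *     {
+ *         if (res->type != UACPI_RESOURCE_TYPE_IRQ || res->irq.num_irqs == 0)
+ *             return UACPI_ITERATION_DECISION_CONTINUE;
+ *
+ *         *(uacpi_u8 *)user = res->irq.irqs[0];
+ *         return UACPI_ITERATION_DECISION_BREAK;
+ *     }
+ *
+ *     uacpi_u8 irq = 0;
+ *     uacpi_for_each_device_resource(device, "_CRS", find_irq, &irq);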
+ */
+uacpi_status uacpi_for_each_device_resource(
+ uacpi_namespace_node *device, const uacpi_char *method,
+ uacpi_resource_iteration_callback cb, void *user
+);
+
+/*
+ * Convert a single AML-encoded resource to native format.
+ *
+ * This should be used for converting Connection() fields (passed during IO on
+ * GeneralPurposeIO or GenericSerialBus operation regions) or other similar
+ * buffers with only one resource to native format.
+ *
+ * NOTE: the returned buffer must be released via uacpi_free_resource()
+ */
+uacpi_status uacpi_get_resource_from_buffer(
+ uacpi_data_view aml_buffer, uacpi_resource **out_resource
+);
+void uacpi_free_resource(uacpi_resource*);
+
+#endif // !UACPI_BAREBONES_MODE
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/sys/include/dev/acpi/uacpi/uacpi/sleep.h b/sys/include/dev/acpi/uacpi/uacpi/sleep.h
new file mode 100644
index 0000000..3fd9bf3
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/sleep.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#include <uacpi/types.h>
+#include <uacpi/status.h>
+#include <uacpi/uacpi.h>
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#ifndef UACPI_BAREBONES_MODE
+
+/*
+ * Set the firmware waking vector in FACS.
+ *
+ * 'addr32' is the real mode entry-point address
+ * 'addr64' is the protected mode entry-point address
+ */
+UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(
+uacpi_status uacpi_set_waking_vector(
+ uacpi_phys_addr addr32, uacpi_phys_addr addr64
+))
+
+typedef enum uacpi_sleep_state {
+ UACPI_SLEEP_STATE_S0 = 0,
+ UACPI_SLEEP_STATE_S1,
+ UACPI_SLEEP_STATE_S2,
+ UACPI_SLEEP_STATE_S3,
+ UACPI_SLEEP_STATE_S4,
+ UACPI_SLEEP_STATE_S5,
+ UACPI_SLEEP_STATE_MAX = UACPI_SLEEP_STATE_S5,
+} uacpi_sleep_state;
+
+/*
+ * Prepare for a given sleep state.
+ * Must be called with interrupts ENABLED.
+ */
+uacpi_status uacpi_prepare_for_sleep_state(uacpi_sleep_state);
+
+/*
+ * Enter the given sleep state after preparation.
+ * Must be called with interrupts DISABLED.
+ */
+uacpi_status uacpi_enter_sleep_state(uacpi_sleep_state);
+
+/*
+ * Prepare to leave the given sleep state.
+ * Must be called with interrupts DISABLED.
+ */
+uacpi_status uacpi_prepare_for_wake_from_sleep_state(uacpi_sleep_state);
+
+/*
+ * Wake from the given sleep state.
+ * Must be called with interrupts ENABLED.
+ */
+uacpi_status uacpi_wake_from_sleep_state(uacpi_sleep_state);
+
+/*
+ * Attempt reset via the FADT reset register.
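+ *
+ * For reference, the sleep API above is typically driven like this for an S5
+ * poweroff (a sketch; how interrupts are masked is host-specific, and every
+ * status should be checked in real code):
+ *
+ *     uacpi_prepare_for_sleep_state(UACPI_SLEEP_STATE_S5);
+ *     // disable interrupts here (host-specific)
+ *     uacpi_enter_sleep_state(UACPI_SLEEP_STATE_S5);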
+ */ +uacpi_status uacpi_reboot(void); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/status.h b/sys/include/dev/acpi/uacpi/uacpi/status.h new file mode 100644 index 0000000..5c09508 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/status.h @@ -0,0 +1,57 @@ +#pragma once + +#include <uacpi/internal/compiler.h> +#include <uacpi/platform/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum uacpi_status { + UACPI_STATUS_OK = 0, + UACPI_STATUS_MAPPING_FAILED = 1, + UACPI_STATUS_OUT_OF_MEMORY = 2, + UACPI_STATUS_BAD_CHECKSUM = 3, + UACPI_STATUS_INVALID_SIGNATURE = 4, + UACPI_STATUS_INVALID_TABLE_LENGTH = 5, + UACPI_STATUS_NOT_FOUND = 6, + UACPI_STATUS_INVALID_ARGUMENT = 7, + UACPI_STATUS_UNIMPLEMENTED = 8, + UACPI_STATUS_ALREADY_EXISTS = 9, + UACPI_STATUS_INTERNAL_ERROR = 10, + UACPI_STATUS_TYPE_MISMATCH = 11, + UACPI_STATUS_INIT_LEVEL_MISMATCH = 12, + UACPI_STATUS_NAMESPACE_NODE_DANGLING = 13, + UACPI_STATUS_NO_HANDLER = 14, + UACPI_STATUS_NO_RESOURCE_END_TAG = 15, + UACPI_STATUS_COMPILED_OUT = 16, + UACPI_STATUS_HARDWARE_TIMEOUT = 17, + UACPI_STATUS_TIMEOUT = 18, + UACPI_STATUS_OVERRIDDEN = 19, + UACPI_STATUS_DENIED = 20, + + // All errors that have bytecode-related origin should go here + UACPI_STATUS_AML_UNDEFINED_REFERENCE = 0x0EFF0000, + UACPI_STATUS_AML_INVALID_NAMESTRING = 0x0EFF0001, + UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS = 0x0EFF0002, + UACPI_STATUS_AML_INVALID_OPCODE = 0x0EFF0003, + UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE = 0x0EFF0004, + UACPI_STATUS_AML_BAD_ENCODING = 0x0EFF0005, + UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX = 0x0EFF0006, + UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH = 0x0EFF0007, + UACPI_STATUS_AML_INVALID_RESOURCE = 0x0EFF0008, + UACPI_STATUS_AML_LOOP_TIMEOUT = 0x0EFF0009, + UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT = 0x0EFF000A, +} uacpi_status; + +const uacpi_char *uacpi_status_to_string(uacpi_status); + +#define uacpi_unlikely_error(expr) uacpi_unlikely((expr) != UACPI_STATUS_OK) +#define uacpi_likely_error(expr) uacpi_likely((expr) != UACPI_STATUS_OK) + +#define uacpi_unlikely_success(expr) uacpi_unlikely((expr) == UACPI_STATUS_OK) +#define uacpi_likely_success(expr) uacpi_likely((expr) == UACPI_STATUS_OK) + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/tables.h b/sys/include/dev/acpi/uacpi/uacpi/tables.h new file mode 100644 index 0000000..5fbecee --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/tables.h @@ -0,0 +1,141 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/status.h> + +#ifdef __cplusplus +extern "C" { +#endif + +// Forward-declared to avoid including the entire acpi.h here +struct acpi_fadt; + +typedef struct uacpi_table_identifiers { + uacpi_object_name signature; + + // if oemid[0] == 0 this field is ignored + char oemid[6]; + + // if oem_table_id[0] == 0 this field is ignored + char oem_table_id[8]; +} uacpi_table_identifiers; + +typedef struct uacpi_table { + union { + uacpi_virt_addr virt_addr; + void *ptr; + struct acpi_sdt_hdr *hdr; + }; + + // Index number used to identify this table internally + uacpi_size index; +} uacpi_table; + +/* + * Install a table from either a virtual or a physical address. + * The table is simply stored in the internal table array, and not loaded by + * the interpreter (see uacpi_table_load). + * + * The table is optionally returned via 'out_table'. + * + * Manual calls to uacpi_table_install are not subject to filtering via the + * table installation callback (if any). 
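+ *
+ * Example (a sketch; 'my_ssdt' is a hypothetical pointer to a complete table
+ * already in memory, and uacpi_table_load is only available outside of
+ * barebones mode):
+ *
+ *     uacpi_table tbl;
+ *     if (uacpi_table_install(my_ssdt, &tbl) == UACPI_STATUS_OK)
+ *         uacpi_table_load(tbl.index);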
+ */
+uacpi_status uacpi_table_install(
+ void*, uacpi_table *out_table
+);
+uacpi_status uacpi_table_install_physical(
+ uacpi_phys_addr, uacpi_table *out_table
+);
+
+#ifndef UACPI_BAREBONES_MODE
+/*
+ * Load a previously installed table by feeding it to the interpreter.
+ */
+uacpi_status uacpi_table_load(uacpi_size index);
+#endif // !UACPI_BAREBONES_MODE
+
+/*
+ * Helpers for finding tables.
+ *
+ * for find_by_signature:
+ * 'signature' is an array of 4 characters; a null terminator is not
+ * necessary and can be omitted (especially useful for non-C language
+ * bindings)
+ *
+ * 'out_table' is a pointer to a caller-allocated uacpi_table structure that
+ * receives the table pointer & its internal index in case the call was
+ * successful.
+ *
+ * NOTE:
+ * The returned table's reference count is incremented by 1, which keeps its
+ * mapping alive forever unless uacpi_table_unref() is called for this table
+ * later on. Calling uacpi_table_find_next_with_same_signature() on a table also
+ * drops its reference count by 1, so if you want to keep it mapped you must
+ * manually call uacpi_table_ref() beforehand.
+ */
+uacpi_status uacpi_table_find_by_signature(
+ const uacpi_char *signature, uacpi_table *out_table
+);
+uacpi_status uacpi_table_find_next_with_same_signature(
+ uacpi_table *in_out_table
+);
+uacpi_status uacpi_table_find(
+ const uacpi_table_identifiers *id, uacpi_table *out_table
+);
+
+/*
+ * Increment/decrement a table's reference count.
+ * The table is unmapped when the reference count drops to 0.
+ */
+uacpi_status uacpi_table_ref(uacpi_table*);
+uacpi_status uacpi_table_unref(uacpi_table*);
+
+/*
+ * Returns the pointer to a sanitized internal version of FADT.
+ *
+ * The revision is guaranteed to be correct. All of the registers are converted
+ * to GAS format. Fields that might contain garbage are cleared.
+ */
+uacpi_status uacpi_table_fadt(struct acpi_fadt**);
+
+typedef enum uacpi_table_installation_disposition {
+ // Allow the table to be installed as-is
+ UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW = 0,
+
+ /*
+ * Deny the table from being installed completely. This is useful for
+ * debugging various problems, e.g. AML loading bad SSDTs that cause the
+ * system to hang or enter an undesired state.
+ */
+ UACPI_TABLE_INSTALLATION_DISPOSITON_DENY,
+
+ /*
+ * Override the table being installed with the table at the virtual address
+ * returned in 'out_override_address'.
+ */
+ UACPI_TABLE_INSTALLATION_DISPOSITON_VIRTUAL_OVERRIDE,
+
+ /*
+ * Override the table being installed with the table at the physical address
+ * returned in 'out_override_address'.
+ */
+ UACPI_TABLE_INSTALLATION_DISPOSITON_PHYSICAL_OVERRIDE,
+} uacpi_table_installation_disposition;
+
+typedef uacpi_table_installation_disposition (*uacpi_table_installation_handler)
+ (struct acpi_sdt_hdr *hdr, uacpi_u64 *out_override_address);
+
+/*
+ * Set a handler that is invoked for each table before it gets installed.
+ *
+ * Depending on the return value, the table is either allowed to be installed
+ * as-is, denied, or overridden with a new one.
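+ *
+ * Example (a sketch of a handler that rejects every SSDT; it assumes the
+ * standard acpi_sdt_hdr layout with a 4-byte 'signature' field, and memcmp
+ * stands in for whatever string helper the host uses):
+ *
+ *     static uacpi_table_installation_disposition
+ *     deny_ssdts(struct acpi_sdt_hdr *hdr, uacpi_u64 *out_override_address)
+ *     {
+ *         (void)out_override_address;
+ *         if (memcmp(hdr->signature, "SSDT", 4) == 0)
+ *             return UACPI_TABLE_INSTALLATION_DISPOSITON_DENY;
+ *         return UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW;
+ *     }
+ *
+ *     uacpi_set_table_installation_handler(deny_ssdts);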
+ */
+uacpi_status uacpi_set_table_installation_handler(
+ uacpi_table_installation_handler handler
+);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/sys/include/dev/acpi/uacpi/uacpi/types.h b/sys/include/dev/acpi/uacpi/uacpi/types.h
new file mode 100644
index 0000000..240cfdc
--- /dev/null
+++ b/sys/include/dev/acpi/uacpi/uacpi/types.h
@@ -0,0 +1,547 @@
+#pragma once
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+#pragma GCC diagnostic ignored "-Wformat"
+
+#include <uacpi/status.h>
+#include <uacpi/platform/types.h>
+#include <uacpi/platform/compiler.h>
+#include <uacpi/platform/arch_helpers.h>
+#include <uacpi/platform/config.h>
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#if UACPI_POINTER_SIZE == 4 && defined(UACPI_PHYS_ADDR_IS_32BITS)
+typedef uacpi_u32 uacpi_phys_addr;
+typedef uacpi_u32 uacpi_io_addr;
+#else
+typedef uacpi_u64 uacpi_phys_addr;
+typedef uacpi_u64 uacpi_io_addr;
+#endif
+
+typedef void *uacpi_handle;
+
+typedef union uacpi_object_name {
+ uacpi_char text[4];
+ uacpi_u32 id;
+} uacpi_object_name;
+
+typedef enum uacpi_iteration_decision {
+ UACPI_ITERATION_DECISION_CONTINUE = 0,
+ UACPI_ITERATION_DECISION_BREAK,
+
+ // Only applicable for uacpi_namespace_for_each_child
+ UACPI_ITERATION_DECISION_NEXT_PEER,
+} uacpi_iteration_decision;
+
+typedef enum uacpi_address_space {
+ UACPI_ADDRESS_SPACE_SYSTEM_MEMORY = 0,
+ UACPI_ADDRESS_SPACE_SYSTEM_IO = 1,
+ UACPI_ADDRESS_SPACE_PCI_CONFIG = 2,
+ UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER = 3,
+ UACPI_ADDRESS_SPACE_SMBUS = 4,
+ UACPI_ADDRESS_SPACE_SYSTEM_CMOS = 5,
+ UACPI_ADDRESS_SPACE_PCI_BAR_TARGET = 6,
+ UACPI_ADDRESS_SPACE_IPMI = 7,
+ UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO = 8,
+ UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS = 9,
+ UACPI_ADDRESS_SPACE_PCC = 0x0A,
+ UACPI_ADDRESS_SPACE_PRM = 0x0B,
+ UACPI_ADDRESS_SPACE_FFIXEDHW = 0x7F,
+
+ // Internal type
+ UACPI_ADDRESS_SPACE_TABLE_DATA = 0xDA1A,
+} uacpi_address_space;
+const uacpi_char *uacpi_address_space_to_string(uacpi_address_space space);
+
+#ifndef UACPI_BAREBONES_MODE
+
+typedef enum uacpi_init_level {
+ // Reboot state, nothing is available
+ UACPI_INIT_LEVEL_EARLY = 0,
+
+ /*
+ * State after a successful call to uacpi_initialize. Table API and
+ * other helpers that don't depend on the ACPI namespace may be used.
+ */
+ UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED = 1,
+
+ /*
+ * State after a successful call to uacpi_namespace_load. Most API may be
+ * used, namespace can be iterated, etc.
+ */
+ UACPI_INIT_LEVEL_NAMESPACE_LOADED = 2,
+
+ /*
+ * The final initialization stage; this is entered after the call to
+ * uacpi_namespace_initialize. All API is available to use.
+ */ + UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED = 3, +} uacpi_init_level; + +typedef struct uacpi_pci_address { + uacpi_u16 segment; + uacpi_u8 bus; + uacpi_u8 device; + uacpi_u8 function; +} uacpi_pci_address; + +typedef struct uacpi_data_view { + union { + uacpi_u8 *bytes; + const uacpi_u8 *const_bytes; + + uacpi_char *text; + const uacpi_char *const_text; + + void *data; + const void *const_data; + }; + uacpi_size length; +} uacpi_data_view; + +typedef struct uacpi_namespace_node uacpi_namespace_node; + +typedef enum uacpi_object_type { + UACPI_OBJECT_UNINITIALIZED = 0, + UACPI_OBJECT_INTEGER = 1, + UACPI_OBJECT_STRING = 2, + UACPI_OBJECT_BUFFER = 3, + UACPI_OBJECT_PACKAGE = 4, + UACPI_OBJECT_FIELD_UNIT = 5, + UACPI_OBJECT_DEVICE = 6, + UACPI_OBJECT_EVENT = 7, + UACPI_OBJECT_METHOD = 8, + UACPI_OBJECT_MUTEX = 9, + UACPI_OBJECT_OPERATION_REGION = 10, + UACPI_OBJECT_POWER_RESOURCE = 11, + UACPI_OBJECT_PROCESSOR = 12, + UACPI_OBJECT_THERMAL_ZONE = 13, + UACPI_OBJECT_BUFFER_FIELD = 14, + UACPI_OBJECT_DEBUG = 16, + + UACPI_OBJECT_REFERENCE = 20, + UACPI_OBJECT_BUFFER_INDEX = 21, + UACPI_OBJECT_MAX_TYPE_VALUE = UACPI_OBJECT_BUFFER_INDEX +} uacpi_object_type; + +// Type bits for API requiring a bit mask, e.g. uacpi_eval_typed +typedef enum uacpi_object_type_bits { + UACPI_OBJECT_INTEGER_BIT = (1 << UACPI_OBJECT_INTEGER), + UACPI_OBJECT_STRING_BIT = (1 << UACPI_OBJECT_STRING), + UACPI_OBJECT_BUFFER_BIT = (1 << UACPI_OBJECT_BUFFER), + UACPI_OBJECT_PACKAGE_BIT = (1 << UACPI_OBJECT_PACKAGE), + UACPI_OBJECT_FIELD_UNIT_BIT = (1 << UACPI_OBJECT_FIELD_UNIT), + UACPI_OBJECT_DEVICE_BIT = (1 << UACPI_OBJECT_DEVICE), + UACPI_OBJECT_EVENT_BIT = (1 << UACPI_OBJECT_EVENT), + UACPI_OBJECT_METHOD_BIT = (1 << UACPI_OBJECT_METHOD), + UACPI_OBJECT_MUTEX_BIT = (1 << UACPI_OBJECT_MUTEX), + UACPI_OBJECT_OPERATION_REGION_BIT = (1 << UACPI_OBJECT_OPERATION_REGION), + UACPI_OBJECT_POWER_RESOURCE_BIT = (1 << UACPI_OBJECT_POWER_RESOURCE), + UACPI_OBJECT_PROCESSOR_BIT = (1 << UACPI_OBJECT_PROCESSOR), + UACPI_OBJECT_THERMAL_ZONE_BIT = (1 << UACPI_OBJECT_THERMAL_ZONE), + UACPI_OBJECT_BUFFER_FIELD_BIT = (1 << UACPI_OBJECT_BUFFER_FIELD), + UACPI_OBJECT_DEBUG_BIT = (1 << UACPI_OBJECT_DEBUG), + UACPI_OBJECT_REFERENCE_BIT = (1 << UACPI_OBJECT_REFERENCE), + UACPI_OBJECT_BUFFER_INDEX_BIT = (1 << UACPI_OBJECT_BUFFER_INDEX), + UACPI_OBJECT_ANY_BIT = 0xFFFFFFFF, +} uacpi_object_type_bits; + +typedef struct uacpi_object uacpi_object; + +void uacpi_object_ref(uacpi_object *obj); +void uacpi_object_unref(uacpi_object *obj); + +uacpi_object_type uacpi_object_get_type(uacpi_object*); +uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object*); + +/* + * Returns UACPI_TRUE if the provided object's type matches this type. + */ +uacpi_bool uacpi_object_is(uacpi_object*, uacpi_object_type); + +/* + * Returns UACPI_TRUE if the provided object's type is one of the values + * specified in the 'type_mask' of UACPI_OBJECT_*_BIT. + */ +uacpi_bool uacpi_object_is_one_of( + uacpi_object*, uacpi_object_type_bits type_mask +); + +const uacpi_char *uacpi_object_type_to_string(uacpi_object_type); + +/* + * Create an uninitialized object. The object can be further overwritten via + * uacpi_object_assign_* to anything. + */ +uacpi_object *uacpi_object_create_uninitialized(void); + +/* + * Create an integer object with the value provided. 
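+ *
+ * Example (a sketch; drop the local reference once the object is no longer
+ * needed):
+ *
+ *     uacpi_object *obj = uacpi_object_create_integer(42);
+ *     if (obj != UACPI_NULL) {
+ *         // ... use obj, e.g. as a method argument ...
+ *         uacpi_object_unref(obj);
+ *     }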
+ */ +uacpi_object *uacpi_object_create_integer(uacpi_u64); + +typedef enum uacpi_overflow_behavior { + UACPI_OVERFLOW_ALLOW = 0, + UACPI_OVERFLOW_TRUNCATE, + UACPI_OVERFLOW_DISALLOW, +} uacpi_overflow_behavior; + +/* + * Same as uacpi_object_create_integer, but introduces additional ways to + * control what happens if the provided integer is larger than 32-bits, and the + * AML code expects 32-bit integers. + * + * - UACPI_OVERFLOW_ALLOW -> do nothing, same as the vanilla helper + * - UACPI_OVERFLOW_TRUNCATE -> truncate the integer to 32-bits if it happens to + * be larger than allowed by the DSDT + * - UACPI_OVERFLOW_DISALLOW -> fail object creation with + * UACPI_STATUS_INVALID_ARGUMENT if the provided + * value happens to be too large + */ +uacpi_status uacpi_object_create_integer_safe( + uacpi_u64, uacpi_overflow_behavior, uacpi_object **out_obj +); + +uacpi_status uacpi_object_assign_integer(uacpi_object*, uacpi_u64 value); +uacpi_status uacpi_object_get_integer(uacpi_object*, uacpi_u64 *out); + +/* + * Create a string/buffer object. Takes in a constant view of the data. + * + * NOTE: The data is copied to a separately allocated buffer and is not taken + * ownership of. + */ +uacpi_object *uacpi_object_create_string(uacpi_data_view); +uacpi_object *uacpi_object_create_cstring(const uacpi_char*); +uacpi_object *uacpi_object_create_buffer(uacpi_data_view); + +/* + * Returns a writable view of the data stored in the string or buffer type + * object. + */ +uacpi_status uacpi_object_get_string_or_buffer( + uacpi_object*, uacpi_data_view *out +); +uacpi_status uacpi_object_get_string(uacpi_object*, uacpi_data_view *out); +uacpi_status uacpi_object_get_buffer(uacpi_object*, uacpi_data_view *out); + +/* + * Returns UACPI_TRUE if the provided string object is actually an AML namepath. + * + * This can only be the case for package elements. If a package element is + * specified as a path to an object in AML, it's not resolved by the interpreter + * right away as it might not have been defined at that point yet, and is + * instead stored as a special string object to be resolved by client code + * when needed. + * + * Example usage: + * uacpi_namespace_node *target_node = UACPI_NULL; + * + * uacpi_object *obj = UACPI_NULL; + * uacpi_eval(scope, path, UACPI_NULL, &obj); + * + * uacpi_object_array arr; + * uacpi_object_get_package(obj, &arr); + * + * if (uacpi_object_is_aml_namepath(arr.objects[0])) { + * uacpi_object_resolve_as_aml_namepath( + * arr.objects[0], scope, &target_node + * ); + * } + */ +uacpi_bool uacpi_object_is_aml_namepath(uacpi_object*); + +/* + * Resolve an AML namepath contained in a string object. + * + * This is only applicable to objects that are package elements. See an + * explanation of how this works in the comment above the declaration of + * uacpi_object_is_aml_namepath. + * + * This is a shorthand for: + * uacpi_data_view view; + * uacpi_object_get_string(object, &view); + * + * target_node = uacpi_namespace_node_resolve_from_aml_namepath( + * scope, view.text + * ); + */ +uacpi_status uacpi_object_resolve_as_aml_namepath( + uacpi_object*, uacpi_namespace_node *scope, uacpi_namespace_node **out_node +); + +/* + * Make the provided object a string/buffer. + * Takes in a constant view of the data to be stored in the object. + * + * NOTE: The data is copied to a separately allocated buffer and is not taken + * ownership of. 
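+ *
+ * Example (a sketch; 'obj' is an existing uacpi_object, and the view simply
+ * points at the 4 characters of a string constant):
+ *
+ *     uacpi_data_view view;
+ *     view.const_text = "PCI0";
+ *     view.length = 4;
+ *     uacpi_object_assign_string(obj, view);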
+ */
+uacpi_status uacpi_object_assign_string(uacpi_object*, uacpi_data_view in);
+uacpi_status uacpi_object_assign_buffer(uacpi_object*, uacpi_data_view in);
+
+typedef struct uacpi_object_array {
+ uacpi_object **objects;
+ uacpi_size count;
+} uacpi_object_array;
+
+/*
+ * Create a package object and store all of the objects in the array inside.
+ * The array is allowed to be empty.
+ *
+ * NOTE: the reference count of each object is incremented before being stored
+ * in the object. Client code must remove all of the locally created
+ * references at its own discretion.
+ */
+uacpi_object *uacpi_object_create_package(uacpi_object_array in);
+
+/*
+ * Returns the list of objects stored in a package object.
+ *
+ * NOTE: the reference count of the objects stored inside is not incremented,
+ * which means destroying/overwriting the object also potentially destroys
+ * all of the objects stored inside unless the reference count is
+ * incremented by the client via uacpi_object_ref.
+ */
+uacpi_status uacpi_object_get_package(uacpi_object*, uacpi_object_array *out);
+
+/*
+ * Make the provided object a package and store all of the objects in the array
+ * inside. The array is allowed to be empty.
+ *
+ * NOTE: the reference count of each object is incremented before being stored
+ * in the object. Client code must remove all of the locally created
+ * references at its own discretion.
+ */
+uacpi_status uacpi_object_assign_package(uacpi_object*, uacpi_object_array in);
+
+/*
+ * Create a reference object and make it point to 'child'.
+ *
+ * NOTE: child's reference count is incremented by one. Client code must remove
+ * all of the locally created references at its own discretion.
+ */
+uacpi_object *uacpi_object_create_reference(uacpi_object *child);
+
+/*
+ * Make the provided object a reference and make it point to 'child'.
+ *
+ * NOTE: child's reference count is incremented by one. Client code must remove
+ * all of the locally created references at its own discretion.
+ */
+uacpi_status uacpi_object_assign_reference(uacpi_object*, uacpi_object *child);
+
+/*
+ * Retrieve the object pointed to by a reference object.
+ *
+ * NOTE: the reference count of the returned object is incremented by one and
+ * must be uacpi_object_unref'ed by the client when no longer needed.
+ */
+uacpi_status uacpi_object_get_dereferenced(uacpi_object*, uacpi_object **out);
+
+typedef struct uacpi_processor_info {
+ uacpi_u8 id;
+ uacpi_u32 block_address;
+ uacpi_u8 block_length;
+} uacpi_processor_info;
+
+/*
+ * Returns the information about the provided processor object.
+ */
+uacpi_status uacpi_object_get_processor_info(
+ uacpi_object*, uacpi_processor_info *out
+);
+
+typedef struct uacpi_power_resource_info {
+ uacpi_u8 system_level;
+ uacpi_u16 resource_order;
+} uacpi_power_resource_info;
+
+/*
+ * Returns the information about the provided power resource object.
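+ *
+ * Example (a sketch; 'obj' must be a power resource object):
+ *
+ *     uacpi_power_resource_info info;
+ *     if (uacpi_object_get_power_resource_info(obj, &info) == UACPI_STATUS_OK) {
+ *         // info.system_level and info.resource_order are now valid
+ *     }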
+ */ +uacpi_status uacpi_object_get_power_resource_info( + uacpi_object*, uacpi_power_resource_info *out +); + +typedef enum uacpi_region_op { + // data => uacpi_region_attach_data + UACPI_REGION_OP_ATTACH = 0, + // data => uacpi_region_detach_data + UACPI_REGION_OP_DETACH, + + // data => uacpi_region_rw_data + UACPI_REGION_OP_READ, + UACPI_REGION_OP_WRITE, + + // data => uacpi_region_pcc_send_data + UACPI_REGION_OP_PCC_SEND, + + // data => uacpi_region_gpio_rw_data + UACPI_REGION_OP_GPIO_READ, + UACPI_REGION_OP_GPIO_WRITE, + + // data => uacpi_region_ipmi_rw_data + UACPI_REGION_OP_IPMI_COMMAND, + + // data => uacpi_region_ffixedhw_rw_data + UACPI_REGION_OP_FFIXEDHW_COMMAND, + + // data => uacpi_region_prm_rw_data + UACPI_REGION_OP_PRM_COMMAND, + + // data => uacpi_region_serial_rw_data + UACPI_REGION_OP_SERIAL_READ, + UACPI_REGION_OP_SERIAL_WRITE, +} uacpi_region_op; + +typedef struct uacpi_generic_region_info { + uacpi_u64 base; + uacpi_u64 length; +} uacpi_generic_region_info; + +typedef struct uacpi_pcc_region_info { + uacpi_data_view buffer; + uacpi_u8 subspace_id; +} uacpi_pcc_region_info; + +typedef struct uacpi_gpio_region_info +{ + uacpi_u64 num_pins; +} uacpi_gpio_region_info; + +typedef struct uacpi_region_attach_data { + void *handler_context; + uacpi_namespace_node *region_node; + union { + uacpi_generic_region_info generic_info; + uacpi_pcc_region_info pcc_info; + uacpi_gpio_region_info gpio_info; + }; + void *out_region_context; +} uacpi_region_attach_data; + +typedef struct uacpi_region_rw_data { + void *handler_context; + void *region_context; + union { + uacpi_phys_addr address; + uacpi_u64 offset; + }; + uacpi_u64 value; + uacpi_u8 byte_width; +} uacpi_region_rw_data; + +typedef struct uacpi_region_pcc_send_data { + void *handler_context; + void *region_context; + uacpi_data_view buffer; +} uacpi_region_pcc_send_data; + +typedef struct uacpi_region_gpio_rw_data +{ + void *handler_context; + void *region_context; + uacpi_data_view connection; + uacpi_u32 pin_offset; + uacpi_u32 num_pins; + uacpi_u64 value; +} uacpi_region_gpio_rw_data; + +typedef struct uacpi_region_ipmi_rw_data +{ + void *handler_context; + void *region_context; + uacpi_data_view in_out_message; + uacpi_u64 command; +} uacpi_region_ipmi_rw_data; + +typedef uacpi_region_ipmi_rw_data uacpi_region_ffixedhw_rw_data; + +typedef struct uacpi_region_prm_rw_data +{ + void *handler_context; + void *region_context; + uacpi_data_view in_out_message; +} uacpi_region_prm_rw_data; + +typedef enum uacpi_access_attribute { + UACPI_ACCESS_ATTRIBUTE_QUICK = 0x02, + UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE = 0x04, + UACPI_ACCESS_ATTRIBUTE_BYTE = 0x06, + UACPI_ACCESS_ATTRIBUTE_WORD = 0x08, + UACPI_ACCESS_ATTRIBUTE_BLOCK = 0x0A, + UACPI_ACCESS_ATTRIBUTE_BYTES = 0x0B, + UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL = 0x0C, + UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL = 0x0D, + UACPI_ACCESS_ATTRIBUTE_RAW_BYTES = 0x0E, + UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES = 0x0F, +} uacpi_access_attribute; + +typedef struct uacpi_region_serial_rw_data { + void *handler_context; + void *region_context; + uacpi_u64 command; + uacpi_data_view connection; + uacpi_data_view in_out_buffer; + uacpi_access_attribute access_attribute; + + /* + * Applicable if access_attribute is one of: + * - UACPI_ACCESS_ATTRIBUTE_BYTES + * - UACPI_ACCESS_ATTRIBUTE_RAW_BYTES + * - UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES + */ + uacpi_u8 access_length; +} uacpi_region_serial_rw_data; + +typedef struct uacpi_region_detach_data { + void *handler_context; + void *region_context; + 
uacpi_namespace_node *region_node; +} uacpi_region_detach_data; + +typedef uacpi_status (*uacpi_region_handler) + (uacpi_region_op op, uacpi_handle op_data); + +typedef uacpi_status (*uacpi_notify_handler) + (uacpi_handle context, uacpi_namespace_node *node, uacpi_u64 value); + +typedef enum uacpi_firmware_request_type { + UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT, + UACPI_FIRMWARE_REQUEST_TYPE_FATAL, +} uacpi_firmware_request_type; + +typedef struct uacpi_firmware_request { + uacpi_u8 type; + + union { + // UACPI_FIRMWARE_REQUEST_BREAKPOINT + struct { + // The context of the method currently being executed + uacpi_handle ctx; + } breakpoint; + + // UACPI_FIRMWARE_REQUEST_FATAL + struct { + uacpi_u8 type; + uacpi_u32 code; + uacpi_u64 arg; + } fatal; + }; +} uacpi_firmware_request; + +#define UACPI_INTERRUPT_NOT_HANDLED 0 +#define UACPI_INTERRUPT_HANDLED 1 +typedef uacpi_u32 uacpi_interrupt_ret; + +typedef uacpi_interrupt_ret (*uacpi_interrupt_handler)(uacpi_handle); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/uacpi.h b/sys/include/dev/acpi/uacpi/uacpi/uacpi.h new file mode 100644 index 0000000..a37836c --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/uacpi.h @@ -0,0 +1,269 @@ +#pragma once + +#include <uacpi/types.h> +#include <uacpi/status.h> +#include <uacpi/kernel_api.h> +#include <uacpi/namespace.h> + +#define UACPI_MAJOR 2 +#define UACPI_MINOR 1 +#define UACPI_PATCH 1 + +#ifdef UACPI_REDUCED_HARDWARE +#define UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, ret) \ + UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \ + static inline fn { return ret; } \ + UACPI_NO_UNUSED_PARAMETER_WARNINGS_END + +#define UACPI_STUB_IF_REDUCED_HARDWARE(fn) \ + UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn,) +#define UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(fn) \ + UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, UACPI_STATUS_COMPILED_OUT) +#define UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(fn) \ + UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, UACPI_STATUS_OK) +#else + +#define UACPI_STUB_IF_REDUCED_HARDWARE(fn) fn; +#define UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(fn) fn; +#define UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(fn) fn; +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Set up early access to the table subsystem. What this means is: + * - uacpi_table_find() and similar API becomes usable before the call to + * uacpi_initialize(). + * - No kernel API besides logging and map/unmap will be invoked at this stage, + * allowing for heap and scheduling to still be fully offline. + * - The provided 'temporary_buffer' will be used as a temporary storage for the + * internal metadata about the tables (list, reference count, addresses, + * sizes, etc). + * - The 'temporary_buffer' is replaced with a normal heap buffer allocated via + * uacpi_kernel_alloc() after the call to uacpi_initialize() and can therefore + * be reclaimed by the kernel. + * + * The approximate overhead per table is 56 bytes, so a buffer of 4096 bytes + * yields about 73 tables in terms of capacity. uACPI also has an internal + * static buffer for tables, "UACPI_STATIC_TABLE_ARRAY_LEN", which is configured + * as 16 descriptors in length by default. + * + * This function is used to initialize the barebones mode, see + * UACPI_BAREBONES_MODE in config.h for more information. 
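+ *
+ * Example usage (a sketch using a static scratch buffer; the error handling
+ * is a stand-in for whatever the host does):
+ *
+ *     static uacpi_u8 early_table_buf[4096];
+ *
+ *     uacpi_status ret = uacpi_setup_early_table_access(
+ *         early_table_buf, sizeof(early_table_buf)
+ *     );
+ *     if (uacpi_unlikely_error(ret)) {
+ *         // handle the error (host-specific)
+ *     }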
+ */
+uacpi_status uacpi_setup_early_table_access(
+ void *temporary_buffer, uacpi_size buffer_size
+);
+
+/*
+ * Bad table checksum should be considered a fatal error
+ * (table load is fully aborted in this case)
+ */
+#define UACPI_FLAG_BAD_CSUM_FATAL (1ull << 0)
+
+/*
+ * Unexpected table signature should be considered a fatal error
+ * (table load is fully aborted in this case)
+ */
+#define UACPI_FLAG_BAD_TBL_SIGNATURE_FATAL (1ull << 1)
+
+/*
+ * Force uACPI to use RSDT even for later revisions
+ */
+#define UACPI_FLAG_BAD_XSDT (1ull << 2)
+
+/*
+ * If this is set, ACPI mode is not entered during the call to
+ * uacpi_initialize. The caller is expected to enter it later at their own
+ * discretion by using uacpi_enter_acpi_mode().
+ */
+#define UACPI_FLAG_NO_ACPI_MODE (1ull << 3)
+
+/*
+ * Don't create the \_OSI method when building the namespace.
+ * Only enable this if you're certain that having this method breaks your AML
+ * blob; a more granular interface-management API is available via osi.h
+ */
+#define UACPI_FLAG_NO_OSI (1ull << 4)
+
+/*
+ * Validate table checksums at installation time instead of first use.
+ * Note that this makes uACPI map the entire table at once, which not all
+ * hosts are able to handle at early init.
+ */
+#define UACPI_FLAG_PROACTIVE_TBL_CSUM (1ull << 5)
+
+#ifndef UACPI_BAREBONES_MODE
+
+/*
+ * Initializes the uACPI subsystem, iterates & records all relevant RSDT/XSDT
+ * tables. Enters ACPI mode.
+ *
+ * 'flags' is any combination of UACPI_FLAG_* above
+ */
+uacpi_status uacpi_initialize(uacpi_u64 flags);
+
+/*
+ * Parses & executes all of the DSDT/SSDT tables.
+ * Initializes the event subsystem.
+ */
+uacpi_status uacpi_namespace_load(void);
+
+/*
+ * Initializes all the necessary objects in the namespace by calling
+ * _STA/_INI etc.
+ */
+uacpi_status uacpi_namespace_initialize(void);
+
+// Returns the current subsystem initialization level
+uacpi_init_level uacpi_get_current_init_level(void);
+
+/*
+ * Evaluate an object within the namespace and get back its value.
+ * Either 'parent' or 'path' must be valid.
+ * A value of NULL for 'parent' implies uacpi_namespace_root() relative
+ * lookups, unless 'path' is already absolute.
+ */
+uacpi_status uacpi_eval(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object **ret
+);
+uacpi_status uacpi_eval_simple(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
+);
+
+/*
+ * Same as uacpi_eval() but without a return value.
+ */
+uacpi_status uacpi_execute(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args
+);
+uacpi_status uacpi_execute_simple(
+ uacpi_namespace_node *parent, const uacpi_char *path
+);
+
+/*
+ * Same as uacpi_eval, but the return value type is validated against
+ * the 'ret_mask'. UACPI_STATUS_TYPE_MISMATCH is returned on error.
+ */
+uacpi_status uacpi_eval_typed(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object_type_bits ret_mask,
+ uacpi_object **ret
+);
+uacpi_status uacpi_eval_simple_typed(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ uacpi_object_type_bits ret_mask, uacpi_object **ret
+);
+
+/*
+ * A shorthand for uacpi_eval_typed with UACPI_OBJECT_INTEGER_BIT.
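+ *
+ * Example (a sketch evaluating a hypothetical \_SB.PCI0._BBN; the NULL parent
+ * makes the lookup root-relative, as described for uacpi_eval above):
+ *
+ *     uacpi_u64 bus;
+ *     uacpi_status ret = uacpi_eval_simple_integer(
+ *         UACPI_NULL, "\\_SB.PCI0._BBN", &bus
+ *     );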
+ */ +uacpi_status uacpi_eval_integer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_u64 *out_value +); +uacpi_status uacpi_eval_simple_integer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value +); + +/* + * A shorthand for uacpi_eval_typed with + * UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT + * + * Use uacpi_object_get_string_or_buffer to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_STRING_BIT. + * + * Use uacpi_object_get_string to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_BUFFER_BIT. + * + * Use uacpi_object_get_buffer to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_PACKAGE_BIT. + * + * Use uacpi_object_get_package to retrieve the resulting object array. + */ +uacpi_status uacpi_eval_package( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_package( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * Get the bitness of the currently loaded AML code according to the DSDT. + * + * Returns either 32 or 64. + */ +uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness); + +/* + * Helpers for entering & leaving ACPI mode. Note that ACPI mode is entered + * automatically during the call to uacpi_initialize(). + */ +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_enter_acpi_mode(void) +) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_leave_acpi_mode(void) +) + +/* + * Attempt to acquire the global lock for 'timeout' milliseconds. + * 0xFFFF implies infinite wait. + * + * On success, 'out_seq' is set to a unique sequence number for the current + * acquire transaction. This number is used for validation during release. + */ +uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq); +uacpi_status uacpi_release_global_lock(uacpi_u32 seq); + +#endif // !UACPI_BAREBONES_MODE + +/* + * Reset the global uACPI state by freeing all internally allocated data + * structures & resetting any global variables. After this call, uACPI must be + * re-initialized from scratch to be used again. + * + * This is called by uACPI automatically if a fatal error occurs during a call + * to uacpi_initialize/uacpi_namespace_load etc. in order to prevent accidental + * use of partially uninitialized subsystems. 
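+ * + * For reference, a sketch of the typical bring-up sequence this protects + * (each call is documented above; error handling reduced to a bail-out): + * + *	if (uacpi_initialize(0) != UACPI_STATUS_OK) + *		goto fail; + *	if (uacpi_namespace_load() != UACPI_STATUS_OK) + *		goto fail; + *	if (uacpi_namespace_initialize() != UACPI_STATUS_OK) + *		goto fail;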
+ */ +void uacpi_state_reset(void); + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/acpi/uacpi/uacpi/utilities.h b/sys/include/dev/acpi/uacpi/uacpi/utilities.h new file mode 100644 index 0000000..dfc41c3 --- /dev/null +++ b/sys/include/dev/acpi/uacpi/uacpi/utilities.h @@ -0,0 +1,188 @@ +#pragma once + +#include <uacpi/status.h> +#include <uacpi/types.h> +#include <uacpi/namespace.h> + +#ifdef __cplusplus extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Checks whether the device at 'node' matches any of the PNP ids provided in + * 'list' (terminated by a UACPI_NULL). This is done by first attempting to + * match the value returned from _HID and then the value(s) from _CID. + * + * Note that the presence of the device (_STA) is not verified here. + */ +uacpi_bool uacpi_device_matches_pnp_id( + uacpi_namespace_node *node, + const uacpi_char *const *list +); + +/* + * Find all the devices in the namespace starting at 'parent' matching the + * specified 'hids' (terminated by a UACPI_NULL) against any value from _HID or + * _CID. Only devices reported as present via _STA are checked. Any matching + * devices are then passed to the 'cb'. + */ +uacpi_status uacpi_find_devices_at( + uacpi_namespace_node *parent, + const uacpi_char *const *hids, + uacpi_iteration_callback cb, + void *user +); + +/* + * Same as uacpi_find_devices_at, except this starts at the root and only + * matches one hid. + */ +uacpi_status uacpi_find_devices( + const uacpi_char *hid, + uacpi_iteration_callback cb, + void *user +); + +typedef enum uacpi_interrupt_model { + UACPI_INTERRUPT_MODEL_PIC = 0, + UACPI_INTERRUPT_MODEL_IOAPIC = 1, + UACPI_INTERRUPT_MODEL_IOSAPIC = 2, +} uacpi_interrupt_model; + +uacpi_status uacpi_set_interrupt_model(uacpi_interrupt_model); + +typedef struct uacpi_pci_routing_table_entry { + uacpi_u32 address; + uacpi_u32 index; + uacpi_namespace_node *source; + uacpi_u8 pin; +} uacpi_pci_routing_table_entry; + +typedef struct uacpi_pci_routing_table { + uacpi_size num_entries; + uacpi_pci_routing_table_entry entries[]; +} uacpi_pci_routing_table; +void uacpi_free_pci_routing_table(uacpi_pci_routing_table*); + +uacpi_status uacpi_get_pci_routing_table( + uacpi_namespace_node *parent, uacpi_pci_routing_table **out_table +); + +typedef struct uacpi_id_string { + // size of the string including the null byte + uacpi_u32 size; + uacpi_char *value; +} uacpi_id_string; +void uacpi_free_id_string(uacpi_id_string *id); + +/* + * Evaluate a device's _HID method and get its value. + * The returned structure must be freed using uacpi_free_id_string. + */ +uacpi_status uacpi_eval_hid(uacpi_namespace_node*, uacpi_id_string **out_id); + +typedef struct uacpi_pnp_id_list { + // number of 'ids' in the list + uacpi_u32 num_ids; + + // size of the 'ids' list including the string lengths + uacpi_u32 size; + + // list of PNP ids + uacpi_id_string ids[]; +} uacpi_pnp_id_list; +void uacpi_free_pnp_id_list(uacpi_pnp_id_list *list); + +/* + * Evaluate a device's _CID method and get its value. + * The returned structure must be freed using uacpi_free_pnp_id_list. + */ +uacpi_status uacpi_eval_cid(uacpi_namespace_node*, uacpi_pnp_id_list **out_list); + +/* + * Evaluate a device's _STA method and get its value. + * If this method is not found, the value of 'flags' is set to all ones. + */ +uacpi_status uacpi_eval_sta(uacpi_namespace_node*, uacpi_u32 *flags); + +/* + * Evaluate a device's _ADR method and get its value.
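+ * + * For a device on a PCI bus, the ACPI spec encodes the result with the + * device number in the high word and the function number in the low word, + * e.g. (a sketch; 'node' is assumed to be a device under a PCI bus): + * + *	uacpi_u64 adr; + * + *	if (uacpi_eval_adr(node, &adr) == UACPI_STATUS_OK) { + *		uacpi_u16 dev = (adr >> 16) & 0xFFFF; + *		uacpi_u16 func = adr & 0xFFFF; + *	}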
+ */ +uacpi_status uacpi_eval_adr(uacpi_namespace_node*, uacpi_u64 *out); + +/* + * Evaluate a device's _CLS method and get its value. + * The format of the returned string is BBSSPP where: + * BB => Base Class (e.g. 01 => Mass Storage) + * SS => Sub-Class (e.g. 06 => SATA) + * PP => Programming Interface (e.g. 01 => AHCI) + * The returned structure must be freed using uacpi_free_id_string. + */ +uacpi_status uacpi_eval_cls(uacpi_namespace_node*, uacpi_id_string **out_id); + +/* + * Evaluate a device's _UID method and get its value. + * The returned structure must be freed using uacpi_free_id_string. + */ +uacpi_status uacpi_eval_uid(uacpi_namespace_node*, uacpi_id_string **out_uid); + + +// uacpi_namespace_node_info->flags +#define UACPI_NS_NODE_INFO_HAS_ADR (1 << 0) +#define UACPI_NS_NODE_INFO_HAS_HID (1 << 1) +#define UACPI_NS_NODE_INFO_HAS_UID (1 << 2) +#define UACPI_NS_NODE_INFO_HAS_CID (1 << 3) +#define UACPI_NS_NODE_INFO_HAS_CLS (1 << 4) +#define UACPI_NS_NODE_INFO_HAS_SXD (1 << 5) +#define UACPI_NS_NODE_INFO_HAS_SXW (1 << 6) + +typedef struct uacpi_namespace_node_info { + // Size of the entire structure + uacpi_u32 size; + + // Object information + uacpi_object_name name; + uacpi_object_type type; + uacpi_u8 num_params; + + // UACPI_NS_NODE_INFO_HAS_* + uacpi_u8 flags; + + /* + * A mapping of [S1..S4] to the shallowest D state supported by the device + * in that S state. + */ + uacpi_u8 sxd[4]; + + /* + * A mapping of [S0..S4] to the deepest D state supported by the device + * in that S state to be able to wake itself. + */ + uacpi_u8 sxw[5]; + + uacpi_u64 adr; + uacpi_id_string hid; + uacpi_id_string uid; + uacpi_id_string cls; + uacpi_pnp_id_list cid; +} uacpi_namespace_node_info; +void uacpi_free_namespace_node_info(uacpi_namespace_node_info*); + +/* + * Retrieve information about a namespace node. This includes the attached + * object's type, name, number of parameters (if it's a method), the result of + * evaluating _ADR, _UID, _CLS, _HID, _CID, as well as _SxD and _SxW. + * + * The returned structure must be freed with uacpi_free_namespace_node_info. + */ +uacpi_status uacpi_get_namespace_node_info( + uacpi_namespace_node *node, uacpi_namespace_node_info **out_info +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/sys/include/dev/cons/ansi.h b/sys/include/dev/cons/ansi.h new file mode 100644 index 0000000..7a336d1 --- /dev/null +++ b/sys/include/dev/cons/ansi.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CONS_ANSI_H_ +#define _CONS_ANSI_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <sys/param.h> + +/* ANSI colors */ +#define ANSI_BLACK 0x000000 +#define ANSI_RED 0xAA0000 +#define ANSI_GREEN 0x00AA00 +#define ANSI_BLUE 0x00007F +#define ANSI_YELLOW 0xAA5500 +#define ANSI_MAGENTA 0xAA00AA +#define ANSI_CYAN 0x00AAAA +#define ANSI_WHITE 0xAAAAAA + +/* ANSI_FEED update codes */ +#define ANSI_UPDATE_COLOR -1 +#define ANSI_UPDATE_CURSOR -2 + +/* + * ANSI parser state machine. + * + * @prev: Previous char + * @csi: Encountered control seq introducer + * @reset_color: 1 if color is to be reset + * @set_fg: 1 if fg is being set + * @set_bg: 1 if bg is being set + * @fg: Foreground color + * @bg: Background color + */ +struct ansi_state { + char prev; + uint8_t csi : 2; + uint8_t reset_color : 1; + uint8_t set_fg : 1; + uint8_t set_bg : 1; + uint32_t fg; + uint32_t bg; +}; + +int ansi_feed(struct ansi_state *statep, char c); + +#endif /* !_CONS_ANSI_H_ */ diff --git a/sys/include/dev/cons/cons.h b/sys/include/dev/cons/cons.h index 8e2c2c6..7599dd5 100644 --- a/sys/include/dev/cons/cons.h +++ b/sys/include/dev/cons/cons.h @@ -33,15 +33,20 @@ #include <sys/types.h> #include <sys/spinlock.h> #include <dev/video/fbdev.h> +#include <dev/cons/consvar.h> +#include <dev/cons/ansi.h> struct cons_char { char c; uint32_t fg; uint32_t bg; + uint32_t x; + uint32_t y; }; struct cons_screen { struct fbdev fbdev; + struct ansi_state ansi_s; uint32_t fg; uint32_t bg; @@ -53,13 +58,20 @@ struct cons_screen { uint32_t ch_row; /* Current row */ uint32_t curs_col; /* Cursor col */ uint32_t curs_row; /* Cursor row */ + struct cons_buf *ib; /* Input buffer */ + struct cons_buf **ob; /* Output buffers */ struct cons_char last_chr; struct spinlock lock; }; void cons_init(void); void cons_expose(void); +void cons_update_color(struct cons_screen *scr, uint32_t fg, uint32_t bg); +void cons_clear_scr(struct cons_screen *scr, uint32_t bg); +void cons_reset_color(struct cons_screen *scr); +void cons_reset_cursor(struct cons_screen *scr); int cons_putch(struct cons_screen *scr, char c); +int cons_putstr(struct cons_screen *scr, const char *s, size_t len); extern struct cons_screen g_root_scr; diff --git a/sys/include/dev/cons/consvar.h b/sys/include/dev/cons/consvar.h new file mode 100644 index 0000000..253176b --- /dev/null +++ b/sys/include/dev/cons/consvar.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _DEV_CONSVAR_H_ +#define _DEV_CONSVAR_H_ + +#include <sys/types.h> +#include <sys/param.h> +#include <sys/spinlock.h> + +/* Buffer types */ +#define CONS_BUF_INPUT 0 +#define CONS_BUF_OUTPUT 1 + +/* Buffer flags */ +#define CONS_BUF_CLEAN BIT(0) /* Not recently written to */ + +extern struct cons_screen scr; + +/* + * The keyboard packet is two bytes + * and the bits are as follows: + * + * - 0:7 ~ ASCII character + * - 8:15 ~ Scancode + */ +struct cons_input { + union { + struct { + uint8_t chr; + uint8_t scancode; + }; + uint16_t data; + }; +}; + +/* + * A circular buffer for buffering + * keyboard input or console output. + */ +struct cons_buf { + struct spinlock lock; + union { + struct cons_input *ibuf; + struct cons_char *obuf; + void *raw; + }; + uint8_t tail; + uint8_t head; + uint8_t type; + uint8_t flags; + size_t len; +}; + +struct cons_buf *cons_new_buf(uint8_t type, size_t len); +int cons_obuf_push(struct cons_buf *bp, struct cons_char c); +int cons_obuf_pop(struct cons_buf *bp, struct cons_char *res); + +int cons_ibuf_push(struct cons_screen *scr, struct cons_input input); +int cons_ibuf_pop(struct cons_screen *scr, struct cons_input *res); + +#endif /* !_DEV_CONSVAR_H_ */ diff --git a/sys/include/dev/dmi/dmi.h b/sys/include/dev/dmi/dmi.h new file mode 100644 index 0000000..d24397a --- /dev/null +++ b/sys/include/dev/dmi/dmi.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _DMI_DMI_H_ +#define _DMI_DMI_H_ + +#include <sys/types.h> + +const char *dmi_vendor(void); +const char *dmi_prodver(void); +const char *dmi_product(void); +const char *dmi_cpu_manufact(void); + +#endif /* !_DMI_DMI_H_ */ diff --git a/sys/include/dev/ic/ahciregs.h b/sys/include/dev/ic/ahciregs.h index 4a4dc65..232b41e 100644 --- a/sys/include/dev/ic/ahciregs.h +++ b/sys/include/dev/ic/ahciregs.h @@ -34,8 +34,10 @@ #include <sys/param.h> struct hba_port { - volatile uint64_t clb; /* Command list base (1k-byte aligned) */ - volatile uint64_t fb; /* FIS base (256-byte aligned) */ + volatile uint32_t clb; /* Command list base low (1k-byte aligned) */ + volatile uint32_t clbu; /* Command list base upper */ + volatile uint32_t fb; /* FIS base (256-byte aligned) */ + volatile uint32_t fbu; /* FIS base upper */ volatile uint32_t is; /* Interrupt status */ volatile uint32_t ie; /* Interrupt enable */ volatile uint32_t cmd; /* Command and status */ @@ -86,6 +88,7 @@ struct hba_memspace { */ #define AHCI_PXSSTS_DET(SSTS) (SSTS & 0xF) #define AHCI_PXSSTS_IPM(SSTS) ((SSTS >> 8) & 0xF) +#define AHCI_PXSSTS_SPD(SSTS) ((SSTS >> 4) & 0xF) /* * Port SATA control bits @@ -98,6 +101,7 @@ struct hba_memspace { * See section 3.3.7 of the AHCI spec. */ #define AHCI_PXCMD_ST BIT(0) /* Start */ +#define AHCI_PXCMD_SUD BIT(1) /* Spin-up device */ #define AHCI_PXCMD_FRE BIT(4) /* FIS Receive Enable */ #define AHCI_PXCMD_FR BIT(14) /* FIS Receive Running */ #define AHCI_PXCMD_CR BIT(15) /* Command List Running */ @@ -122,6 +126,9 @@ struct hba_memspace { */ #define AHCI_CAP_NP(CAP) (CAP & 0x1F) /* Number of ports */ #define AHCI_CAP_NCS(CAP) ((CAP >> 8) & 0x1F) /* Number of command slots */ +#define AHCI_CAP_EMS(CAP) ((CAP >> 6) & 1) /* Enclosure management support */ +#define AHCI_CAP_SAL(CAP) ((CAP >> 25) & 1) /* Supports activity LED */ +#define AHCI_CAP_SSS(CAP) ((CAP >> 27) & 1) /* Supports staggered spin up */ /* * Device detection (DET) and Interface power @@ -132,6 +139,31 @@ struct hba_memspace { #define AHCI_DET_PRESENT 1 /* Device present (no PHY comm) */ #define AHCI_DET_COMM 3 /* Device present and phy comm established */ #define AHCI_IPM_ACTIVE 1 +#define AHCI_SPD_GEN1 1 /* 1.5 Gb/s */ +#define AHCI_SPD_GEN2 2 /* 3 Gb/s */ +#define AHCI_SPD_GEN3 3 /* 6 Gb/s */ + +/* + * PxSERR bits + * See section 3.3.12 of the AHCI spec + */ +#define AHCI_SERR_I BIT(0) /* Recovered data integrity error */ +#define AHCI_SERR_M BIT(1) /* Recovered comms error */ +#define AHCI_SERR_T BIT(8) /* Transient data integrity error */ +#define AHCI_SERR_C BIT(9) /* Persistent comms error */ +#define AHCI_SERR_P BIT(10) /* Protocol error ("oh fuck!" 
bit) */ +#define AHCI_SERR_E BIT(11) /* Internal error (only God knows, just pray) */ +#define AHCI_DIAG_N BIT(16) /* PhyRdy change */ +#define AHCI_DIAG_I BIT(17) /* PHY internal error */ +#define AHCI_DIAG_W BIT(18) /* Comm wake */ +#define AHCI_DIAG_B BIT(19) /* 10B to 8B decode error */ +#define AHCI_DIAG_C BIT(21) /* CRC error */ +#define AHCI_DIAG_H BIT(22) /* Handshake error */ +#define AHCI_DIAG_S BIT(23) /* Link sequence error */ +#define AHCI_DIAG_T BIT(24) /* Transport state transition error */ +#define AHCI_DIAG_F BIT(25) /* Unknown FIS type */ + +#define ATAPI_SIG 0xEB140101 /* * Device detection initialization values diff --git a/sys/include/dev/ic/ahcivar.h b/sys/include/dev/ic/ahcivar.h index 0d307cd..67f2efe 100644 --- a/sys/include/dev/ic/ahcivar.h +++ b/sys/include/dev/ic/ahcivar.h @@ -30,12 +30,210 @@ #ifndef _IC_AHCIVAR_H_ #define _IC_AHCIVAR_H_ +#include <sys/param.h> +#include <sys/types.h> +#include <sys/device.h> +#include <dev/dcdr/cache.h> #include <dev/ic/ahciregs.h> +#include <fs/ctlfs.h> +#define AHCI_DCDR_CAP 16 + +struct ahci_cmd_hdr; +extern const struct ctlops g_sata_bsize_ops; + +struct ata_identity { + uint16_t rsvd0 : 1; + uint16_t unused0 : 1; + uint16_t incomplete : 1; + uint16_t unused1 : 3; + uint16_t fixed_dev : 1; + uint16_t removable : 1; + uint16_t unused2 : 7; + uint16_t device_type : 1; + uint16_t ncylinders; + uint16_t specific_config; + uint16_t nheads; + uint16_t unused3[2]; + uint16_t sectors_per_track; + uint16_t vendor[3]; + char serial_number[20]; + uint16_t unused4[2]; + uint16_t unused5; + char firmware_rev[8]; + char model_number[40]; + char pad[256]; +}; + +/* + * AHCI Host Bus Adapter + * + * @io: HBA MMIO + * @maxports: Max number of HBA ports + * @nports: Number of implemented HBA ports. + * @nslots: Number of command slots + * @ems: Enclosure management support + * @sal: Supports activity LED + * @sss: Supports staggered spin up + */ struct ahci_hba { struct hba_memspace *io; + uint32_t maxports; + uint32_t nports; + uint32_t nslots; + uint8_t ems : 1; + uint8_t sal : 1; + uint8_t sss : 1; + devmajor_t major; +}; + +/* + * A device attached to a physical HBA port. + * + * [d]: Dynalloc'd memory + * [p]: Paged memory (allocated pageframe) + * + * @io: Memory mapped port registers + * @hba: HBA descriptor + * @cmdlist: Command list [p] + * @nlba: Max number of addressable blocks + * @fra: FIS receive area [p] + * @dev: Device minor number. 
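+ * + * A sketch of how these fields feed the split 64-bit base registers + * (VIRT_TO_PHYS() is a hypothetical helper; the AHCI spec requires the + * physical command list to be 1 KiB aligned and the FIS receive area + * 256 byte aligned): + * + *	uintptr_t pa = VIRT_TO_PHYS(dp->cmdlist); + * + *	dp->io->clb = pa & 0xFFFFFFFF; + *	dp->io->clbu = pa >> 32; + * + *	pa = VIRT_TO_PHYS(dp->fra); + *	dp->io->fb = pa & 0xFFFFFFFF; + *	dp->io->fbu = pa >> 32;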
+ */ +struct hba_device { + struct hba_port *io; + struct ahci_hba *hba; + struct ahci_cmd_hdr *cmdlist; + struct dcdr *dcdr; + uint32_t nlba; + void *fra; + dev_t dev; +}; + +/* + * Command header + * + * @cfl: Command FIS length + * @a: ATAPI + * @w: Write + * @p: Prefetchable + * @r: Reset + * @b: BIST + * @c: Clear busy upon R_OK + * @rsvd0: Reserved + * @pmp: Port multiplier port + * @prdtl: PRDT length (in entries) + * @prdbc: PRDT bytes transferred count + * @ctba: Command table descriptor base addr + * @rsvd1: Reserved + */ +struct ahci_cmd_hdr { + uint8_t cfl : 5; + uint8_t a : 1; + uint8_t w : 1; + uint8_t p : 1; + uint8_t r : 1; + uint8_t b : 1; + uint8_t c : 1; + uint8_t rsvd0 : 1; + uint8_t pmp : 4; + uint16_t prdtl; + volatile uint32_t prdbc; + uintptr_t ctba; + uint32_t rsvd1[4]; +}; + +/* + * Physical region descriptor + * + * @dba: Data base address + * @rsvd0: Reserved + * @dbc: Count + * @rsvd1: Reserved + * @i: Interrupt on completion + */ +struct ahci_prdt_entry { + uintptr_t dba; + uint32_t rsvd0; + uint32_t dbc : 22; + uint32_t rsvd1 : 9; + uint32_t i : 1; +}; + +/* + * Command table + * + * @cfis: Command FIS + * @acmd: ATAPI command + * @rsvd: Reserved + * @prdt: Physical region descriptors + */ +struct ahci_cmdtab { + uint8_t cfis[64]; + uint8_t acmd[16]; + uint8_t rsvd[48]; + struct ahci_prdt_entry prdt[1]; +}; + +/* + * Host to device FIS + * + * [h]: Set by host + * [d]: Set by device + * [srb]: Shadow register block + * + * @type: Must be 0x27 for H2D [h] + * @pmp: Port multiplier port [h] + * @c: Set to denote command FIS [h] + * @command: Command type [h/srb] + * @featurel: Features low (7:0) [h/srb] + * @lba0: LBA low [h/srb] + * @lba1: LBA mid [h/srb] + * @lba2: LBA hi [h/srb] + * @device: Set bit 7 for LBA [h/srb] + * @lba3: LBA (31:24) [h/srb] + * @lba4: LBA (39:32) [h/srb] + * @lba5: LBA (47:40) [h/srb] + * @featureh: Features high [h/srb] + * @countl: Count low (block aligned) [h/srb] + * @counth: Count high (block aligned) [h/srb] + * @icc: Isochronous command completion [h] + * @control: Device control [h] + */ +struct ahci_fis_h2d { + uint8_t type; + uint8_t pmp : 4; + uint8_t rsvd0 : 3; + uint8_t c : 1; + uint8_t command; + uint8_t featurel; + uint8_t lba0; + uint8_t lba1; + uint8_t lba2; + uint8_t device; + uint8_t lba3; + uint8_t lba4; + uint8_t lba5; + uint8_t featureh; + uint8_t countl; + uint8_t counth; + uint8_t icc; + uint8_t control; + uint8_t rsvd1[4]; }; #define AHCI_TIMEOUT 500 /* In ms */ +/* AHCI size constants */ +#define AHCI_FIS_SIZE 256 +#define AHCI_CMDTAB_SIZE 256 +#define AHCI_CMDENTRY_SIZE 32 +#define AHCI_SECTOR_SIZE 512 + +/* AHCI FIS types */ +#define FIS_TYPE_H2D 0x27 +#define FIS_TYPE_D2H 0x34 + +/* ATA commands */ +#define ATA_CMD_NOP 0x00 +#define ATA_CMD_IDENTIFY 0xEC +#define ATA_CMD_READ_DMA 0x25 +#define ATA_CMD_WRITE_DMA 0x35 + #endif /* !_IC_AHCIVAR_H_ */ diff --git a/sys/include/dev/pci/pci.h b/sys/include/dev/pci/pci.h index 4bfacdd..144b500 100644 --- a/sys/include/dev/pci/pci.h +++ b/sys/include/dev/pci/pci.h @@ -54,6 +54,7 @@ struct pci_device { uint8_t slot; uint8_t func; + uint16_t segment; uint16_t msix_capoff; uint16_t device_id; uint16_t vendor_id; @@ -61,6 +62,7 @@ uint8_t pci_subclass; uint8_t prog_if; uint8_t hdr_type; + uint8_t pci_express : 1; uint8_t pri_bus; uint8_t sec_bus; @@ -74,7 +76,7 @@ struct msi_intr { const char *name; - void(*handler)(void *); + int(*handler)(void *); }; pcireg_t pci_readl(struct pci_device *dev, uint32_t offset); @@ -84,6 +86,8 @@ int pci_map_bar(struct pci_device *dev, uint8_t barno, void **vap); void pci_writel(struct pci_device
*dev, uint32_t offset, pcireg_t val); int pci_enable_msix(struct pci_device *dev, const struct msi_intr *intr); +void pci_add_device(struct pci_device *dev); + void pci_msix_eoi(void); int pci_init(void); diff --git a/sys/include/dev/phy/e1000regs.h b/sys/include/dev/phy/e1000regs.h new file mode 100644 index 0000000..7caceee --- /dev/null +++ b/sys/include/dev/phy/e1000regs.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PHY_E1000_REGS_H_ +#define _PHY_E1000_REGS_H_ + +#include <sys/types.h> +#include <sys/param.h> + +/* + * E1000 register offsets + * + * XXX: Notes about reserved registers: + * + * - The `EERD' register is reserved and should NOT be touched + * for the 82544GC/EI card. + * + * - The `FLA' register is only usable for the 82541xx and + * 82547GI/EI cards; it is reserved and should NOT be + * touched on any other cards. + * + * - The `TXCW' and `RXCW' registers are reserved and should NOT + * be touched for the 82540EP/EM, 82541xx and 82547GI/EI cards. + * + * - The `LEDCTL' register is reserved and should NOT be touched + * for the 82544GC/EI card.
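+ * + * For reference, a single EEPROM word read via `EERD' looks roughly like + * this on cards where it is usable (a sketch; mmio_read32() and + * mmio_write32() are hypothetical accessors, 'addr' is the word address + * driven into bits 15:8, and the result is returned in bits 31:16): + * + *	mmio_write32(base + E1000_EERD, (addr << 8) | E1000_EERD_START); + *	while (!(mmio_read32(base + E1000_EERD) & E1000_EERD_DONE)); + *	word = (uint16_t)(mmio_read32(base + E1000_EERD) >> 16);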
+ */ +#define E1000_CTL 0x00000 /* Control register */ +#define E1000_STATUS 0x00008 /* Status register */ +#define E1000_EECD 0x00010 /* EEPROM/flash control and data register */ +#define E1000_EERD 0x00014 /* EEPROM/flash read register */ +#define E1000_FLA 0x0001C /* Flash access register */ +#define E1000_CTRL_EXT 0x00018 /* Extended device control register */ +#define E1000_MDIC 0x00020 /* PHY management data interface control register */ +#define E1000_FCAL 0x00028 /* Flow control address low register */ +#define E1000_FCAH 0x0002C /* Flow control address high register */ +#define E1000_FCT 0x00030 /* Flow control type register */ +#define E1000_VET 0x00038 /* VLAN ethertype register */ +#define E1000_FCTTV 0x00170 /* Flow control transmit timer value register */ +#define E1000_TXCW 0x00178 /* Transmit config word register */ +#define E1000_RXCW 0x00180 /* Receive config word register */ +#define E1000_LEDCTL 0x00E00 /* LED control register */ + +/* + * Device control register (`ctl') bits + * + * See section 13.4.1 of the PCI/PCI-X Intel Gigabit + * Ethernet Controllers spec + * + * XXX: Notes about reserved bits: + * + * - The CTL.LRST bit is reserved and should NOT be touched + * for the 82540EP/EM, 82541xx, or 82547GI/EI cards. + */ +#define E1000_CTL_FD BIT(0) /* Full-duplex */ +#define E1000_CTL_LRST BIT(3) /* Link-reset */ +#define E1000_CTL_RST BIT(26) /* Device reset */ + +/* + * EEPROM/flash control and data register (`eecd') + * bits + * + * See section 13.4.3 of the PCI/PCI-X Intel Gigabit + * Ethernet controller spec + */ +#define E1000_EECD_SK BIT(0) /* EEPROM clock input */ +#define E1000_EECD_CS BIT(1) /* EEPROM chip select */ +#define E1000_EECD_DI BIT(2) /* EEPROM data input */ +#define E1000_EECD_DO BIT(3) /* EEPROM data output */ +#define E1000_EECD_FWE BIT(4) /* EEPROM flash write enable ctl (4:5) */ +#define E1000_EECD_REQ BIT(6) /* Request EEPROM access */ +#define E1000_EECD_GNT BIT(7) /* Grant EEPROM access */ +#define E1000_EECD_PRES BIT(8) /* EEPROM present */ +#define E1000_EECD_SIZE BIT(9) /* EEPROM size (1024-bit [0], 4096-bit [1]) */ +#define E1000_EECD_TYPE BIT(13) /* EEPROM type (microwire [0], SPI [1]) */ + +/* + * EEPROM read (`eerd') register bits + * + * See section 13.4.4 of the PCI/PCI-X Intel Gigabit + * Ethernet controller spec + */ +#define E1000_EERD_START BIT(0) /* Start read */ +#define E1000_EERD_DONE BIT(4) /* EEPROM read finished */ + +/* + * EEPROM word addresses + */ +#define E1000_HWADDR0 0x00 /* Word 0 */ +#define E1000_HWADDR1 0x01 /* Word 1 */ +#define E1000_HWADDR2 0x02 /* Word 2 */ + +#endif /* !_PHY_E1000_REGS_H_ */ diff --git a/sys/include/dev/phy/rt8139.h b/sys/include/dev/phy/rtl.h index ef7b127..f3178d0 100644 --- a/sys/include/dev/phy/rt8139.h +++ b/sys/include/dev/phy/rtl.h @@ -33,7 +33,14 @@ #include <sys/types.h> #include <sys/param.h> -#define RT_IDR0 0x00 /* MAC address */ +/* MAC address */ +#define RT_IDR0 0x00 +#define RT_IDR1 0x01 +#define RT_IDR2 0x02 +#define RT_IDR3 0x03 +#define RT_IDR4 0x04 +#define RT_IDR5 0x05 + #define RT_MAR0 0x08 /* Multicast filter */ #define RT_TXSTATUS0 0x10 /* Transmit status (4 32bit regs) */ #define RT_TXADDR0 0x20 /* Tx descriptors (also 4 32bit) */ @@ -64,12 +71,19 @@ #define RT_AS_LPAR 0x68 /* Auto-negotiation link partner reg (16 bits) */ #define RT_AS_EXPANSION 0x6A /* Auto-negotiation expansion reg (16 bits) */ +#define RT_TXAD_N(N) (RT_TXADDR0 + (N)) +#define RT_TXSTATUS_N(N) (RT_TXSTATUS0 + (N)) + /* Command register bits */ #define RT_BUFEN BIT(0) /* Buffer empty */ #define RT_TE
BIT(2) /* Transmitter enable */ #define RT_RE BIT(3) /* Receiver enable */ #define RT_RST BIT(4) /* Reset */ +/* 93C46 EEPROM mode bits */ +#define RT_EEM0 BIT(6) +#define RT_EEM1 BIT(7) + /* Receive register bits */ #define RT_AAP BIT(0) /* Accept all packets */ #define RT_APM BIT(1) /* Accept physical match packets */ diff --git a/sys/include/dev/timer.h b/sys/include/dev/timer.h index e54adcc..fe91323 100644 --- a/sys/include/dev/timer.h +++ b/sys/include/dev/timer.h @@ -69,6 +69,7 @@ struct timer { const char *name; /* e.g "HPET" */ size_t(*calibrate)(void); /* Returns frequency, 0 for unspecified */ size_t(*get_time_usec)(void); /* Time since init (microseconds) */ + size_t(*get_time_nsec)(void); /* Time since init (nanoseconds) */ size_t(*get_time_sec)(void); /* Time since init (seconds) */ int(*msleep)(size_t ms); int(*usleep)(size_t us); diff --git a/sys/include/dev/usb/xhciregs.h b/sys/include/dev/usb/xhciregs.h index 69515e4..1cbfd14 100644 --- a/sys/include/dev/usb/xhciregs.h +++ b/sys/include/dev/usb/xhciregs.h @@ -98,6 +98,13 @@ struct xhci_opregs { #define XHCI_RTS(BASE, RTSOFF) PTR_OFFSET(BASE, RTSOFF) #define XHCI_CMD_DB(BASE, DBOFF) PTR_OFFSET(BASE, DBOFF) +/* Runtime register offsets */ +#define XHCI_RT_IMAN 0x20 +#define XHCI_RT_IMOD 0x24 +#define XHCI_RT_ERSTSZ 0x28 +#define XHCI_RT_ERSTBA 0x30 +#define XHCI_RT_ERDP 0x38 + /* Support protocol cap fields */ #define XHCI_PROTO_ID(PROTO) (PROTO & 0xFF) #define XHCI_PROTO_MINOR(PROTO) ((PROTO >> 16) & 0xFF) diff --git a/sys/include/dev/usb/xhcivar.h b/sys/include/dev/usb/xhcivar.h index 0488ad8..a9a8fc1 100644 --- a/sys/include/dev/usb/xhcivar.h +++ b/sys/include/dev/usb/xhcivar.h @@ -32,6 +32,7 @@ #include <sys/types.h> #include <sys/types.h> +#include <sys/param.h> #include <dev/usb/xhciregs.h> #define XHCI_TIMEOUT 500 /* In ms */ @@ -41,6 +42,9 @@ #define XHCI_MAX_PROTOS 4 #define XHCI_IMOD_DEFAULT 0 +/* Quirks */ +#define XHCI_QUIRK_HANDOFF BIT(0) + /* * USB proto (USB 2.0 or 3.0) */ @@ -108,6 +112,7 @@ struct xhci_hc { uint32_t *evring; uint8_t maxslots; uint8_t cr_cycle : 1; + uint16_t quirks; size_t maxports; size_t protocnt; struct xhci_caps *caps; diff --git a/sys/include/dev/video/fbdev.h b/sys/include/dev/video/fbdev.h index d23fcd6..c9fec94 100644 --- a/sys/include/dev/video/fbdev.h +++ b/sys/include/dev/video/fbdev.h @@ -38,6 +38,7 @@ struct fbdev { uint32_t width; uint32_t height; uint32_t pitch; + uint32_t bpp; }; /* @@ -51,5 +52,6 @@ fbdev_get_index(const struct fbdev *fbdev, uint32_t x, uint32_t y) } struct fbdev fbdev_get(void); +void fbdev_init_dev(void); #endif /* !_DEV_FBDEV_H_ */ diff --git a/sys/include/fs/ctlfs.h b/sys/include/fs/ctlfs.h new file mode 100644 index 0000000..90f42f0 --- /dev/null +++ b/sys/include/fs/ctlfs.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _FS_CTLFS_H_ +#define _FS_CTLFS_H_ + +#include <sys/sio.h> + +struct ctlfs_dev; + +struct ctlops { + int(*read)(struct ctlfs_dev *cdp, struct sio_txn *sio); + int(*write)(struct ctlfs_dev *cdp, struct sio_txn *sio); +}; + +/* + * Ctlfs op arguments + * + * [1]: Union fields, only one may be set at a time. + * + * @devname [1]: Device name (node name) + * @ctlname [1]: Control name (node entry name) + * @ops: Control operations + * @mode: Access flags + */ +struct ctlfs_dev { + union { + const char *devname; + const char *ctlname; + }; + const struct ctlops *ops; + mode_t mode; +}; + +int ctlfs_create_node(const char *name, const struct ctlfs_dev *dp); +int ctlfs_create_entry(const char *name, const struct ctlfs_dev *dp); + +#endif /* !_FS_CTLFS_H_ */ diff --git a/sys/include/fs/tmpfs.h b/sys/include/fs/tmpfs.h new file mode 100644 index 0000000..b2a5bbe --- /dev/null +++ b/sys/include/fs/tmpfs.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef _FS_TMPFS_H_ +#define _FS_TMPFS_H_ + +#include <sys/types.h> +#include <sys/limits.h> +#include <sys/vnode.h> +#include <sys/queue.h> +#include <sys/spinlock.h> +#include <vm/vm_obj.h> + +extern const struct vops g_tmpfs_vops; + +/* Tmpfs node types */ +#define TMPFS_NONE (VNON) /* No type */ +#define TMPFS_REG (VREG) /* Regular file [f] */ +#define TMPFS_DIR (VDIR) /* Directory [d] */ + +struct tmpfs_node; + +/* + * A tmpfs node represents an object within the + * tmpfs namespace such as a file, directory, etc. + * + * @rpath: /tmp/ relative path (for lookups) + * @type: The tmpfs node type [one-to-one to vtype] + * @len: Length of buffer + * @data: The backing file data + * @dirvp: Vnode of the parent node + * @vp: Vnode of the current node + * @lock: Lock protecting this node + */ +struct tmpfs_node { + char rpath[PATH_MAX]; + uint8_t type; + size_t len; + void *data; + struct vnode *dirvp; + struct vnode *vp; + struct spinlock lock; + TAILQ_HEAD(, tmpfs_node) dirents; + TAILQ_ENTRY(tmpfs_node) link; +}; + +#endif /* !_FS_TMPFS_H_ */ diff --git a/sys/include/lib/stdbool.h b/sys/include/lib/stdbool.h new file mode 100644 index 0000000..a7a35f1 --- /dev/null +++ b/sys/include/lib/stdbool.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LIB_STDBOOL_H_ +#define _LIB_STDBOOL_H_ + +typedef _Bool bool; + +#define true 1 +#define false 0 + +#endif /* !_LIB_STDBOOL_H_ */ diff --git a/sys/include/lib/stddef.h b/sys/include/lib/stddef.h new file mode 100644 index 0000000..cf23841 --- /dev/null +++ b/sys/include/lib/stddef.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LIB_STDDEF_H_ +#define _LIB_STDDEF_H_ + +/* Compat */ +#include <sys/types.h> + +#endif /* !_LIB_STDDEF_H_ */ diff --git a/sys/include/lib/stdint.h b/sys/include/lib/stdint.h new file mode 100644 index 0000000..6eb99f0 --- /dev/null +++ b/sys/include/lib/stdint.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _LIB_STDINT_H_ +#define _LIB_STDINT_H_ + +/* Compat */ +#include <sys/types.h> + +#endif /* !_LIB_STDINT_H_ */ diff --git a/sys/include/lib/string.h b/sys/include/lib/string.h index c138cf1..c09e6f4 100644 --- a/sys/include/lib/string.h +++ b/sys/include/lib/string.h @@ -47,5 +47,6 @@ int strcmp(const char *s1, const char *s2); int strncmp(const char *s1, const char *s2, size_t n); int atoi(char *s); +void *memmove(void *s1, const void *s2, size_t n); #endif /* !_LIB_STRING_H_ */ diff --git a/sys/include/net/ethertypes.h b/sys/include/net/ethertypes.h new file mode 100644 index 0000000..753ea10 --- /dev/null +++ b/sys/include/net/ethertypes.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NET_ETHERTYPES_H_ +#define _NET_ETHERTYPES_H_ + +#define ETHERTYPE_IPV4 0x0800 +#define ETHERTYPE_ARP 0x0806 + +#endif /* !_NET_ETHERTYPES_H_ */ diff --git a/sys/include/net/if.h b/sys/include/net/if.h new file mode 100644 index 0000000..bd57509 --- /dev/null +++ b/sys/include/net/if.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NET_IF_H_ +#define _NET_IF_H_ + +#define IFNAMESIZ 16 + +#endif /* !_NET_IF_H_ */ diff --git a/sys/include/net/if_arp.h b/sys/include/net/if_arp.h new file mode 100644 index 0000000..cbfb2fe --- /dev/null +++ b/sys/include/net/if_arp.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NETINET_IF_ARP_H_ +#define _NETINET_IF_ARP_H_ + +#include <sys/types.h> +#include <net/ethertypes.h> + +/* ARP hardware types */ +#define ARP_HWTYPE_ETHER 1 + +/* ARP operation types */ +#define ARP_REQUEST 1 +#define ARP_REPLY 2 + +struct arp_hdr { + uint16_t hw_type; /* See ARP_HWTYPE_* */ + uint16_t proto_type; /* See ETHERTYPE_* */ + uint8_t hw_len; /* See ETHER_ADDR_LEN */ + uint8_t proto_len; /* Protocol address length */ + uint16_t op_type; /* See operation types above */ +}; + +#endif /* !_NETINET_IF_ARP_H_ */ diff --git a/sys/include/net/if_var.h b/sys/include/net/if_var.h new file mode 100644 index 0000000..e032ff4 --- /dev/null +++ b/sys/include/net/if_var.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NET_IF_VAR_H_ +#define _NET_IF_VAR_H_ + +#include <sys/queue.h> +#include <sys/types.h> +#include <net/if.h> +#include <net/netbuf.h> + +#define NETIF_ADDR_LEN 32 /* In bytes */ + +/* Return values for netif hooks */ +#define NETIF_ENQ_OK 0 /* Enqueued */ +#define NETIF_ENQ_FLUSHED 1 /* Internal queue flushed */ + +/* Interface types */ +#define NETIF_TYPE_ANY 0 /* Any type */ +#define NETIF_TYPE_WIRE 1 /* Ethernet */ + +/* + * Represents the address of a network + * interface. + * + * @data: Raw address bytes + */ +struct netif_addr { + uint8_t data[NETIF_ADDR_LEN]; +}; + +/* + * Represents a network interface + * + * @name: Interface name + * @type: Interface type (see NETIF_TYPE*) + * @tx_enq: Enqueue a packet + * @tx_start: Start a packet + * + * XXX: tx_enq() returns 0 on success and 1 if a flush was needed + * and the packets have been transmitted. Less than zero values + * indicate failure. + */ +struct netif { + char name[IFNAMESIZ]; + uint8_t type; + TAILQ_ENTRY(netif) link; + struct netif_addr addr; + int(*tx_enq)(struct netif *nifp, struct netbuf *nbp, void *data); + void(*tx_start)(struct netif *nifp); +}; + +void netif_add(struct netif *nifp); +int netif_lookup(const char *name, uint8_t type, struct netif **res); + +#endif /* !_NET_IF_VAR_H_ */ diff --git a/sys/include/net/netbuf.h b/sys/include/net/netbuf.h new file mode 100644 index 0000000..33ba06f --- /dev/null +++ b/sys/include/net/netbuf.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _NET_NETBUF_H_ +#define _NET_NETBUF_H_ + +#include <sys/types.h> + +#define NETBUF_LEN 256 + +struct netbuf { + char data[NETBUF_LEN]; + size_t len; +}; + +#endif /* !_NET_NETBUF_H_ */ diff --git a/sys/include/netinet/if_ether.h b/sys/include/netinet/if_ether.h new file mode 100644 index 0000000..d3dc9b7 --- /dev/null +++ b/sys/include/netinet/if_ether.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
+ */ + +#ifndef _NETINET_IF_ETHER_H_ +#define _NETINET_IF_ETHER_H_ + +#include <sys/types.h> +#include <net/if_arp.h> +#include <net/if_var.h> + +#define ETHER_ADDR_LEN 6 + +struct ether_arp { + struct arp_hdr hdr; + uint8_t sha[ETHER_ADDR_LEN]; + uint8_t spa[4]; + uint8_t tha[ETHER_ADDR_LEN]; + uint8_t tpa[4]; +}; + +struct ether_frame { + uint8_t ether_daddr[ETHER_ADDR_LEN]; + uint8_t ether_saddr[ETHER_ADDR_LEN]; + uint16_t ether_type; +}; + +int arp_request(struct netif *nifp, uint8_t *sproto, uint8_t *tproto); +int arp_reply(struct netif *netif, uint8_t *sproto, uint8_t *tproto); + +#endif /* !_NETINET_IF_ETHER_H_ */ diff --git a/sys/include/sys/bitops.h b/sys/include/sys/bitops.h new file mode 100644 index 0000000..e8e9567 --- /dev/null +++ b/sys/include/sys/bitops.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_BITOPS_H_ +#define _SYS_BITOPS_H_ + +#include <sys/cdefs.h> + +#define BOPS_M1 0x5555555555555555ULL /* 01010101... */ +#define BOPS_M2 0x3333333333333333ULL /* 00110011... */ +#define BOPS_M4 0x0F0F0F0F0F0F0F0FULL /* 00001111... 
*/ +#define BOPS_M8 0x00FF00FF00FF00FFULL /* x4(0), x4(1) */ +#define BOPS_M16 0x0000FFFF0000FFFFULL /* x16(0), x16(1) */ +#define BOPS_M32 0x00000000FFFFFFFFULL /* x32(0), x32(1) */ +#define BOPS_H0 0x0101010101010101ULL /* sum of 256^{0,1,2,3...} */ + +__always_inline static inline int +popcnt(uint64_t x) +{ + x -= (x >> 1) & BOPS_M1; + x = (x & BOPS_M2) + ((x >> 2) & BOPS_M2); + x = (x + (x >> 4)) & BOPS_M4; + return (x * BOPS_H0) >> 56; +} + +#endif /* !_SYS_BITOPS_H_ */ diff --git a/sys/include/sys/cdefs.h b/sys/include/sys/cdefs.h index 61106fa..725193e 100644 --- a/sys/include/sys/cdefs.h +++ b/sys/include/sys/cdefs.h @@ -42,7 +42,9 @@ #define __dead __attribute__((__noreturn__)) #define __cold __attribute__((__cold__)) #define __dead_cold __attribute__((__noreturn__, __cold__)) +#define __aligned(n) __attribute__((__aligned__((n)))) #define __unused __attribute__((__unused__)) +#define __used __attribute__((__used__)) #define __nothing ((void)0) #define __likely(exp) __builtin_expect(((exp) != 0), 1) #define __unlikely(exp) __builtin_expect(((exp) != 0), 0) diff --git a/sys/include/sys/device.h b/sys/include/sys/device.h index f5f92ad..04b66fc 100644 --- a/sys/include/sys/device.h +++ b/sys/include/sys/device.h @@ -36,21 +36,28 @@ #include <sys/queue.h> #include <sys/proc.h> #include <sys/sio.h> +#include <vm/vm_obj.h> typedef uint8_t devmajor_t; /* Device operation typedefs */ typedef int(*dev_read_t)(dev_t, struct sio_txn *, int); typedef int(*dev_write_t)(dev_t, struct sio_txn *, int); +typedef int(*dev_bsize_t)(dev_t); struct cdevsw { int(*read)(dev_t dev, struct sio_txn *sio, int flags); int(*write)(dev_t dev, struct sio_txn *sio, int flags); + paddr_t(*mmap)(dev_t dev, size_t size, off_t off, int flags); + + /* Private */ + struct vm_object vmobj; }; struct bdevsw { int(*read)(dev_t dev, struct sio_txn *sio, int flags); int(*write)(dev_t dev, struct sio_txn *sio, int flags); + int(*bsize)(dev_t dev); }; void *dev_get(devmajor_t major, dev_t dev); @@ -61,10 +68,12 @@ int dev_register(devmajor_t major, dev_t dev, void *devsw); int dev_noread(void); int dev_nowrite(void); +int dev_nobsize(void); /* Device operation stubs */ #define noread ((dev_read_t)dev_noread) #define nowrite ((dev_write_t)dev_nowrite) +#define nobsize ((dev_bsize_t)dev_nobsize) #endif /* _KERNEL */ #endif /* !_SYS_DEVICE_H_ */ diff --git a/sys/include/sys/disklabel.h b/sys/include/sys/disklabel.h new file mode 100644 index 0000000..895c35e --- /dev/null +++ b/sys/include/sys/disklabel.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
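The popcnt() above is the classic SWAR bit count: each step folds adjacent partial sums (bit pairs, then nibbles, then bytes), and the final multiply by BOPS_H0 accumulates every byte's count into the top byte before the shift by 56. A tiny self-check sketch, not part of this commit:

#include <sys/bitops.h>
#include <sys/panic.h>

static void
popcnt_selftest(void)
{
    if (popcnt(0x0ULL) != 0 || popcnt(0xFFULL) != 8)
        panic("popcnt: byte-level count is broken\n");
    if (popcnt(0x8000000000000001ULL) != 2)
        panic("popcnt: edge bits miscounted\n");
    if (popcnt(~0ULL) != 64)
        panic("popcnt: full-word count is broken\n");
}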
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_DISKLABEL_H_ +#define _SYS_DISKLABEL_H_ + +#include <sys/types.h> + +#define DISK_MAG 0x4F445421UL /* "ODT!" */ + +/* + * Represents a disk table. + * + * @magic: Magic number (`DISK_MAG') + * @sect_size: Disk sector size + */ +struct disklabel { + uint32_t magic; + uint32_t sect_size; +}; + +#endif /* !_SYS_DISKLABEL_H_ */ diff --git a/sys/include/sys/driver.h b/sys/include/sys/driver.h index 05c40fa..9f08de3 100644 --- a/sys/include/sys/driver.h +++ b/sys/include/sys/driver.h @@ -31,27 +31,83 @@ #define _SYS_DRIVER_H_ #include <sys/cdefs.h> +#include <sys/proc.h> +#include <sys/types.h> #if defined(_KERNEL) +/* Variable driver data */ +struct driver_var { + uint8_t deferred : 1; +}; + struct driver { int(*init)(void); + struct driver_var *data; }; +extern struct proc g_proc0; + +/* Early (high priority) drivers */ extern char __drivers_init_start[]; extern char __drivers_init_end[]; +/* Deferred (low priority) drivers */ +extern char __driversd_init_start[]; +extern char __driversd_init_end[]; + #define DRIVER_EXPORT(INIT) \ + static struct driver_var __driver_var = { \ + .deferred = 0 \ + }; \ + \ __attribute__((used, section(".drivers"))) \ static struct driver __driver_desc = { \ .init = INIT, \ + .data = &__driver_var \ + } + +/* + * Some drivers are not required to start up + * early for proper system operation and may + * be deferred to start at a later time. + * + * Examples of such (deferrable) drivers include code + * that waits for I/O (e.g., disks, network cards, + * et cetera). This allows for faster boot times + * as only *required* drivers are started before + * everything else. + * + * Drivers that wish to be deferred may export themselves + * via the DRIVER_DEFER() macro. The DRIVER_DEFERRED() + * macro gives the value of 1 if the current driver + * context has yet to be initialized. The driver may + * use this to defer requests for I/O. 
+ */ +#define DRIVER_DEFER(INIT) \ + static struct driver_var __driver_var = { \ + .deferred = 1 \ + }; \ + \ + __attribute__((used, section(".drivers.defer"))) \ + static struct driver __driver_desc = { \ + .init = INIT, \ + .data = &__driver_var \ } +#define DRIVER_DEFERRED() __driver_var.deferred + #define DRIVERS_INIT() \ for (struct driver *__d = (struct driver *)__drivers_init_start; \ (uintptr_t)__d < (uintptr_t)__drivers_init_end; ++__d) \ { \ __d->init(); \ } + +#define DRIVERS_SCHED() \ + spawn(&g_proc0, __driver_init_td, NULL, 0, NULL) + +void __driver_init_td(void); + #endif /* _KERNEL */ #endif /* !_SYS_DRIVER_H_ */ diff --git a/sys/include/sys/elf.h b/sys/include/sys/elf.h index af5f6d6..76c6d43 100644 --- a/sys/include/sys/elf.h +++ b/sys/include/sys/elf.h @@ -496,4 +496,70 @@ typedef struct { Elf64_Xword sh_entsize; /* Entry size if section holds table */ } Elf64_Shdr; +/* Special section indices. */ + +#define SHN_UNDEF 0 /* Undefined section */ +#define SHN_LORESERVE 0xff00 /* Start of reserved indices */ +#define SHN_LOPROC 0xff00 /* Start of processor-specific */ +#define SHN_BEFORE 0xff00 /* Order section before all others + (Solaris). */ +#define SHN_AFTER 0xff01 /* Order section after all others + (Solaris). */ +#define SHN_HIPROC 0xff1f /* End of processor-specific */ +#define SHN_LOOS 0xff20 /* Start of OS-specific */ +#define SHN_HIOS 0xff3f /* End of OS-specific */ +#define SHN_ABS 0xfff1 /* Associated symbol is absolute */ +#define SHN_COMMON 0xfff2 /* Associated symbol is common */ +#define SHN_XINDEX 0xffff /* Index is in extra table. */ +#define SHN_HIRESERVE 0xffff /* End of reserved indices */ + +/* Legal values for sh_type (section type). */ + +#define SHT_NULL 0 /* Section header table entry unused */ +#define SHT_PROGBITS 1 /* Program data */ +#define SHT_SYMTAB 2 /* Symbol table */ +#define SHT_STRTAB 3 /* String table */ +#define SHT_RELA 4 /* Relocation entries with addends */ +#define SHT_HASH 5 /* Symbol hash table */ +#define SHT_DYNAMIC 6 /* Dynamic linking information */ +#define SHT_NOTE 7 /* Notes */ +#define SHT_NOBITS 8 /* Program space with no data (bss) */ +#define SHT_REL 9 /* Relocation entries, no addends */ +#define SHT_SHLIB 10 /* Reserved */ +#define SHT_DYNSYM 11 /* Dynamic linker symbol table */ +#define SHT_INIT_ARRAY 14 /* Array of constructors */ +#define SHT_FINI_ARRAY 15 /* Array of destructors */ +#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */ +#define SHT_GROUP 17 /* Section group */ +#define SHT_SYMTAB_SHNDX 18 /* Extended section indeces */ +#define SHT_NUM 19 /* Number of defined types. */ +#define SHT_LOOS 0x60000000 /* Start OS-specific. */ +#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */ +#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */ +#define SHT_SUNW_move 0x6ffffffa +#define SHT_SUNW_COMDAT 0x6ffffffb +#define SHT_SUNW_syminfo 0x6ffffffc +#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */ +#define SHT_HIOS 0x6fffffff /* End OS-specific type */ +#define SHT_LOPROC 0x70000000 /* Start of processor-specific */ +#define SHT_HIPROC 0x7fffffff /* End of processor-specific */ +#define SHT_LOUSER 0x80000000 /* Start of application-specific */ +#define SHT_HIUSER 0x8fffffff /* End of application-specific */ + +/* Legal values for sh_flags (section flags). 
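A sketch of the intended usage of the deferral machinery above, assuming a hypothetical disk driver: export the init routine through DRIVER_DEFER() and gate early I/O requests on DRIVER_DEFERRED().

#include <sys/driver.h>
#include <sys/errno.h>

static int mydisk_init(void);

/* Lands in .drivers.defer; run later by the driver init thread */
DRIVER_DEFER(mydisk_init);

static int
mydisk_init(void)
{
    /* Probe hardware, register device nodes, etc. */
    return 0;
}

static int
mydisk_read(void)
{
    /* Refuse I/O until the deferred init has actually run */
    if (DRIVER_DEFERRED())
        return -EAGAIN;
    return 0;
}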
*/ + +#define SHF_WRITE (1 << 0) /* Writable */ +#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */ +#define SHF_EXECINSTR (1 << 2) /* Executable */ +#define SHF_MERGE (1 << 4) /* Might be merged */ +#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */ +#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */ +#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */ +#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling + required */ +#define SHF_GROUP (1 << 9) /* Section is member of a group. */ +#define SHF_TLS (1 << 10) /* Section hold thread-local data. */ +#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */ +#define SHF_MASKOS 0x0ff00000 /* OS-specific. */ +#define SHF_MASKPROC 0xf0000000 /* Processor-specific */ #endif /* _SYS_ELF_H_ */ diff --git a/sys/include/sys/endian.h b/sys/include/sys/endian.h new file mode 100644 index 0000000..5cbc94a --- /dev/null +++ b/sys/include/sys/endian.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_ENDIAN_H_ +#define _SYS_ENDIAN_H_ + +#include <sys/cdefs.h> +#include <sys/types.h> + +#define swap16(x) __swap16((x)) +#define swap32(x) __swap32((x)) + +__always_inline static inline uint16_t +__swap16(uint16_t x) +{ + return ((x << 8) & 0xFF00) | ((x >> 8) & 0x00FF); +} + +__always_inline static inline uint32_t +__swap32(uint32_t x) +{ + return ((x << 24) & 0xFF000000) | + ((x << 8) & 0x00FF0000) | + ((x >> 8) & 0x0000FF00) | + ((x >> 24) & 0x000000FF); +} + +#endif /* !_SYS_ENDIAN_H_ */ diff --git a/sys/include/sys/exec.h b/sys/include/sys/exec.h index 7e720fc..aa2a729 100644 --- a/sys/include/sys/exec.h +++ b/sys/include/sys/exec.h @@ -32,7 +32,6 @@ #include <sys/types.h> -#if defined(_KERNEL) /* Danger: Do not change these !! 
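These helpers swap unconditionally, so the caller must know the wire and host byte orders. A sketch for a big-endian on-the-wire field, assuming a little-endian host (the usual configuration for the amd64 and aarch64 targets in this tree); 0x0806 is the standard ARP ethertype.

#include <sys/endian.h>
#include <netinet/if_ether.h>

/* ether_type is big-endian on the wire */
static uint16_t
ether_type_host(const struct ether_frame *ef)
{
    return swap16(ef->ether_type);
}

/* e.g. matching ARP in a receive path */
static int
is_arp(const struct ether_frame *ef)
{
    return ether_type_host(ef) == 0x0806;
}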
*/ #define AT_NULL 0 @@ -45,7 +44,9 @@ #define AT_RANDOM 7 #define AT_EXECFN 8 #define AT_PAGESIZE 9 +#define _AT_MAX 16 +#if defined(_KERNEL) #define MAX_PHDRS 32 #define STACK_PUSH(PTR, VAL) *(--(PTR)) = VAL #define AUXVAL(PTR, TAG, VAL) \ diff --git a/sys/include/sys/fbdev.h b/sys/include/sys/fbdev.h new file mode 100644 index 0000000..e206889 --- /dev/null +++ b/sys/include/sys/fbdev.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _SYS_FBDEV_H_ +#define _SYS_FBDEV_H_ + +struct fbattr { + uint32_t width; + uint32_t height; + uint32_t pitch; + uint32_t bpp; +}; + +#endif /* !_SYS_FBDEV_H_ */ diff --git a/sys/include/sys/fcntl.h b/sys/include/sys/fcntl.h index 122a378..83d38af 100644 --- a/sys/include/sys/fcntl.h +++ b/sys/include/sys/fcntl.h @@ -33,6 +33,7 @@ #define O_RDONLY 0x0000 #define O_WRONLY 0x0001 #define O_RDWR 0x0002 +#define O_CREAT 0x0004 /* Makes seal checking easier */ #if defined(_KERNEL) diff --git a/sys/include/sys/filedesc.h b/sys/include/sys/filedesc.h index a544811..4ce2db2 100644 --- a/sys/include/sys/filedesc.h +++ b/sys/include/sys/filedesc.h @@ -31,8 +31,15 @@ #define _SYS_FILEDESC_H_ #include <sys/types.h> +#if defined(_KERNEL) #include <sys/vnode.h> +#include <sys/syscall.h> #include <sys/spinlock.h> +#include <sys/syscall.h> + +#define SEEK_SET 0 +#define SEEK_CUR 1 +#define SEEK_END 2 struct filedesc { int fdno; @@ -50,8 +57,12 @@ int fd_write(unsigned int fd, void *buf, size_t count); int fd_alloc(struct filedesc **fd_out); int fd_open(const char *pathname, int flags); +off_t fd_seek(int fildes, off_t offset, int whence); int fd_dup(int fd); struct filedesc *fd_get(unsigned int fdno); +scret_t sys_lseek(struct syscall_args *scargs); + +#endif /* _KERNEL */ #endif /* !_SYS_FILEDESC_H_ */ diff --git a/sys/include/sys/krq.h b/sys/include/sys/krq.h new file mode 100644 index 0000000..9cb6ec6 --- /dev/null +++ b/sys/include/sys/krq.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
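The whence arithmetic behind fd_seek(), implemented later in this diff in kern_descrip.c, follows the usual lseek() rules. A short sketch of what each SEEK_* constant does to the descriptor offset:

#include <sys/filedesc.h>

static void
seek_examples(int fd)
{
    fd_seek(fd, 128, SEEK_SET);   /* offset = 128 */
    fd_seek(fd, 16, SEEK_CUR);    /* offset += 16 -> 144 */
    fd_seek(fd, 0, SEEK_END);     /* offset = file size (from the vattr) */
}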
+ */ + +#ifndef _SYS_KRQ_H_ +#define _SYS_KRQ_H_ + +#include <sys/syscall.h> + +#if defined(_KERNEL) +scret_t sys_inject(struct syscall_args *scargs); +#else +int inject(const char *path); +#endif /* _KERNEL */ +#endif /* !_SYS_KRQ_H_ */ diff --git a/sys/include/sys/limits.h b/sys/include/sys/limits.h index 6185719..f56958e 100644 --- a/sys/include/sys/limits.h +++ b/sys/include/sys/limits.h @@ -32,6 +32,9 @@ #define PATH_MAX 1024 #define SSIZE_MAX 32767 +#define ARG_MAX 4096 #define CHAR_BIT 8 - +#if defined(_KERNEL) +#define CPU_MAX 256 +#endif /* _KERNEL */ #endif /* !_SYS_LIMITS_H_ */ diff --git a/sys/include/sys/mman.h b/sys/include/sys/mman.h index 4ead9ba..de360e4 100644 --- a/sys/include/sys/mman.h +++ b/sys/include/sys/mman.h @@ -35,6 +35,8 @@ #if defined(_KERNEL) #include <sys/tree.h> #include <vm/vm_obj.h> +#else +#include <stddef.h> #endif /* _KERNEL */ /* @@ -49,10 +51,10 @@ #endif /* !_KERNEL */ /* mmap() flags */ +#define MAP_ANON 0x0000 #define MAP_SHARED 0x0001 #define MAP_PRIVATE 0x0002 #define MAP_FIXED 0x0004 -#define MAP_ANON 0x0008 #if defined(_KERNEL) /* @@ -80,19 +82,19 @@ struct mmap_lgdr { size_t nbytes; }; -/* Kernel munmap() routine */ -int munmap_at(void *addr, size_t len); - -/* Kernel mmap() routine */ -void *mmap_at(void *addr, size_t len, int prot, int flags, - int fildes, off_t off); - int mmap_entrycmp(const struct mmap_entry *a, const struct mmap_entry *b); RBT_PROTOTYPE(lgdr_entries, mmap_entry, hd, mmap_entrycmp) -#endif /* _KERNEL */ /* Syscall layer */ -scret_t mmap(struct syscall_args *scargs); -scret_t munmap(struct syscall_args *scargs); +scret_t sys_mmap(struct syscall_args *scargs); +scret_t sys_munmap(struct syscall_args *scargs); +#endif /* _KERNEL */ + +/* Kernel munmap() routine */ +int munmap(void *addr, size_t len); + +/* Kernel mmap() routine */ +void *mmap(void *addr, size_t len, int prot, int flags, + int fildes, off_t off); #endif /* !_SYS_MMAN_H_ */ diff --git a/sys/include/sys/mount.h b/sys/include/sys/mount.h index 3b5d89e..636c7bf 100644 --- a/sys/include/sys/mount.h +++ b/sys/include/sys/mount.h @@ -46,6 +46,8 @@ */ #define MOUNT_RAMFS "initramfs" #define MOUNT_DEVFS "devfs" +#define MOUNT_CTLFS "ctlfs" +#define MOUNT_TMPFS "tmpfs" struct vfsops; struct mount; @@ -57,6 +59,8 @@ extern mountlist_t g_mountlist; /* Filesystem operations */ extern const struct vfsops g_initramfs_vfsops; extern const struct vfsops g_devfs_vfsops; +extern const struct vfsops g_ctlfs_vfsops; +extern const struct vfsops g_tmpfs_vfsops; struct mount { char *name; diff --git a/sys/include/sys/namei.h b/sys/include/sys/namei.h index f81f905..ccd7f35 100644 --- a/sys/include/sys/namei.h +++ b/sys/include/sys/namei.h @@ -32,6 +32,9 @@ #include <sys/types.h> #include <sys/vnode.h> +#include <sys/param.h> + +#define NAMEI_WANTPARENT BIT(0) /* Request parent only */ struct nameidata { const char *path; /* Pathname */ diff --git a/sys/include/sys/param.h b/sys/include/sys/param.h index c0a5686..7331d5f 100644 --- a/sys/include/sys/param.h +++ b/sys/include/sys/param.h @@ -67,8 +67,12 @@ /* Gives 1 if pointer is aligned */ #define PTR_ALIGNED(PTR, ALIGN) (!((uintptr_t)PTR & (ALIGN - 1))) -/* Adds a value to a pointer */ +/* + * PTR_OFFSET: Adds an offset to the pointer + * PTR_NOFFSET: Subtracts a negative offset from the pointer + */ #define PTR_OFFSET(PTR, OFF) ((void *)((uintptr_t)PTR + OFF)) +#define PTR_NOFFSET(PTR, NOFF) ((void *)((uintptr_t)PTR - NOFF)) #define NELEM(a) (sizeof(a) / sizeof(a[0])) diff --git a/sys/include/sys/proc.h b/sys/include/sys/proc.h 
index c561e91..241d990 100644 --- a/sys/include/sys/proc.h +++ b/sys/include/sys/proc.h @@ -63,26 +63,43 @@ struct proc { struct spinlock vcache_lock; struct trapframe tf; struct pcb pcb; + struct proc *parent; + void *data; size_t priority; + int exit_status; bool rested; - uint32_t flags; + volatile uint32_t flags; + uint32_t nleaves; uintptr_t stack_base; struct spinlock ksigq_lock; + TAILQ_HEAD(, proc) leafq; + TAILQ_ENTRY(proc) leaf_link; TAILQ_HEAD(, ksiginfo) ksigq; TAILQ_ENTRY(proc) link; }; #define PROC_EXITING BIT(0) /* Exiting */ #define PROC_EXEC BIT(1) /* Exec called (cleared by sched) */ +#define PROC_ZOMB BIT(2) /* Zombie (dead but not deallocated) */ +#define PROC_LEAFQ BIT(3) /* Leaf queue is active */ +#define PROC_WAITED BIT(4) /* Being waited on by parent */ +#define PROC_KTD BIT(5) /* Kernel thread */ +#define PROC_SLEEP BIT(6) /* Thread execution paused */ struct proc *this_td(void); -int md_fork(struct proc *p, struct proc *parent, uintptr_t ip); +struct proc *get_child(struct proc *cur, pid_t pid); +void proc_reap(struct proc *td); -void md_td_stackinit(struct proc *td, void *stack_top, struct exec_prog *prog); +int md_spawn(struct proc *p, struct proc *parent, uintptr_t ip); + +scret_t sys_spawn(struct syscall_args *scargs); +pid_t spawn(struct proc *cur, void(*func)(void), void *p, int flags, struct proc **newprocp); + +uintptr_t md_td_stackinit(struct proc *td, void *stack_top, struct exec_prog *prog); __dead void md_td_kick(struct proc *td); int fork1(struct proc *cur, int flags, void(*ip)(void), struct proc **newprocp); -int exit1(struct proc *td); +int exit1(struct proc *td, int flags); __dead scret_t sys_exit(struct syscall_args *scargs); #endif /* _KERNEL */ diff --git a/sys/include/sys/reboot.h b/sys/include/sys/reboot.h index 86fc45d..846073d 100644 --- a/sys/include/sys/reboot.h +++ b/sys/include/sys/reboot.h @@ -32,12 +32,15 @@ #include <sys/param.h> #include <sys/cdefs.h> +#include <sys/syscall.h> -#define REBOOT_HALT BIT(0) /* Halt instead of rebooting */ - -#if defined(_KERNEL) +#define REBOOT_RESET 0x00000000 +#define REBOOT_HALT BIT(0) /* Halt instead of rebooting */ +#define REBOOT_POWEROFF BIT(1) /* Power off (needs REBOOT_HALT set too) */ void cpu_reboot(int method); +#if defined(_KERNEL) +scret_t sys_reboot(struct syscall_args *scargs); #endif /* _KERNEL */ #endif /* _SYS_REBOOT_H_ */ diff --git a/sys/include/sys/sched.h b/sys/include/sys/sched.h index 7f5e65f..7d17607 100644 --- a/sys/include/sys/sched.h +++ b/sys/include/sys/sched.h @@ -37,6 +37,10 @@ void sched_init(void); void sched_yield(void); + +void sched_switch_to(struct trapframe *tf, struct proc *td); +void sched_detach(struct proc *td); + __dead void sched_enter(void); void sched_enqueue_td(struct proc *td); diff --git a/sys/include/sys/schedvar.h b/sys/include/sys/schedvar.h index 509e2c9..5ed9f5f 100644 --- a/sys/include/sys/schedvar.h +++ b/sys/include/sys/schedvar.h @@ -36,7 +36,7 @@ #include <machine/cdefs.h> #if defined(_KERNEL) -#define DEFAULT_TIMESLICE_USEC 500 +#define DEFAULT_TIMESLICE_USEC 9000 #define SHORT_TIMESLICE_USEC 10 #define SCHED_POLICY_MLFQ 0x00U /* Multilevel feedback queue */ diff --git a/sys/include/sys/spawn.h b/sys/include/sys/spawn.h new file mode 100644 index 0000000..0c54e4c --- /dev/null +++ b/sys/include/sys/spawn.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. 
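The fork1() path gives way to spawn() in this commit. A sketch of starting a kernel thread with the new interface, modeled on what DRIVERS_SCHED() expands to; worker_td is hypothetical.

#include <sys/proc.h>
#include <sys/driver.h>     /* g_proc0 */

static void
worker_td(void)
{
    /* ... do some one-shot kernel work ... */

    /* kernel threads exit themselves, cf. __driver_init_td() */
    exit1(this_td(), 0);
}

static pid_t
start_worker(void)
{
    /* parent is proc0; no argument, no flags, no handle kept */
    return spawn(&g_proc0, worker_td, NULL, 0, NULL);
}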
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_SPAWN_H_ +#define _SYS_SPAWN_H_ + +#include <sys/types.h> +#include <sys/param.h> + +#define SPAWN_WAIT BIT(0) + +#if !defined(_KERNEL) +pid_t spawn(const char *pathname, char **argv, char **envp, int flags); +#endif /* _KERNEL */ +#endif /* !_SYS_SPAWN_H_ */ diff --git a/sys/include/sys/stat.h b/sys/include/sys/stat.h index 6303630..5409f2c 100644 --- a/sys/include/sys/stat.h +++ b/sys/include/sys/stat.h @@ -32,6 +32,8 @@ #include <sys/types.h> +#define S_IFBLK 0060000 + struct stat { dev_t st_dev; ino_t st_ino; @@ -46,4 +48,6 @@ struct stat { time_t st_ctime; }; +int stat(const char *path, struct stat *buf); + #endif /* _SYS_STAT_H_ */ diff --git a/sys/include/sys/syscall.h b/sys/include/sys/syscall.h index 41d1e78..08bd989 100644 --- a/sys/include/sys/syscall.h +++ b/sys/include/sys/syscall.h @@ -33,7 +33,10 @@ #if !defined(__ASSEMBLER__) #include <sys/types.h> #include <sys/cdefs.h> -#endif /* !__ASSEMBLER__ */ +#if defined(_KERNEL) || defined(_OLIBC) +#include <machine/syscall.h> +#endif /* _KERNEL || _OLIBC */ +#endif #define SYS_none 0 #define SYS_exit 1 @@ -43,6 +46,14 @@ #define SYS_stat 5 #define SYS_sysctl 6 #define SYS_write 7 +#define SYS_spawn 8 +#define SYS_reboot 9 +#define SYS_mmap 10 +#define SYS_munmap 11 +#define SYS_access 12 +#define SYS_lseek 13 +#define SYS_sleep 14 +#define SYS_inject 15 #if defined(_KERNEL) /* Syscall return value and arg type */ @@ -64,76 +75,4 @@ extern const size_t MAX_SYSCALLS; extern scret_t(*g_sctab[])(struct syscall_args *); #endif /* _KERNEL */ -#if !defined(__ASSEMBLER__) -__always_inline static inline long -syscall0(uint64_t code) -{ - volatile long ret; - __ASMV("int $0x80" : "=a"(ret) : "a"(code)); - return ret; -} - -__always_inline static inline long -syscall1(uint64_t code, uint64_t arg0) -{ - volatile long ret; - __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0) : "memory"); - return ret; -} - -__always_inline static long inline -syscall2(uint64_t code, uint64_t arg0, uint64_t arg1) -{ - volatile long ret; - __ASMV("int $0x80" : 
"=a"(ret) : "a"(code), "D"(arg0), "S"(arg1) : "memory"); - return ret; -} - -__always_inline static inline long -syscall3(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2) -{ - volatile long ret; - __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2) : "memory"); - return ret; -} - -__always_inline static inline long -syscall4(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3) -{ - volatile long ret; - register uint64_t _arg3 asm("r10") = arg3; - __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2), "r"(_arg3) : "memory"); - return ret; -} - -__always_inline static inline long -syscall5(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) -{ - volatile long ret; - register uint64_t _arg3 asm("r10") = arg3; - register uint64_t _arg4 asm("r9") = arg4; - __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2), "r"(_arg3), "r"(_arg4) : "memory"); - return ret; -} - -__always_inline static inline long -syscall6(uint64_t code, uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) -{ - volatile long ret; - register uint64_t _arg3 asm("r10") = arg3; - register uint64_t _arg4 asm("r9") = arg4; - register uint64_t _arg5 asm("r8") = arg5; - __ASMV("int $0x80" : "=a"(ret) : "a"(code), "D"(arg0), "S"(arg1), "d"(arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) : "memory"); - return ret; -} - -#define _SYSCALL_N(a0, a1, a2, a3, a4, a5, a6, name, ...) \ - name - -#define syscall(...) \ -_SYSCALL_N(__VA_ARGS__, syscall6, syscall5, \ - syscall4, syscall3, syscall2, syscall1, \ - syscall0)(__VA_ARGS__) - -#endif /* !__ASSEMBLER__ */ #endif /* _SYS_SYSCALL_H_ */ diff --git a/sys/include/sys/syslog.h b/sys/include/sys/syslog.h index defb341..b9d34ab 100644 --- a/sys/include/sys/syslog.h +++ b/sys/include/sys/syslog.h @@ -31,11 +31,13 @@ #define _SYS_SYSLOG_H_ #include <stdarg.h> +#include <stdbool.h> #if defined(_KERNEL) #define OMIT_TIMESTAMP "\x01" +void syslog_silence(bool option); void kprintf(const char *fmt, ...); void serial_init(void); void serial_putc(char c); diff --git a/sys/include/sys/time.h b/sys/include/sys/time.h new file mode 100644 index 0000000..ce66885 --- /dev/null +++ b/sys/include/sys/time.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TIME_H_
+#define _SYS_TIME_H_
+
+#include <sys/types.h>
+#if defined(_KERNEL)
+#include <sys/syscall.h>
+#endif /* _KERNEL */
+
+struct timeval {
+ time_t tv_sec;
+ time_t tv_usec;
+};
+
+struct timespec {
+ time_t tv_sec;
+ long tv_nsec;
+};
+
+struct date {
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t sec;
+ uint8_t min;
+ uint8_t hour;
+};
+
+#if defined(_KERNEL)
+scret_t sys_sleep(struct syscall_args *scargs);
+#endif
+#endif /* !_SYS_TIME_H_ */
diff --git a/sys/include/sys/types.h b/sys/include/sys/types.h
index 5501cc3..5cb2fc7 100644
--- a/sys/include/sys/types.h
+++ b/sys/include/sys/types.h
@@ -36,8 +36,7 @@
 /* Compat */
 #if defined(_KERNEL)
-#define true 1
-#define false 0
+#include <stdbool.h>
 #if !defined(NULL)
 #define NULL ((void *)0)
 #endif /* !NULL */
@@ -113,11 +112,6 @@ typedef uint64_t time_t;
 typedef __ptrdiff_t ptrdiff_t;
 #endif /* _HAVE_PTRDIFF_T */
-/* Compat */
-#if defined(_KERNEL)
-typedef _Bool bool;
-#endif
-
 #if defined(_KERNEL)
 typedef uintptr_t paddr_t;
 typedef uintptr_t vaddr_t;
diff --git a/sys/include/sys/vfs.h b/sys/include/sys/vfs.h
index 1ff722a..fcb7391 100644
--- a/sys/include/sys/vfs.h
+++ b/sys/include/sys/vfs.h
@@ -40,6 +40,7 @@ scret_t sys_close(struct syscall_args *args);
 scret_t sys_read(struct syscall_args *scargs);
 scret_t sys_write(struct syscall_args *sargs);
 scret_t sys_stat(struct syscall_args *scargs);
+scret_t sys_access(struct syscall_args *scargs);
 #endif /* _KERNEL */
 #endif /* !_SYS_VFS_H_ */
diff --git a/sys/include/sys/vnode.h b/sys/include/sys/vnode.h
index 33092f9..1a6b2aa 100644
--- a/sys/include/sys/vnode.h
+++ b/sys/include/sys/vnode.h
@@ -32,6 +32,7 @@
 #include <sys/types.h>
 #include <sys/queue.h>
+#include <sys/vnode.h>
 #include <sys/atomic.h>
 #include <sys/sio.h>
 #include <vm/vm_obj.h>
@@ -47,6 +48,8 @@ struct vnode {
 const struct vops *vops;
 struct vm_object vobj;
 uint32_t refcount;
+ dev_t major;
+ dev_t dev;
 TAILQ_ENTRY(vnode) vcache_link;
 };
@@ -83,6 +86,13 @@ struct vop_lookup_args {
 struct vnode **vpp; /* Result vnode */
 };
+struct vop_create_args {
+ const char *path; /* Full path */
+ const char *ppath; /* Parent path */
+ struct vnode *dirvp; /* Directory vnode */
+ struct vnode **vpp; /* Result vnode */
+};
+
 /*
 * A field in this structure is unavailable
 * if it has a value of VNOVAL.
@@ -103,6 +113,7 @@ struct vops {
 int(*read)(struct vnode *vp, struct sio_txn *sio);
 int(*write)(struct vnode *vp, struct sio_txn *sio);
 int(*reclaim)(struct vnode *vp);
+ int(*create)(struct vop_create_args *args);
 };
 extern struct vnode *g_root_vnode;
diff --git a/sys/include/vm/pmap.h b/sys/include/vm/pmap.h
index 9eed184..e0549d4 100644
--- a/sys/include/vm/pmap.h
+++ b/sys/include/vm/pmap.h
@@ -76,9 +76,25 @@ int pmap_map(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot);
 int pmap_unmap(struct vas vas, vaddr_t va);
 /*
+ * Returns true if the page is clean (unmodified), otherwise
+ * returns false.
+ */ +bool pmap_is_clean(struct vas vas, vaddr_t va); + +/* + * Marks a page as clean (unmodified) + */ +void pmap_mark_clean(struct vas vas, vaddr_t va); + +/* * Mark a virtual address with a specific * caching type. */ int pmap_set_cache(struct vas vas, vaddr_t va, int type); +/* + * Machine dependent pmap init code. + */ +int pmap_init(void); + #endif /* !_VM_PMAP_H_ */ diff --git a/sys/include/vm/vm_device.h b/sys/include/vm/vm_device.h new file mode 100644 index 0000000..da476e2 --- /dev/null +++ b/sys/include/vm/vm_device.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VM_DEVICE_H_ +#define _VM_DEVICE_H_ + +#include <sys/types.h> +#include <sys/vnode.h> +#include <sys/device.h> +#include <vm/vm_pager.h> +#include <vm/vm_obj.h> + +extern const struct vm_pagerops vm_vnops; + +struct vm_object *dv_attach(devmajor_t major, dev_t dev, vm_prot_t prot); + +#endif /* !_VM_DEVICE_H_ */ diff --git a/sys/kern/driver_subr.c b/sys/kern/driver_subr.c new file mode 100644 index 0000000..b53463a --- /dev/null +++ b/sys/kern/driver_subr.c @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
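A sketch of how a pager or writeback path might use the new dirty-tracking pair from pmap.h; flush_page() is hypothetical and stands in for whatever actually writes the page back.

#include <vm/pmap.h>

static void
sync_page(struct vas vas, vaddr_t va)
{
    /* "clean" here means unmodified since the last sync */
    if (pmap_is_clean(vas, va))
        return;

    /* flush_page(vas, va);  -- write the page back (hypothetical) */
    pmap_mark_clean(vas, va);
}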
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/driver.h> +#include <sys/proc.h> +#include <sys/cdefs.h> +#include <sys/syslog.h> +#include <sys/panic.h> +#include <dev/timer.h> +#include <machine/sync.h> + +/* + * Initialize early drivers + * + * XXX: This should *NOT* be called directly, + * use DRIVERS_SCHED() instead. + */ +void +__driver_init_td(void) +{ + const struct driver *dp; + struct driver_var *var; + struct proc *td; + uintptr_t start, end; + + td = this_td(); + start = (uintptr_t)__driversd_init_start; + end = (uintptr_t)__driversd_init_end; + + for (dp = (void *)start; (uintptr_t)dp < end; ++dp) { + var = dp->data; + if (var->deferred) { + dp->init(); + var->deferred = 0; + } + } + + exit1(td, 0); + __builtin_unreachable(); +} diff --git a/sys/kern/exec_elf64.c b/sys/kern/exec_elf64.c index c9040dd..9706e77 100644 --- a/sys/kern/exec_elf64.c +++ b/sys/kern/exec_elf64.c @@ -30,6 +30,7 @@ #include <sys/elf.h> #include <sys/exec.h> #include <sys/param.h> +#include <sys/syslog.h> #include <sys/namei.h> #include <sys/proc.h> #include <sys/vnode.h> @@ -42,14 +43,49 @@ #include <string.h> #include <machine/pcb.h> +#define pr_trace(fmt, ...) kprintf("elf64: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + #define PHDR(HDRP, IDX) \ (void *)((uintptr_t)HDRP + (HDRP)->e_phoff + (HDRP->e_phentsize * IDX)) +#define SHDR(HDRP, IDX) \ + (void *)((uintptr_t)HDRP + (HDRP)->e_shoff + (HDRP->e_shentsize * IDX)) + struct elf_file { char *data; size_t size; }; +static int +elf_parse_shdrs(Elf64_Ehdr *eh) +{ + Elf64_Shdr *shp; + uint32_t nshdr; + + if (eh == NULL) { + return -EINVAL; + } + + nshdr = eh->e_shnum; + for (uint32_t i = 0; i < nshdr; ++i) { + shp = SHDR(eh, i); + + /* Drop null entries */ + if (shp->sh_type == SHT_NULL) { + continue; + } + + switch (shp->sh_type) { + case SHT_NOBITS: + memset((void *)shp->sh_addr, 0x0, shp->sh_size); + break; + } + } + + return 0; +} + /* * Load the file and give back an "elf_file" * structure. 
@@ -188,6 +224,7 @@ elf64_load(const char *pathname, struct proc *td, struct exec_prog *prog) if ((status = elf64_verify(hdr)) != 0) goto done; + memset(loadmap, 0, sizeof(loadmap)); pcbp = &td->pcb; start = -1; end = 0; @@ -209,6 +246,7 @@ elf64_load(const char *pathname, struct proc *td, struct exec_prog *prog) /* Try to allocate page frames */ physmem = vm_alloc_frame(page_count); if (physmem == 0) { + pr_error("out of physical memory\n"); status = -ENOMEM; break; } @@ -237,6 +275,7 @@ elf64_load(const char *pathname, struct proc *td, struct exec_prog *prog) } } + elf_parse_shdrs(hdr); memcpy(prog->loadmap, loadmap, sizeof(loadmap)); prog->start = start; prog->end = end; diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index aa78e8d..6b3e09b 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -35,6 +35,8 @@ #include <sys/exec.h> #include <sys/driver.h> #include <sys/panic.h> +#include <sys/systm.h> +#include <dev/acpi/uacpi.h> #include <dev/cons/cons.h> #include <dev/acpi/acpi.h> #include <machine/cpu.h> @@ -42,7 +44,14 @@ #include <vm/vm.h> #include <string.h> -static struct proc proc0; +#define _START_PATH "/usr/sbin/init" +#if defined(_INSTALL_MEDIA) +#define _START_ARG "/usr/sbin/install" +#else +#define _START_ARG NULL +#endif /* _INSTALL_MEDIA */ + +struct proc g_proc0; static void copyright(void) @@ -54,19 +63,19 @@ copyright(void) static void start_init(void) { -#if 0 struct proc *td = this_td(); struct execve_args execve_args; - char *argv[] = { "/usr/sbin/init", NULL }; + char *argv[] = { _START_PATH, _START_ARG, NULL }; char *envp[] = { NULL }; + kprintf("starting init...\n"); execve_args.pathname = argv[0]; execve_args.argv = argv; execve_args.envp = envp; if (execve(td, &execve_args) != 0) panic("failed to load init\n"); -#endif - for (;;); + + __builtin_unreachable(); } int @@ -75,15 +84,15 @@ main(void) /* Setup serial driver */ serial_init(); + /* Init the virtual memory subsystem */ + vm_init(); + /* Startup the console */ cons_init(); copyright(); kprintf("Starting Hyra/%s v%s: %s\n", HYRA_ARCH, HYRA_VERSION, HYRA_BUILDDATE); - /* Init the virtual memory subsystem */ - vm_init(); - /* Start the ACPI subsystem */ acpi_init(); @@ -96,18 +105,28 @@ main(void) /* Expose the console to devfs */ cons_expose(); + uacpi_init(); + /* Start scheduler and bootstrap APs */ md_intoff(); sched_init(); + memset(&g_proc0, 0, sizeof(g_proc0)); + /* Startup pid 1 */ - memset(&proc0, 0, sizeof(proc0.tf)); - fork1(&proc0, 0, start_init, NULL); + spawn(&g_proc0, start_init, NULL, 0, NULL); + md_inton(); - /* Load all drivers */ + /* Load all early drivers */ DRIVERS_INIT(); - /* Bootstrap APs and here we go! */ + /* Only log to kmsg from here */ + syslog_silence(true); + + /* + * Bootstrap APs, schedule all other drivers + * and here we go! 
+ */ mp_bootstrap_aps(&g_bsp_ci); sched_enter(); __builtin_unreachable(); diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c index 201db3e..d4c9885 100644 --- a/sys/kern/kern_descrip.c +++ b/sys/kern/kern_descrip.c @@ -148,6 +148,8 @@ static int fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write) { char *kbuf = NULL; + ssize_t n; + uint32_t seal; struct filedesc *filedes; struct sio_txn sio; scret_t retval = 0; @@ -158,8 +160,17 @@ fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write) } filedes = fd_get(fd); - kbuf = dynalloc(count); + seal = filedes->flags; + /* Check the seal */ + if (write && !ISSET(seal, O_ALLOW_WR)) { + return -EPERM; + } + if (!write && ISSET(seal, O_WRONLY)) { + return -EPERM; + } + + kbuf = dynalloc(count); if (kbuf == NULL) { retval = -ENOMEM; goto done; @@ -186,6 +197,7 @@ fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write) sio.buf = kbuf; sio.offset = filedes->offset; + spinlock_acquire(&filedes->lock); if (write) { /* Copy in user buffer */ if (copyin(buf, kbuf, count) < 0) { @@ -194,30 +206,62 @@ fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write) } /* Call VFS write hook */ - if ((count = vfs_vop_write(filedes->vp, &sio)) < 0) { - retval = -EIO; + if ((n = vfs_vop_write(filedes->vp, &sio)) < 0) { + retval = n; goto done; } } else { - if ((count = vfs_vop_read(filedes->vp, &sio)) < 0) { - retval = -EIO; + if ((n = vfs_vop_read(filedes->vp, &sio)) < 0) { + retval = n; goto done; } - } - if (copyout(kbuf, buf, count) < 0) { - retval = -EFAULT; - goto done; + /* End of file? */ + if (n == 0) { + retval = 0; + goto done; + } + + if (copyout(kbuf, buf, count) < 0) { + retval = -EFAULT; + goto done; + } } + /* Increment the offset per read */ + filedes->offset += n; retval = count; done: if (kbuf != NULL) { dynfree(kbuf); } + spinlock_release(&filedes->lock); return retval; } +static int +fd_do_create(const char *path, struct nameidata *ndp) +{ + struct vop_create_args cargs; + struct vnode *dirvp = ndp->vp; + const struct vops *vops = dirvp->vops; + int error; + + if (vops->create == NULL) { + return -EINVAL; + } + + cargs.path = path; + cargs.ppath = ndp->path; + cargs.dirvp = dirvp; + cargs.vpp = &ndp->vp; + if ((error = vops->create(&cargs)) < 0) { + return error; + } + + return 0; +} + int fd_read(unsigned int fd, void *buf, size_t count) { @@ -236,18 +280,17 @@ fd_write(unsigned int fd, void *buf, size_t count) * * @pathname: Path of file to open. * @flags: Flags to use. - * - * TODO: Use of flags. */ int fd_open(const char *pathname, int flags) { int error; + const struct vops *vops; struct filedesc *filedes; struct nameidata nd; nd.path = pathname; - nd.flags = 0; + nd.flags = ISSET(flags, O_CREAT) ? 
NAMEI_WANTPARENT : 0; if ((error = namei(&nd)) < 0) { return error; @@ -258,6 +301,14 @@ fd_open(const char *pathname, int flags) return error; } + vops = nd.vp->vops; + if (ISSET(flags, O_CREAT) && vops->create != NULL) { + error = fd_do_create(pathname, &nd); + } + if (error < 0) { + return error; + } + filedes->vp = nd.vp; filedes->flags = flags; return filedes->fdno; @@ -285,3 +336,51 @@ fd_dup(int fd) new_desc->vp = tmp->vp; return new_desc->fdno; } + +off_t +fd_seek(int fildes, off_t offset, int whence) +{ + struct filedesc *tmp; + struct vattr attr; + struct vop_getattr_args getattr_args; + + tmp = fd_get(fildes); + if (tmp == NULL) { + return -EBADF; + } + + getattr_args.vp = tmp->vp; + getattr_args.res = &attr; + if ((vfs_vop_getattr(tmp->vp, &getattr_args)) < 0) { + return -EPIPE; + } + + switch (whence) { + case SEEK_SET: + tmp->offset = offset; + break; + case SEEK_CUR: + tmp->offset += offset; + break; + case SEEK_END: + tmp->offset = attr.size + offset; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Update file offset + * + * arg0: `filedes' + * arg1: `offset' + * arg2: `whence' + */ +scret_t +sys_lseek(struct syscall_args *scargs) +{ + return fd_seek(scargs->arg0, scargs->arg1, scargs->arg2); +} diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index bf6a26e..2a53b8a 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -37,6 +37,7 @@ #include <vm/map.h> #include <vm/physmem.h> #include <machine/pcb.h> +#include <machine/cdefs.h> #include <string.h> /* @@ -87,6 +88,7 @@ execve(struct proc *td, const struct execve_args *args) release_stack(td); /* Save program state */ + md_intoff(); memcpy(&td->exec, &prog, sizeof(td->exec)); /* Set new stack and map it to userspace */ @@ -99,7 +101,7 @@ execve(struct proc *td, const struct execve_args *args) stack_top = td->stack_base + (PROC_STACK_SIZE - 1); /* Setup registers, signals and stack */ - md_td_stackinit(td, (void *)(stack_top + VM_HIGHER_HALF), &prog); + stack_top = md_td_stackinit(td, (void *)(stack_top + VM_HIGHER_HALF), &prog); setregs(td, &prog, stack_top); signals_init(td); diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index 75ab0e9..c00f39b 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -35,6 +35,7 @@ #include <vm/vm.h> #include <vm/map.h> #include <machine/pcb.h> +#include <machine/cpu.h> #define pr_trace(fmt, ...) kprintf("exit: " fmt, ##__VA_ARGS__) #define pr_error(...) pr_trace(__VA_ARGS__) @@ -48,6 +49,11 @@ unload_td(struct proc *td) struct pcb *pcbp; size_t len; + sched_detach(td); + if (ISSET(td->flags, PROC_KTD)) { + return; + } + execp = &td->exec; auxvalp = &execp->auxval; pcbp = &td->pcb; @@ -72,56 +78,107 @@ unload_td(struct proc *td) } } +void +proc_reap(struct proc *td) +{ + struct pcb *pcbp; + vaddr_t stack_va; + paddr_t stack_pa; + + pcbp = &td->pcb; + unload_td(td); + + /* + * User space stacks are identity mapped and + * kernel space stacks are not. + */ + if (ISSET(td->flags, PROC_KTD)) { + stack_va = td->stack_base; + stack_pa = td->stack_base - VM_HIGHER_HALF; + } else { + stack_va = td->stack_base; + stack_pa = td->stack_base; + vm_unmap(pcbp->addrsp, stack_va, PROC_STACK_SIZE); + } + + vm_free_frame(stack_pa, PROC_STACK_PAGES); + pmap_destroy_vas(pcbp->addrsp); +} + /* * Kill a thread and deallocate its resources. 
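Taken together with the fd_rw() seal check above, the new open path behaves roughly like the sketch below. The path is illustrative, and O_RDWR is assumed to carry the O_ALLOW_WR bit that the seal check tests.

#include <sys/fcntl.h>
#include <sys/filedesc.h>

static int
create_and_write(void)
{
    char buf[2] = { 'h', 'i' };
    int fd;

    /* O_CREAT makes fd_open() request the parent directory
     * (NAMEI_WANTPARENT) and create through vops->create */
    if ((fd = fd_open("/tmp/newfile", O_CREAT | O_RDWR)) < 0)
        return fd;

    /* would return -EPERM on an O_RDONLY descriptor */
    return fd_write(fd, buf, sizeof(buf));
}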
* * @td: Thread to exit */ int -exit1(struct proc *td) +exit1(struct proc *td, int flags) { - struct pcb *pcbp; - struct proc *curtd; - uintptr_t stack; + struct proc *curtd, *procp; + struct proc *parent; + struct cpu_info *ci; pid_t target_pid, curpid; + ci = this_cpu(); target_pid = td->pid; curtd = this_td(); - pcbp = &td->pcb; curpid = curtd->pid; - stack = td->stack_base; td->flags |= PROC_EXITING; + parent = td->parent; - /* - * If this is on the higher half, it is kernel - * mapped and we need to convert it to a physical - * address. - */ - if (stack >= VM_HIGHER_HALF) { - stack -= VM_HIGHER_HALF; + /* If we have any children, kill them too */ + if (td->nleaves > 0) { + TAILQ_FOREACH(procp, &td->leafq, leaf_link) { + if (!ISSET(procp->flags, PROC_EXITING)) + exit1(procp, flags); + } } - unload_td(td); - vm_unmap(pcbp->addrsp, td->stack_base, PROC_STACK_SIZE); - vm_free_frame(stack, PROC_STACK_PAGES); + if (target_pid != curpid) { + proc_reap(td); + } - pmap_destroy_vas(pcbp->addrsp); - dynfree(td); + if (td->data != NULL) { + dynfree(td->data); + } + + /* + * Only free the process structure if we aren't + * being waited on, otherwise let it be so the + * parent can examine what's left of it. + */ + if (!ISSET(td->flags, PROC_WAITED)) { + dynfree(td); + } else { + td->flags |= PROC_ZOMB; + td->flags &= ~PROC_WAITED; + } /* * If we are the thread exiting, reenter the scheduler * and do not return. */ - if (target_pid == curpid) + if (target_pid == curpid) { + ci->curtd = NULL; + if (parent->pid == 0) + sched_enter(); + + parent->flags &= ~PROC_SLEEP; sched_enter(); + } return 0; } +/* + * arg0: Exit status. + */ scret_t sys_exit(struct syscall_args *scargs) { - exit1(this_td()); + struct proc *td = this_td(); + + td->exit_status = scargs->arg0; + exit1(td, 0); __builtin_unreachable(); } diff --git a/sys/kern/kern_krq.c b/sys/kern/kern_krq.c new file mode 100644 index 0000000..c12a98c --- /dev/null +++ b/sys/kern/kern_krq.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
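The PROC_WAITED/PROC_ZOMB handshake in exit1() implies a parent-side collector along these lines. This is only a sketch of the intended protocol under that reading, not code from this commit.

#include <sys/proc.h>
#include <sys/param.h>
#include <sys/sched.h>

static int
wait_child(struct proc *parent, pid_t pid)
{
    struct proc *child;
    int status;

    if ((child = get_child(parent, pid)) == NULL)
        return -1;

    /* tell exit1() to leave the proc behind as a zombie */
    child->flags |= PROC_WAITED;
    while (!ISSET(child->flags, PROC_ZOMB))
        sched_yield();

    status = child->exit_status;
    proc_reap(child);       /* release stack and address space */
    return status;
}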
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/syscall.h>
+#include <sys/krq.h>
+#include <sys/errno.h>
+#include <sys/spinlock.h>
+#include <sys/driver.h>
+#include <sys/syslog.h>
+
+static struct spinlock krq_lock = {0};
+
+/*
+ * Load a kernel runtime quantum (KRQ)
+ *
+ * @arg0: path
+ *
+ * XXX: If the 'path' argument is NULL, all deferrable
+ * drivers are loaded.
+ *
+ * TODO: Handle non-null paths where a completely separate
+ * module/krq can be loaded.
+ */
+scret_t
+sys_inject(struct syscall_args *scargs)
+{
+ if (scargs->arg0 != 0) {
+ return -EINVAL;
+ }
+
+ spinlock_acquire(&krq_lock);
+ DRIVERS_SCHED();
+ spinlock_release(&krq_lock);
+ return 0;
+}
diff --git a/sys/kern/kern_panic.c b/sys/kern/kern_panic.c
index 950ea8f..099f620 100644
--- a/sys/kern/kern_panic.c
+++ b/sys/kern/kern_panic.c
@@ -31,6 +31,9 @@
 #include <sys/spinlock.h>
 #include <sys/syslog.h>
 #include <sys/reboot.h>
+#include <dev/cons/cons.h>
+#include <machine/cdefs.h>
+#include <machine/cpu.h>
 /*
 * Burn and sizzle - the core logic that really ends
@@ -51,10 +54,35 @@ bas(bool do_trace, int reboot_type)
 md_backtrace();
 }
+ kprintf(OMIT_TIMESTAMP "\n-- ALL CORES HAVE BEEN HALTED --\n");
 cpu_reboot(reboot_type);
 __builtin_unreachable();
 }
+static void
+panic_screen(void)
+{
+ struct cons_screen *scr = &g_root_scr;
+
+ if (scr->fb_mem != NULL) {
+ scr->bg = 0x8B0000;
+ scr->fg = 0xAABBAA;
+ cons_reset_cursor(scr);
+ cons_clear_scr(scr, 0x393B39);
+ }
+}
+
+static void
+do_panic(const char *fmt, va_list *ap)
+{
+ syslog_silence(false);
+ kprintf(OMIT_TIMESTAMP "panic: ");
+ vkprintf(fmt, ap);
+ bas(true, REBOOT_HALT);
+
+ __builtin_unreachable();
+}
+
 /*
 * Tells the user something terribly wrong happened, then
 * halts the system as soon as possible.
@@ -69,11 +97,13 @@ panic(const char *fmt, ...)
 {
 va_list ap;
- va_start(ap, fmt);
- kprintf(OMIT_TIMESTAMP "panic: ");
- vkprintf(fmt, &ap);
- bas(true, REBOOT_HALT);
+ /* Shut everything else up */
+ md_intoff();
+ cpu_halt_others();
+ panic_screen();
+ va_start(ap, fmt);
+ do_panic(fmt, &ap);
 __builtin_unreachable();
 }
@@ -89,7 +119,6 @@ hcf(const char *fmt, ...)
{ va_list ap; - if (fmt != NULL) { va_start(ap, fmt); kprintf(OMIT_TIMESTAMP); diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c index 35a1af7..ec5592e 100644 --- a/sys/kern/kern_sched.c +++ b/sys/kern/kern_sched.c @@ -104,12 +104,29 @@ sched_dequeue_td(void) for (size_t i = 0; i < SCHED_NQUEUE; ++i) { queue = &qlist[i]; - if (!TAILQ_EMPTY(&queue->q)) { - td = TAILQ_FIRST(&queue->q); - TAILQ_REMOVE(&queue->q, td, link); - spinlock_release(&tdq_lock); - return td; + if (TAILQ_EMPTY(&queue->q)) { + continue; } + + td = TAILQ_FIRST(&queue->q); + if (td == NULL) { + continue; + } + + while (ISSET(td->flags, PROC_SLEEP)) { + td = TAILQ_NEXT(td, link); + if (td == NULL) { + break; + } + } + + if (td == NULL) { + continue; + } + + TAILQ_REMOVE(&queue->q, td, link); + spinlock_release(&tdq_lock); + return td; } /* We got nothing */ @@ -176,62 +193,50 @@ td_pri_update(struct proc *td) } } +void +sched_switch_to(struct trapframe *tf, struct proc *td) +{ + struct cpu_info *ci; + struct pcb *pcbp; + + ci = this_cpu(); + + if (tf != NULL) { + memcpy(tf, &td->tf, sizeof(*tf)); + } + + ci->curtd = td; + pcbp = &td->pcb; + pmap_switch_vas(pcbp->addrsp); +} + /* * Perform a context switch. */ void sched_switch(struct trapframe *tf) { - struct cpu_info *ci; - struct pcb *pcbp; struct proc *next_td, *td; - bool use_current = true; + struct cpu_info *ci; ci = this_cpu(); td = ci->curtd; if (td != NULL) { - dispatch_signals(td); - td_pri_update(td); - } - - /* - * Get the next thread and use it only if it isn't - * in the middle of an exit, exec, or whatever. - */ - do { - if ((next_td = sched_dequeue_td()) == NULL) { - sched_oneshot(false); + if (td->pid == 0) return; - } - - /* - * If we are in the middle of an exec, don't use this - * thread. - */ - if (ISSET(next_td->flags, PROC_EXEC)) { - use_current = false; - } - /* - * Don't use this thread if we are currently - * exiting. - */ - if (ISSET(next_td->flags, PROC_EXITING)) { - use_current = false; - } - } while (!use_current); - - /* Save the previous thread */ - if (td != NULL) { + dispatch_signals(td); + td_pri_update(td); sched_save_td(td, tf); } - memcpy(tf, &next_td->tf, sizeof(*tf)); - ci->curtd = next_td; - pcbp = &next_td->pcb; + if ((next_td = sched_dequeue_td()) == NULL) { + sched_oneshot(false); + return; + } - pmap_switch_vas(pcbp->addrsp); + sched_switch_to(tf, next_td); sched_oneshot(false); } @@ -241,20 +246,9 @@ sched_switch(struct trapframe *tf) void sched_enter(void) { - static int nenter = 0; - - /* - * Enable interrupts for all processors and - * sync on first entry. 
- */ md_inton(); - if (nenter == 0) { - md_sync_all(); - atomic_inc_int(&nenter); - } - + sched_oneshot(false); for (;;) { - sched_oneshot(false); md_pause(); } } @@ -262,14 +256,39 @@ sched_enter(void) void sched_yield(void) { - struct proc *td = this_td(); + struct proc *td; + struct cpu_info *ci = this_cpu(); - if (td != NULL) { - td->rested = true; + if ((td = ci->curtd) == NULL) { + return; } + td->rested = true; + + /* FIXME: Hang yielding when waited on */ + if (ISSET(td->flags, PROC_WAITED)) { + return; + } + + ci->curtd = NULL; + md_inton(); sched_oneshot(false); - while (td->rested); + + md_hlt(); + md_intoff(); + ci->curtd = td; +} + +void +sched_detach(struct proc *td) +{ + struct sched_queue *queue; + + spinlock_acquire(&tdq_lock); + queue = &qlist[td->priority]; + + TAILQ_REMOVE(&queue->q, td, link); + spinlock_release(&tdq_lock); } void diff --git a/sys/kern/kern_spawn.c b/sys/kern/kern_spawn.c new file mode 100644 index 0000000..a953a6e --- /dev/null +++ b/sys/kern/kern_spawn.c @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/spawn.h> +#include <sys/proc.h> +#include <sys/exec.h> +#include <sys/mman.h> +#include <sys/systm.h> +#include <sys/errno.h> +#include <sys/syslog.h> +#include <sys/syscall.h> +#include <sys/atomic.h> +#include <sys/signal.h> +#include <sys/limits.h> +#include <sys/sched.h> +#include <vm/dynalloc.h> +#include <string.h> + +#define pr_trace(fmt, ...) kprintf("spawn: " fmt, ##__VA_ARGS__) +#define pr_error(...) 
pr_trace(__VA_ARGS__) + +#define ARGVP_MAX (ARG_MAX / sizeof(void *)) + +static volatile size_t nthreads = 0; + +/* + * TODO: envp + */ +struct spawn_args { + char path[PATH_MAX]; + char argv_blk[ARG_MAX]; + char *argv[ARGVP_MAX]; +}; + +static inline void +try_free_data(void *p) +{ + if (p != NULL) { + dynfree(p); + } +} + +static void +spawn_thunk(void) +{ + const char *path; + char pathbuf[PATH_MAX]; + struct proc *cur; + struct execve_args execve_args; + struct spawn_args *args; + char *envp[] = { NULL }; + + cur = this_td(); + args = cur->data; + path = args->path; + memset(pathbuf, 0, sizeof(pathbuf)); + memcpy(pathbuf, path, strlen(path)); + + execve_args.pathname = pathbuf; + execve_args.argv = (char **)&args->argv[0]; + execve_args.envp = envp; + path = NULL; + + if (execve(cur, &execve_args) != 0) { + pr_error("execve failed, aborting\n"); + exit1(this_td(), 0); + } + __builtin_unreachable(); +} + +/* + * Spawn a new process + * + * @cur: Parent (current) process. + * @func: Address of start code. + * @p: Data to pass to new process (used for user procs) + * @flags: Spawn flags. + * @newprocp: If not NULL, will contain the new process. + * + * Returns the PID of the child on success, otherwise an + * errno value that is less than zero. + * + * XXX: `p` is only used by sys_spawn and should be set + * to NULL if called in the kernel. + */ +pid_t +spawn(struct proc *cur, void(*func)(void), void *p, int flags, struct proc **newprocp) +{ + struct proc *newproc; + struct mmap_lgdr *mlgdr; + int error; + pid_t pid; + + newproc = dynalloc(sizeof(*newproc)); + if (newproc == NULL) { + pr_error("could not alloc proc (-ENOMEM)\n"); + try_free_data(p); + return -ENOMEM; + } + + mlgdr = dynalloc(sizeof(*mlgdr)); + if (mlgdr == NULL) { + dynfree(newproc); + try_free_data(p); + pr_error("could not alloc proc mlgdr (-ENOMEM)\n"); + return -ENOMEM; + } + + memset(newproc, 0, sizeof(*newproc)); + error = md_spawn(newproc, cur, (uintptr_t)func); + if (error < 0) { + dynfree(newproc); + dynfree(mlgdr); + try_free_data(p); + pr_error("error initializing proc\n"); + return error; + } + + /* Set proc output if we can */ + if (newprocp != NULL) { + *newprocp = newproc; + } + + if (!ISSET(cur->flags, PROC_LEAFQ)) { + TAILQ_INIT(&cur->leafq); + cur->flags |= PROC_LEAFQ; + } + + /* Add to parent leafq */ + TAILQ_INSERT_TAIL(&cur->leafq, newproc, leaf_link); + atomic_inc_int(&cur->nleaves); + newproc->parent = cur; + newproc->data = p; + newproc->exit_status = -1; + + /* Initialize the mmap ledger */ + mlgdr->nbytes = 0; + RBT_INIT(lgdr_entries, &mlgdr->hd); + newproc->mlgdr = mlgdr; + newproc->flags |= PROC_WAITED; + + newproc->pid = ++nthreads; + signals_init(newproc); + sched_enqueue_td(newproc); + pid = newproc->pid; + + if (ISSET(flags, SPAWN_WAIT)) { + cur->flags |= PROC_SLEEP; + + while (ISSET(cur->flags, PROC_SLEEP)) { + sched_yield(); + } + while (!ISSET(newproc->flags, PROC_ZOMB)) { + sched_yield(); + } + + if (newproc->exit_status < 0) { + pid = newproc->exit_status; + } + + proc_reap(newproc); + } + + return pid; +} + +/* + * Get the child of a process by PID. + * + * @cur: Parent process. + * @pid: Child PID. + * + * Returns NULL if no child was found. 
+ */
+struct proc *
+get_child(struct proc *cur, pid_t pid)
+{
+    struct proc *procp;
+
+    TAILQ_FOREACH(procp, &cur->leafq, leaf_link) {
+        if (procp->pid == pid) {
+            return procp;
+        }
+    }
+
+    return NULL;
+}
+
+/*
+ * arg0: The file /path/to/executable
+ * arg1: Argv
+ * arg2: Envp (TODO)
+ * arg3: Optional flags (`flags')
+ */
+scret_t
+sys_spawn(struct syscall_args *scargs)
+{
+    struct spawn_args *args;
+    char *path;
+    const char *u_path, **u_argv;
+    const char *u_p = NULL;
+    struct proc *td;
+    int flags, error;
+    size_t len, bytes_copied = 0;
+    size_t argv_i = 0;
+
+    td = this_td();
+    u_path = (const char *)scargs->arg0;
+    u_argv = (const char **)scargs->arg1;
+    flags = scargs->arg3;
+
+    args = dynalloc(sizeof(*args));
+    if (args == NULL) {
+        return -ENOMEM;
+    }
+
+    error = copyinstr(u_path, args->path, sizeof(args->path));
+    if (error < 0) {
+        dynfree(args);
+        return error;
+    }
+
+    memset(args->argv, 0, ARG_MAX);
+    for (size_t i = 0; i < ARG_MAX - 1; ++i) {
+        error = copyin(&u_argv[argv_i], &u_p, sizeof(u_p));
+        if (error < 0) {
+            dynfree(args);
+            return error;
+        }
+        if (u_p == NULL) {
+            args->argv[argv_i++] = NULL;
+            break;
+        }
+
+        path = &args->argv_blk[i];
+        error = copyinstr(u_p, path, ARG_MAX - bytes_copied);
+        if (error < 0) {
+            dynfree(args);
+            return error;
+        }
+
+        args->argv[argv_i++] = &args->argv_blk[i];
+        len = strlen(path);
+        bytes_copied += (len + 1);
+        i += len;
+    }
+
+    return spawn(td, spawn_thunk, args, flags, NULL);
+}
diff --git a/sys/kern/kern_stub.c b/sys/kern/kern_stub.c
index 8603fd5..17c6e54 100644
--- a/sys/kern/kern_stub.c
+++ b/sys/kern/kern_stub.c
@@ -40,8 +40,10 @@ sigfpe_default(int signo)
     static struct proc *td;
 
     td = this_td();
-    kprintf("Floating point exception (pid=%d)\n", td->pid);
-    exit1(td);
+    syslog_silence(false);
+    kprintf(OMIT_TIMESTAMP "Floating point exception (pid=%d)\n", td->pid);
+    syslog_silence(true);
+    exit1(td, 0);
 }
 
 void
@@ -50,8 +52,10 @@ sigkill_default(int signo)
     static struct proc *td;
 
     td = this_td();
-    kprintf("Terminated (pid=%d)\n", td->pid);
-    exit1(td);
+    syslog_silence(false);
+    kprintf(OMIT_TIMESTAMP "Terminated (pid=%d)\n", td->pid);
+    syslog_silence(true);
+    exit1(td, 0);
 }
 
 void
@@ -60,8 +64,10 @@ sigsegv_default(int signo)
     static struct proc *td;
 
     td = this_td();
-    kprintf("Segmentation fault (pid=%d)\n", td->pid);
-    exit1(td);
+    syslog_silence(false);
+    kprintf(OMIT_TIMESTAMP "Segmentation fault (pid=%d)\n", td->pid);
+    syslog_silence(true);
+    exit1(td, 0);
 }
 
 int
@@ -75,3 +81,9 @@ dev_nowrite(void)
 {
     return -ENOTSUP;
 }
+
+int
+dev_nobsize(void)
+{
+    return -ENOTSUP;
+}
diff --git a/sys/kern/kern_syscall.c b/sys/kern/kern_syscall.c
index 986d82a..292fa56 100644
--- a/sys/kern/kern_syscall.c
+++ b/sys/kern/kern_syscall.c
@@ -29,9 +29,13 @@
 
 #include <sys/syscall.h>
 #include <sys/sysctl.h>
+#include <sys/reboot.h>
 #include <sys/types.h>
+#include <sys/time.h>
+#include <sys/mman.h>
 #include <sys/proc.h>
 #include <sys/vfs.h>
+#include <sys/krq.h>
 
 scret_t(*g_sctab[])(struct syscall_args *) = {
     NULL,           /* SYS_none */
@@ -42,6 +46,14 @@ scret_t(*g_sctab[])(struct syscall_args *) = {
     sys_stat,       /* SYS_stat */
     sys_sysctl,     /* SYS_sysctl */
     sys_write,      /* SYS_write */
+    sys_spawn,      /* SYS_spawn */
+    sys_reboot,     /* SYS_reboot */
+    sys_mmap,       /* SYS_mmap */
+    sys_munmap,     /* SYS_munmap */
+    sys_access,     /* SYS_access */
+    sys_lseek,      /* SYS_lseek */
+    sys_sleep,      /* SYS_sleep */
+    sys_inject,     /* SYS_inject */
 };
 
 const size_t MAX_SYSCALLS = NELEM(g_sctab);
diff --git a/sys/kern/kern_syslog.c b/sys/kern/kern_syslog.c
index 10bf348..c7f51f7 100644
--- a/sys/kern/kern_syslog.c
+++ b/sys/kern/kern_syslog.c
@@ -28,9 +28,14 @@
  */
 
 #include <sys/syslog.h>
+#include <sys/cdefs.h>
+#include <sys/sio.h>
 #include <sys/spinlock.h>
+#include <sys/device.h>
+#include <sys/errno.h>
 #include <dev/cons/cons.h>
 #include <dev/timer.h>
+#include <fs/devfs.h>
 #include <stdarg.h>
 #include <string.h>
 
@@ -40,21 +45,105 @@
 #define SERIAL_DEBUG 0
 #endif
 
+#if defined(__USER_KMSG)
+#define USER_KMSG __USER_KMSG
+#else
+#define USER_KMSG 0
+#endif
+
+#define KBUF_SIZE (1 << 16)
+
+/* Sanity check */
+__static_assert(KBUF_SIZE <= (1 << 16), "KBUF_SIZE too high!");
+
 /* Global logger lock */
-static struct spinlock lock = {0};
+static struct spinlock kmsg_lock = {0};
+static bool no_cons_log = false;
+
+/* Kernel message buffer */
+static char kmsg[KBUF_SIZE];
+static size_t kmsg_i = 0;
+static struct cdevsw kmsg_cdevw;
+
+static void
+kmsg_append(const char *s, size_t len)
+{
+    spinlock_acquire(&kmsg_lock);
+    if ((kmsg_i + len) >= KBUF_SIZE) {
+        kmsg_i = 0;
+    }
+
+    for (size_t i = 0; i < len; ++i) {
+        kmsg[kmsg_i + i] = s[i];
+    }
+    kmsg_i += len;
+    spinlock_release(&kmsg_lock);
+}
+
+/*
+ * Character device function.
+ */
+static int
+kmsg_read(dev_t dev, struct sio_txn *sio, int flags)
+{
+    size_t len, offset, j;
+    size_t bytes_read = 0;
+    char *p = sio->buf;
+
+    spinlock_acquire(&kmsg_lock);
+    len = sio->len;
+    offset = sio->offset;
+
+    if (len == 0) {
+        spinlock_release(&kmsg_lock);
+        return -EINVAL;
+    }
+    if (offset >= kmsg_i) {
+        spinlock_release(&kmsg_lock);
+        return 0;
+    }
+
+    for (size_t i = 0; i < len; ++i) {
+        j = offset + i;
+        if (j > kmsg_i) {
+            break;
+        }
+
+        p[i] = kmsg[j];
+        ++bytes_read;
+    }
+
+    spinlock_release(&kmsg_lock);
+    return bytes_read;
+}
 
 static void
 syslog_write(const char *s, size_t len)
 {
-    const char *p = s;
+    const char *p;
+    size_t l;
 
-    while (len--) {
-        cons_putch(&g_root_scr, *p);
-        if (SERIAL_DEBUG) {
+    if (SERIAL_DEBUG) {
+        p = s;
+        l = len;
+        while (l--) {
             serial_putc(*p);
+            ++p;
         }
-        ++p;
     }
+
+    kmsg_append(s, len);
+
+    /*
+     * If the USER_KMSG option is disabled in kconf,
+     * do not log to the console if everything else
+     * has already started.
+     */
+    if (!USER_KMSG && no_cons_log) {
+        return;
+    }
+
+    cons_putstr(&g_root_scr, s, len);
 }
 
 /*
@@ -105,10 +194,42 @@ kprintf(const char *fmt, ...)
         syslog_write(timestamp, strlen(timestamp));
     }
 
-    spinlock_acquire(&lock);
     va_start(ap, fmt);
     vkprintf(fmt_p, &ap);
     va_end(ap);
-    spinlock_release(&lock);
 }
+
+/*
+ * Silence kernel messages if the system
+ * is already operating in a user context.
+ *
+ * XXX: This is ignored if the kconf USER_KMSG
+ * option is set to "no". A kmsg device file
+ * is also created on the first call.
+ */
+void
+syslog_silence(bool option)
+{
+    static bool once = false;
+    static char devname[] = "kmsg";
+    devmajor_t major;
+    dev_t dev;
+
+    if (!once) {
+        once = true;
+        major = dev_alloc_major();
+        dev = dev_alloc(major);
+
+        dev_register(major, dev, &kmsg_cdevw);
+        devfs_create_entry(devname, major, dev, 0444);
+    }
+
+    no_cons_log = option;
+}
+
+static struct cdevsw kmsg_cdevw = {
+    .read = kmsg_read,
+    .write = nowrite
+};
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
new file mode 100644
index 0000000..102648c
--- /dev/null
+++ b/sys/kern/kern_time.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1.
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/time.h> +#include <sys/syscall.h> +#include <sys/systm.h> +#include <sys/errno.h> +#include <sys/cdefs.h> +#include <dev/timer.h> +#include <machine/cdefs.h> + +/* + * arg0: Timespec + * arg1: Remaining timeval + */ +scret_t +sys_sleep(struct syscall_args *scargs) +{ + struct timespec ts; + struct timer tmr; + size_t timeout_msec; + tmrr_status_t status; + int error; + + error = copyin((void *)scargs->arg0, &ts, sizeof(ts)); + if (error < 0) { + return error; + } + + if (ts.tv_nsec >= 1000000000) { + return -EINVAL; + } + + status = req_timer(TIMER_GP, &tmr); + if (__unlikely(status != TMRR_SUCCESS)) { + return -ENOTSUP; + } + if (__unlikely(tmr.msleep == NULL)) { + return -ENOTSUP; + } + + timeout_msec = ts.tv_nsec / 1000000; + timeout_msec += ts.tv_sec * 1000; + + md_inton(); + tmr.msleep(timeout_msec); + md_intoff(); + return 0; +} diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c index caa0766..bc7f8b0 100644 --- a/sys/kern/vfs_init.c +++ b/sys/kern/vfs_init.c @@ -36,7 +36,9 @@ struct vnode *g_root_vnode = NULL; static struct fs_info fs_list[] = { {MOUNT_RAMFS, &g_initramfs_vfsops, 0, 0}, - {MOUNT_DEVFS, &g_devfs_vfsops, 0, 0} + {MOUNT_DEVFS, &g_devfs_vfsops, 0, 0}, + {MOUNT_CTLFS, &g_ctlfs_vfsops, 0, 0}, + {MOUNT_TMPFS, &g_tmpfs_vfsops, 0, 0} }; void diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c index 7419d1d..d88c447 100644 --- a/sys/kern/vfs_lookup.c +++ b/sys/kern/vfs_lookup.c @@ -29,6 +29,7 @@ #include <sys/namei.h> #include <sys/vnode.h> +#include <sys/param.h> #include <sys/mount.h> #include <sys/errno.h> #include <vm/dynalloc.h> @@ -118,20 +119,60 @@ vfs_get_fname_at(const char *path, size_t idx) } /* + * Count the number of components that exist within + * a path minus the delimiter as well as any redundant + * delimiters. 
+ *
+ * @path: Path to count
+ */
+static uint8_t
+namei_num_cnp(const char *path)
+{
+    const char *p = path;
+    uint8_t count = 0;
+
+    while (*p != '\0') {
+        /* Skip redundant delimiters */
+        if (p[0] == '/' && p[1] == '/') {
+            ++p;
+            continue;
+        }
+
+        if (*p == '/') {
+            ++count;
+        }
+        ++p;
+    }
+
+    /* Don't count a trailing slash */
+    if (*(p - 1) == '/') {
+        --count;
+    }
+
+    return count;
+}
+
+/*
  * Search for a path within a mountpoint.
  *
  * @mp: Mountpoint to search in.
  * @path: Path to search for.
+ * @ndp: Namei data pointer
  */
 static struct vnode *
-namei_mp_search(struct mount *mp, const char *path)
+namei_mp_search(struct mount *mp, const char *path, struct nameidata *ndp)
 {
     struct vop_lookup_args lookup_args;
     struct vnode *vp = mp->vp;
+    uint8_t n_cnp = 0;
     char *name;
     int status;
 
-    for (size_t i = 1;; ++i) {
+    n_cnp = namei_num_cnp(path);
+    if (ISSET(ndp->flags, NAMEI_WANTPARENT)) {
+        --n_cnp;
+    }
+
+    for (size_t i = 1; i < n_cnp; ++i) {
         name = vfs_get_fname_at(path, i);
         if (name == NULL)
             break;
@@ -143,11 +184,12 @@ namei_mp_search(struct mount *mp, const char *path)
         status = vfs_vop_lookup(vp, &lookup_args);
         dynfree(name);
 
-        if (status == 0)
-            return vp;
+        if (status != 0) {
+            return NULL;
+        }
     }
 
-    return NULL;
+    return vp;
 }
 
 /*
@@ -211,7 +253,7 @@ namei(struct nameidata *ndp)
 
         /* If the name matches, search within */
         if (strcmp(mp->name, name) == 0)
-            vp = namei_mp_search(mp, path);
+            vp = namei_mp_search(mp, path, ndp);
 
         /* Did we find it at this mountpoint? */
         if (vp != NULL) {
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 6f2d683..0d51331 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -43,7 +43,7 @@ static int
 vfs_dostat(const char *path, struct stat *sbuf)
 {
     char pathbuf[PATH_MAX];
-    struct vattr *attr;
+    struct vattr attr;
     struct stat st;
     struct vnode *vp;
     struct vop_getattr_args gattr;
@@ -54,11 +54,11 @@ vfs_dostat(const char *path, struct stat *sbuf)
         return -EINVAL;
     }
 
-    if ((copyinstr(path, pathbuf, sizeof(path))) < 0) {
+    if ((copyinstr(path, pathbuf, sizeof(pathbuf))) < 0) {
         return -EFAULT;
     }
 
-    nd.path = path;
+    nd.path = pathbuf;
     nd.flags = 0;
 
     if ((error = namei(&nd)) != 0) {
@@ -67,19 +67,42 @@ vfs_dostat(const char *path, struct stat *sbuf)
 
     vp = nd.vp;
     gattr.vp = vp;
+    gattr.res = &attr;
     error = vfs_vop_getattr(vp, &gattr);
     if (error != 0) {
         return error;
     }
 
-    attr = gattr.res;
     memset(&st, VNOVAL, sizeof(st));
 
     /* Copy stat data to userspace statbuf */
-    st.st_mode = attr->mode;
-    st.st_size = attr->size;
+    st.st_mode = attr.mode;
+    st.st_size = attr.size;
     copyout(&st, sbuf, sizeof(*sbuf));
+
+    vfs_release_vnode(vp);
+    return 0;
+}
+
+static int
+vfs_doaccess(const char *path)
+{
+    struct nameidata nd;
+    char pathbuf[PATH_MAX];
+    int error;
+
+    if ((copyinstr(path, pathbuf, sizeof(pathbuf))) < 0) {
+        return -EFAULT;
+    }
+
+    nd.path = pathbuf;
+    nd.flags = 0;
+
+    if ((error = namei(&nd)) != 0) {
+        return error;
+    }
+
+    vfs_release_vnode(nd.vp);
     return 0;
 }
 
@@ -149,3 +172,14 @@ sys_stat(struct syscall_args *scargs)
 {
     return vfs_dostat((const char *)scargs->arg0, (void *)scargs->arg1);
 }
+
+/*
+ * Check if a file can be accessed.
+ *
+ * @arg0: path
+ */
+scret_t
+sys_access(struct syscall_args *scargs)
+{
+    return vfs_doaccess((const char *)scargs->arg0);
+}
diff --git a/sys/lib/string/memmove.c b/sys/lib/string/memmove.c
new file mode 100644
index 0000000..f1271ee
--- /dev/null
+++ b/sys/lib/string/memmove.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <string.h> + +void * +memmove(void *s1, const void *s2, size_t n) +{ + const char *f = s2; + char *t = s1; + + if (f < t) { + f += n; + t += n; + while (n-- > 0) { + *--t = *--f; + } + } else { + while (n-- > 0) { + *t++ = *f++; + } + } + return s1; +} diff --git a/sys/lib/string/vsnprintf.c b/sys/lib/string/vsnprintf.c index e9e391f..a3b7e91 100644 --- a/sys/lib/string/vsnprintf.c +++ b/sys/lib/string/vsnprintf.c @@ -104,6 +104,7 @@ vsnprintf(char *s, size_t size, const char *fmt, va_list ap) num_len = strlen(num_buf); for (size_t i = num_len; i < pad_width; ++i) printc(s, size, &off, '0'); + pad_width = 0; } printstr(s, size, &off, num_buf); break; diff --git a/sys/kern/kern_fork.c b/sys/net/if.c index abb7707..5c9bc01 100644 --- a/sys/kern/kern_fork.c +++ b/sys/net/if.c @@ -27,61 +27,59 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include <sys/mman.h> -#include <sys/tree.h> #include <sys/types.h> -#include <sys/proc.h> +#include <sys/queue.h> +#include <sys/spinlock.h> #include <sys/errno.h> -#include <sys/sched.h> -#include <sys/signal.h> -#include <vm/dynalloc.h> +#include <net/if_var.h> #include <string.h> -static size_t nthreads = 0; +static TAILQ_HEAD(, netif) netif_list; +static bool netif_init = false; /* - * Fork1 - fork and direct a thread to 'ip' - * - * @cur: Current process. - * @flags: Flags to set. - * @ip: Location for new thread to start at. - * @newprocp: Will contain new thread if not NULL. + * Expose a network interface to the rest of the + * system. 
*/ -int -fork1(struct proc *cur, int flags, void(*ip)(void), struct proc **newprocp) +void +netif_add(struct netif *nifp) { - struct proc *newproc; - struct mmap_lgdr *mlgdr; - int status = 0; - - newproc = dynalloc(sizeof(*newproc)); - if (newproc == NULL) - return -ENOMEM; + if (!netif_init) { + TAILQ_INIT(&netif_list); + netif_init = true; + } - mlgdr = dynalloc(sizeof(*mlgdr)); - if (mlgdr == NULL) - return -ENOMEM; + TAILQ_INSERT_TAIL(&netif_list, nifp, link); +} - memset(newproc, 0, sizeof(*newproc)); - status = md_fork(newproc, cur, (uintptr_t)ip); - if (status != 0) - goto done; +/* + * Lookup a network interface by name or type. + * + * @name: Name to lookup (use `type' if NULL) + * @type: Type to lookup (use if `name' is NULL) + */ +int +netif_lookup(const char *name, uint8_t type, struct netif **res) +{ + struct netif *netif; - /* Set proc output if we can */ - if (newprocp != NULL) - *newprocp = newproc; + if (!netif_init) { + return -EAGAIN; + } - /* Initialize the mmap ledger */ - mlgdr->nbytes = 0; - RBT_INIT(lgdr_entries, &mlgdr->hd); - newproc->mlgdr = mlgdr; + TAILQ_FOREACH(netif, &netif_list, link) { + if (name != NULL) { + if (strcmp(netif->name, name) == 0) { + *res = netif; + return 0; + } + } - newproc->pid = ++nthreads; - signals_init(newproc); - sched_enqueue_td(newproc); -done: - if (status != 0) - dynfree(newproc); + if (name == NULL && netif->type == type) { + *res = netif; + return 0; + } + } - return status; + return -ENODEV; } diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c new file mode 100644 index 0000000..db1d6d4 --- /dev/null +++ b/sys/netinet/if_ether.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <sys/types.h> +#include <sys/endian.h> +#include <sys/errno.h> +#include <vm/dynalloc.h> +#include <net/ethertypes.h> +#include <netinet/if_ether.h> +#include <string.h> + +struct arp_pkt { + struct ether_frame ehfr; + struct ether_arp payload; +}; + +static struct arp_pkt * +arp_create(struct netif *nifp, uint32_t *sproto, uint32_t *tproto, uint16_t op) +{ + struct arp_pkt *packet; + struct arp_hdr *hdrp; + struct ether_frame *frp; + struct ether_arp *payload; + + packet = dynalloc(sizeof(*packet)); + if (packet == NULL) { + return NULL; + } + + frp = &packet->ehfr; + payload = &packet->payload; + hdrp = &payload->hdr; + + /* Ethernet frame, from source to all */ + memcpy(frp->ether_saddr, &nifp->addr, ETHER_ADDR_LEN); + memset(frp->ether_daddr, 0xFF, ETHER_ADDR_LEN); + frp->ether_type = swap16(ETHERTYPE_ARP); + + /* Now for the ARP header */ + hdrp->hw_type = swap16(ARP_HWTYPE_ETHER); + hdrp->proto_type = swap16(ETHERTYPE_IPV4); + hdrp->hw_len = ETHER_ADDR_LEN; + hdrp->proto_len = 4; + hdrp->op_type = swap16(op); + + memcpy(payload->sha, frp->ether_saddr, ETHER_ADDR_LEN); + memset(payload->tha, 0xFF, ETHER_ADDR_LEN); + + /* Protocol source address */ + *((uint32_t *)payload->spa) = *sproto; + *((uint32_t *)payload->tpa) = *tproto; + return packet; +} + +static int +arp_send(struct netif *nifp, uint8_t *sproto, uint8_t *tproto, uint16_t op) +{ + struct arp_pkt *packet; + struct netbuf nb; + uint32_t *src_tmp, *targ_tmp; + + if (nifp->tx_enq == NULL) { + return -ENOTSUP; + } + if (nifp->tx_start == NULL) { + return -ENOTSUP; + } + + src_tmp = (uint32_t *)sproto; + targ_tmp = (uint32_t *)tproto; + + packet = arp_create(nifp, src_tmp, targ_tmp, op); + if (packet == NULL) { + return -ENOMEM; + } + + nb.len = sizeof(*packet); + memcpy(nb.data, packet, nb.len); + + nifp->tx_enq(nifp, &nb, NULL); + nifp->tx_start(nifp); + dynfree(packet); + return 0; +} + +int +arp_request(struct netif *nifp, uint8_t *sproto, uint8_t *tproto) +{ + return arp_send(nifp, sproto, tproto, ARP_REQUEST); +} + +int +arp_reply(struct netif *nifp, uint8_t *sproto, uint8_t *tproto) +{ + return arp_send(nifp, sproto, tproto, ARP_REPLY); +} diff --git a/sys/vm/vm_device.c b/sys/vm/vm_device.c new file mode 100644 index 0000000..e990b47 --- /dev/null +++ b/sys/vm/vm_device.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Hyra nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/device.h> +#include <sys/syslog.h> +#include <vm/vm_device.h> + +#define pr_trace(fmt, ...) kprintf("vm_device: " fmt, ##__VA_ARGS__) +#define pr_error(...) pr_trace(__VA_ARGS__) + +const struct vm_pagerops dv_vnops; + +/* + * Attach a cdev to a vm_object + * + * @major: Char device major + * @minor: Char device minor. + */ +struct vm_object * +dv_attach(devmajor_t major, dev_t dev, vm_prot_t prot) +{ + int error; + struct cdevsw *cdevp; + struct vm_object *vmobj; + + if ((cdevp = dev_get(major, dev)) == NULL) { + pr_error("bad attach (major=%d, dev=%d)\n", major, dev); + return NULL; + } + + if (cdevp->mmap == NULL) { + pr_error("cdev lacks mmap() (major=%d, dev=%d)\n", major, dev); + return NULL; + } + + error = vm_obj_init(&cdevp->vmobj, &dv_vnops, 1); + if (error != 0) { + return NULL; + } + + vmobj = &cdevp->vmobj; + vmobj->prot = prot; + vmobj->data = cdevp; + vmobj->pgops = &dv_vnops; + return vmobj; +} + +/* TODO */ +const struct vm_pagerops dv_vnops = { + .get = NULL, +}; diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c index 2846a69..7518838 100644 --- a/sys/vm/vm_init.c +++ b/sys/vm/vm_init.c @@ -56,6 +56,7 @@ vm_init(void) void *pool; vm_physmem_init(); + pmap_init(); g_kvas = pmap_read_vas(); vm_ctx.dynalloc_pool_sz = DYNALLOC_POOL_SZ; diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index b56e896..26effdb 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -35,8 +35,10 @@ #include <sys/syscall.h> #include <sys/syslog.h> #include <sys/mman.h> +#include <sys/filedesc.h> #include <vm/dynalloc.h> #include <vm/vm_pager.h> +#include <vm/vm_device.h> #include <vm/pmap.h> #include <vm/map.h> #include <vm/vm.h> @@ -157,51 +159,113 @@ vm_map_modify(struct vas vas, vaddr_t va, paddr_t pa, vm_prot_t prot, bool unmap * crashes. */ void * -mmap_at(void *addr, size_t len, int prot, int flags, int fildes, off_t off) +mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) { - struct vm_object *map_obj; + struct vm_object *map_obj = NULL; + struct cdevsw *cdevp; struct vm_page *pg; struct mmap_entry *ep; + struct vnode *vp; + struct filedesc *fdp; struct proc *td; struct vas vas; int error, npgs; paddr_t pa; vaddr_t va; size_t misalign; + off_t page_off; misalign = len & (DEFAULT_PAGESIZE - 1); len = ALIGN_UP(len + misalign, DEFAULT_PAGESIZE); npgs = len / DEFAULT_PAGESIZE; - - if (addr == NULL) { - pr_error("mmap: NULL addr not supported\n"); - return NULL; - } + vas = pmap_read_vas(); /* Validate flags */ - if (ISSET(flags, MAP_FIXED | MAP_SHARED)) { - pr_error("mmap: fixed/shared mappings not yet supported\n"); + if (ISSET(flags, MAP_FIXED)) { + pr_error("mmap: fixed mappings not yet supported\n"); mmap_dbg(addr, len, prot, flags, fildes, off); return NULL; } - map_obj = dynalloc(sizeof(*map_obj)); - if (map_obj == NULL) { - kprintf("mmap: failed to allocate map object\n"); - return NULL; + + /* + * Attempt to open the file if mapping + * is shared. 
+ */
+    if (ISSET(flags, MAP_SHARED)) {
+        fdp = fd_get(fildes);
+        if (fdp == NULL) {
+            pr_error("mmap: no such fd (fd=%d)\n", fildes);
+            return NULL;
+        }
+
+        vp = fdp->vp;
+        if (vp->type != VCHR) {
+            /* TODO */
+            pr_error("mmap: only device files supported\n");
+            return NULL;
+        }
+
+        map_obj = dv_attach(vp->major, vp->dev, prot);
+        if (map_obj == NULL) {
+            kprintf("mmap: dv_attach() failure\n");
+            return NULL;
+        }
+
+        cdevp = map_obj->data;
+        if ((pa = cdevp->mmap(vp->dev, len, off, 0)) == 0) {
+            kprintf("mmap: dev mmap() gave 0\n");
+            return NULL;
+        }
+
+        /*
+         * If the address passed is NULL, just identity
+         * map everything.
+         *
+         * XXX: This is why the bounds check done in the
+         *      cdev mmap() *must* be correct.
+         *
+         * TODO: Use copy-on-write for this instead. Since mapping
+         *       certain devices may require a lot of memory to
+         *       be referenced anyway, we could use a buffered
+         *       copy-on-write technique where only a window of
+         *       pages can be mapped on-demand and other pages
+         *       freed when that window is exceeded.
+         */
+        if (addr == NULL) {
+            addr = (void *)pa;
+        }
+
+        va = ALIGN_DOWN((vaddr_t)addr, DEFAULT_PAGESIZE);
+        error = vm_map(vas, va, pa, prot, len);
+        if (error != 0) {
+            kprintf("mmap: map failed (error=%d)\n", error);
+            return NULL;
+        }
+
+        goto done;
     }
-    error = vm_obj_init(map_obj, &vm_anonops, 1);
-    if (error < 0) {
-        kprintf("mmap: vm_obj_init() returned %d\n", error);
-        kprintf("mmap: failed to init object\n");
-        return NULL;
+
+    /* Only allocate new obj if needed */
+    if (map_obj == NULL) {
+        map_obj = dynalloc(sizeof(*map_obj));
+        if (map_obj == NULL) {
+            kprintf("mmap: failed to allocate map object\n");
+            return NULL;
+        }
+        error = vm_obj_init(map_obj, &vm_anonops, 1);
+        if (error < 0) {
+            kprintf("mmap: vm_obj_init() returned %d\n", error);
+            kprintf("mmap: failed to init object\n");
+            return NULL;
+        }
     }
 
     /* XXX: Assuming private */
-    vas = pmap_read_vas();
     va = ALIGN_DOWN((vaddr_t)addr, DEFAULT_PAGESIZE);
 
     for (int i = 0; i < npgs; ++i) {
         pg = vm_pagealloc(map_obj, PALLOC_ZERO);
+        page_off = i * DEFAULT_PAGESIZE;
 
         if (pg == NULL) {
             /* TODO */
@@ -209,15 +273,21 @@ mmap_at(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
             return NULL;
         }
 
+        /* TODO: copy-on-write */
+        if (addr == NULL) {
+            va = pg->phys_addr;
+            addr = (void *)va;
+        }
+
         pa = pg->phys_addr;
-        error = vm_map(vas, va, pa, prot, len);
-        pr_trace("va=%p, len=%d\n", va, len);
+        error = vm_map(vas, va + page_off, pa, prot, len);
 
         if (error < 0) {
            pr_error("mmap: failed to map page (retval=%x)\n", error);
            return NULL;
         }
     }
 
+done:
     /* Add entry to ledger */
     td = this_td();
     ep = dynalloc(sizeof(*ep));
@@ -243,7 +313,7 @@ mmap_at(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
  * multiple of the machine page size.
 */
 int
-munmap_at(void *addr, size_t len)
+munmap(void *addr, size_t len)
 {
     int pgno;
     vaddr_t va;
@@ -299,7 +369,7 @@
  * arg5 -> off
  */
 scret_t
-mmap(struct syscall_args *scargs)
+sys_mmap(struct syscall_args *scargs)
 {
     void *addr;
     size_t len;
@@ -308,11 +378,11 @@
     addr = (void *)scargs->arg0;
     len = scargs->arg1;
-    prot = scargs->arg2;
+    prot = scargs->arg2 | PROT_USER;
     flags = scargs->arg3;
     fildes = scargs->arg4;
     off = scargs->arg5;
-    return (scret_t)mmap_at(addr, len, prot, flags, fildes, off);
+    return (scret_t)mmap(addr, len, prot, flags, fildes, off);
 }
 
 /*
@@ -322,14 +392,14 @@
  * arg1 -> len
  */
 scret_t
-munmap(struct syscall_args *scargs)
+sys_munmap(struct syscall_args *scargs)
 {
     void *addr;
     size_t len;
 
     addr = (void *)scargs->arg0;
     len = scargs->arg1;
-    return (scret_t)munmap_at(addr, len);
+    return (scret_t)munmap(addr, len);
 }
 
 /*
diff --git a/sys/vm/vm_physmem.c b/sys/vm/vm_physmem.c
index c7fcedb..89f9ee6 100644
--- a/sys/vm/vm_physmem.c
+++ b/sys/vm/vm_physmem.c
@@ -36,11 +36,12 @@
 #include <vm/vm.h>
 #include <string.h>
 
-size_t highest_frame_idx = 0;
-size_t bitmap_size = 0;
-size_t bitmap_free_start = 0;
+static size_t highest_frame_idx = 0;
+static size_t bitmap_size = 0;
+static size_t bitmap_free_start = 0;
+static ssize_t last_idx = 0;
 
-uint8_t *bitmap;
+static uint8_t *bitmap;
 static struct limine_memmap_response *resp = NULL;
 
 static struct spinlock lock = {0};
@@ -137,27 +138,51 @@ physmem_init_bitmap(void)
  *
  * @count: Number of frames to allocate.
  */
-uintptr_t
-vm_alloc_frame(size_t count)
+static uintptr_t
+__vm_alloc_frame(size_t count)
 {
     size_t frames = 0;
+    ssize_t idx = -1;
     uintptr_t ret = 0;
 
-    spinlock_acquire(&lock);
-    for (size_t i = 0; i < highest_frame_idx; ++i) {
+    for (size_t i = last_idx; i < highest_frame_idx; ++i) {
         if (!testbit(bitmap, i)) {
-            /* We have a free page */
-            if (++frames != count) {
-                continue;
-            }
+            if (idx < 0)
+                idx = i;
+            if (++frames >= count)
+                break;
 
-            for (size_t j = i; j < i + count; ++j) {
-                setbit(bitmap, j);
-            }
-
-            ret = i * DEFAULT_PAGESIZE;
-            break;
+            continue;
         }
+
+        idx = -1;
+        frames = 0;
+    }
+
+    if (idx < 0 || frames != count) {
+        ret = 0;
+        goto done;
+    }
+
+    for (size_t i = idx; i < idx + count; ++i) {
+        setbit(bitmap, i);
+    }
+    ret = idx * DEFAULT_PAGESIZE;
+    last_idx = idx;
+    memset(PHYS_TO_VIRT(ret), 0, count * DEFAULT_PAGESIZE);
+done:
+    return ret;
+}
+
+uintptr_t
+vm_alloc_frame(size_t count)
+{
+    uintptr_t ret;
+
+    spinlock_acquire(&lock);
+    if ((ret = __vm_alloc_frame(count)) == 0) {
+        last_idx = 0;
+        ret = __vm_alloc_frame(count);
     }
     spinlock_release(&lock);
@@ -169,6 +194,8 @@ vm_free_frame(uintptr_t base, size_t count)
 {
     size_t stop_at = base + (count * DEFAULT_PAGESIZE);
 
+    base = ALIGN_UP(base, DEFAULT_PAGESIZE);
+
     spinlock_acquire(&lock);
     for (uintptr_t p = base; p < stop_at; p += DEFAULT_PAGESIZE) {
         clrbit(bitmap, p / DEFAULT_PAGESIZE);
diff --git a/sys/vm/vm_vnode.c b/sys/vm/vm_vnode.c
index 2457c97..27defc9 100644
--- a/sys/vm/vm_vnode.c
+++ b/sys/vm/vm_vnode.c
@@ -162,7 +162,6 @@ vn_attach(struct vnode *vp, vm_prot_t prot)
 
     if (vp->type != VREG) {
         pr_error("vn_attach: vp=%p, prot=%x\n", vp, prot);
-        pr_error("vn_attach: Special files not supported yet!\n");
         return NULL;
     }
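
Note on the reworked exit1(): a waited-on process is kept around as a zombie (PROC_ZOMB) instead of being freed, so the parent can still read exit_status before calling proc_reap(). A minimal sketch of the parent-side collection this enables, reusing the flag names and helpers from the diff; kwait() itself is a hypothetical helper, not code from the tree:

/*
 * Hypothetical parent-side collection loop built on the
 * PROC_ZOMB/PROC_WAITED handshake in exit1().  Illustrative only.
 */
static int
kwait(struct proc *child, int *statusp)
{
    /* exit1() keeps the child as a zombie while PROC_WAITED is set */
    child->flags |= PROC_WAITED;

    while (!ISSET(child->flags, PROC_ZOMB)) {
        sched_yield();      /* let the child run until it exits */
    }

    if (statusp != NULL) {
        *statusp = child->exit_status;
    }

    proc_reap(child);       /* free what exit1() left behind */
    return 0;
}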
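
Note on sys_inject(): driver loading is delegated entirely to the DRIVERS_SCHED() macro, whose definition is not part of this diff. One common way to implement such a macro is a linker-set walk over descriptors placed in a dedicated section; the sketch below is purely illustrative, and the descriptor type and section bounds are assumptions:

/* Hypothetical deferred-driver descriptor, not Hyra's real layout */
struct driver_desc {
    int (*init)(void);
};

/* Assumed bounds of a ".drivers.defer" linker section */
extern struct driver_desc __drivers_defer_start[];
extern struct driver_desc __drivers_defer_end[];

static void
drivers_sched_example(void)
{
    /* Call each deferred driver's init hook exactly once */
    for (struct driver_desc *d = __drivers_defer_start;
        d < __drivers_defer_end; ++d) {
        if (d->init != NULL) {
            d->init();
        }
    }
}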
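
Note on the scheduler change: sched_dequeue_td() now walks past threads parked with PROC_SLEEP instead of handing them to the CPU. The same policy, restated as a self-contained helper over a single queue (field and flag names follow the diff; the helper is illustrative only and assumes the queue lock is held):

static struct proc *
dequeue_runnable(struct sched_queue *queue)
{
    struct proc *td = TAILQ_FIRST(&queue->q);

    /* Skip threads that are sleeping; they stay queued */
    while (td != NULL && ISSET(td->flags, PROC_SLEEP)) {
        td = TAILQ_NEXT(td, link);
    }

    if (td != NULL) {
        TAILQ_REMOVE(&queue->q, td, link);
    }
    return td;
}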
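
Note on sys_spawn(): argv is copied into a single flat block, with string bytes packed back to back in argv_blk while argv[] collects pointers into that block, terminated by a NULL entry. A sketch of the same layout built kernel-side, assuming only the spawn_args fields shown in the diff (pack_argv() is illustrative, not part of the tree):

static size_t
pack_argv(struct spawn_args *args, const char **src, size_t nsrc)
{
    size_t off = 0;

    args->argv[0] = NULL;
    for (size_t n = 0; n < nsrc; ++n) {
        size_t len = strlen(src[n]) + 1;    /* include the NUL */

        if (off + len > ARG_MAX || n + 1 >= ARGVP_MAX) {
            break;      /* out of block space or pointer slots */
        }
        memcpy(&args->argv_blk[off], src[n], len);
        args->argv[n] = &args->argv_blk[off];
        args->argv[n + 1] = NULL;   /* keep the table terminated */
        off += len;
    }
    return off;     /* bytes consumed in argv_blk */
}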
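
Note on the kmsg buffer: since syslog_silence() registers it as a 0444 character device, the log can be drained from userland once devfs exposes the node. A sketch of such a reader, assuming POSIX-style open()/read() wrappers and a /dev mount point, neither of which this diff shows:

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
    char buf[512];
    ssize_t n;
    int fd = open("/dev/kmsg", O_RDONLY);   /* assumed path */

    if (fd < 0) {
        return 1;
    }

    /* kmsg_read() returns 0 once the offset passes kmsg_i */
    while ((n = read(fd, buf, sizeof(buf))) > 0) {
        fwrite(buf, 1, n, stdout);
    }

    close(fd);
    return 0;
}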
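
Note on sys_sleep(): the timespec is converted to whole milliseconds before the timer's msleep() hook runs, so sub-millisecond residue is silently dropped. The conversion, pulled out for illustration:

#include <stddef.h>

/*
 * Same arithmetic as sys_sleep(): truncate nanoseconds to whole
 * milliseconds, so {0, 999999} sleeps for 0 ms and
 * {1, 500000000} sleeps for 1500 ms.
 */
static size_t
ts_to_msec(long tv_sec, long tv_nsec)
{
    size_t msec = tv_nsec / 1000000;    /* whole milliseconds */

    msec += tv_sec * 1000;
    return msec;
}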
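
Note on namei_num_cnp(): the delimiter-counting rule is easiest to see on concrete paths. count_cnp() below is a standalone replica of the static function so the cases can be checked in isolation (like the original, it assumes a non-empty path):

#include <assert.h>
#include <stdint.h>

static uint8_t
count_cnp(const char *path)
{
    const char *p = path;
    uint8_t count = 0;

    while (*p != '\0') {
        if (p[0] == '/' && p[1] == '/') {
            ++p;            /* skip a redundant delimiter */
            continue;
        }
        if (*p == '/') {
            ++count;
        }
        ++p;
    }
    if (*(p - 1) == '/') {
        --count;            /* discount a trailing slash */
    }
    return count;
}

int
main(void)
{
    assert(count_cnp("/usr/bin/ls") == 3);     /* one per '/' */
    assert(count_cnp("/usr//bin/ls") == 3);    /* '//' collapsed */
    assert(count_cnp("/usr/bin/") == 2);       /* trailing '/' */
    return 0;
}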
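
Note on memmove(): the implementation picks a copy direction so overlapping regions survive; copying forward into a higher destination would clobber source bytes before they are read. A small demonstration:

#include <string.h>
#include <stdio.h>

int
main(void)
{
    char buf[8] = "abcdef";

    /* Shift "abcdef" right by two: dst > src, so copy backwards */
    memmove(buf + 2, buf, 4);
    printf("%s\n", buf);    /* prints "ababcd" */
    return 0;
}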
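
Note on netif_lookup(): an interface resolves either by name or by type, depending on which argument is supplied. An example caller is sketched below; the name "et0" and the NETIF_TYPE_ETHER constant are assumptions for illustration (the real constants would live in net/if_var.h, which this diff does not show):

static struct netif *
find_ether_netif(void)
{
    struct netif *nifp;

    /* Prefer a well-known name, fall back to any ethernet NIC */
    if (netif_lookup("et0", 0, &nifp) == 0) {
        return nifp;
    }
    if (netif_lookup(NULL, NETIF_TYPE_ETHER, &nifp) == 0) {
        return nifp;
    }
    return NULL;
}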
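
Note on the ARP helpers: arp_request() and arp_reply() take raw 4-byte protocol addresses, which arp_create() copies into the spa/tpa fields. A sketch of announcing an address via gratuitous ARP (sender and target protocol address identical); the IP value is a placeholder:

#include <sys/types.h>

static int
arp_announce(struct netif *nifp)
{
    /* 192.168.1.10, byte by byte, already in wire order */
    uint8_t ip[4] = { 192, 168, 1, 10 };

    /* A gratuitous ARP asks for our own address */
    return arp_request(nifp, ip, ip);
}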
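
Note on the MAP_SHARED path in mmap(): the kernel identity-maps whatever physical base the character driver's mmap() hook returns (0 meaning failure), so the hook must do its own bounds check, as the XXX comment in the diff warns. What such a hook could look like for a framebuffer-style device; FB_BASE, FB_SIZE, and the hook's final flags parameter are illustrative assumptions:

#define FB_BASE 0xE0000000UL    /* placeholder MMIO base */
#define FB_SIZE 0x00400000UL    /* placeholder region size */

static paddr_t
fb_mmap(dev_t dev, size_t len, off_t off, int flags)
{
    /* The identity-map in mmap() trusts this bounds check */
    if (off + len > FB_SIZE) {
        return 0;   /* mmap() treats 0 as failure */
    }
    return FB_BASE + off;
}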
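
Note on the frame allocator split: __vm_alloc_frame() scans from last_idx (next-fit), and vm_alloc_frame() retries once from index zero before reporting failure, so a single failed scan does not mean the system is out of memory. Frames now also come back pre-zeroed. A usage sketch, relying only on symbols that appear in the diff:

static void
frame_alloc_example(void)
{
    /* Four contiguous, pre-zeroed frames */
    uintptr_t pa = vm_alloc_frame(4);

    if (pa == 0) {
        return;     /* both scans failed: no contiguous run */
    }

    /* Frames are physical; go through the HHDM to touch them */
    memset(PHYS_TO_VIRT(pa), 0xAA, 4 * DEFAULT_PAGESIZE);
    vm_free_frame(pa, 4);
}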