author     Ian Moffett <ian@osmora.org>    2025-05-17 21:56:07 -0400
committer  Ian Moffett <ian@osmora.org>    2025-05-17 21:58:44 -0400
commit     08eeb79db14145d83578025e1f0e7f7af460ee25 (patch)
tree       b6af572a4b8dceb4f044f1e0bf5697f5c18dc0fd /sys/dev
parent     9c64c3e69fa60b3657d33e829a411cb37064a169 (diff)
kernel: acpi: Add uACPI port
See https://github.com/uACPI/uACPI/

Signed-off-by: Ian Moffett <ian@osmora.org>
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/acpi/uacpi.c                     545
-rw-r--r--  sys/dev/acpi/uacpi/default_handlers.c    336
-rw-r--r--  sys/dev/acpi/uacpi/event.c              2449
-rw-r--r--  sys/dev/acpi/uacpi/interpreter.c        6053
-rw-r--r--  sys/dev/acpi/uacpi/io.c                 1116
-rw-r--r--  sys/dev/acpi/uacpi/mutex.c               396
-rw-r--r--  sys/dev/acpi/uacpi/namespace.c          1081
-rw-r--r--  sys/dev/acpi/uacpi/notify.c              255
-rw-r--r--  sys/dev/acpi/uacpi/opcodes.c             272
-rw-r--r--  sys/dev/acpi/uacpi/opregion.c           1056
-rw-r--r--  sys/dev/acpi/uacpi/osi.c                 388
-rw-r--r--  sys/dev/acpi/uacpi/registers.c           572
-rw-r--r--  sys/dev/acpi/uacpi/resources.c          2569
-rw-r--r--  sys/dev/acpi/uacpi/shareable.c            71
-rw-r--r--  sys/dev/acpi/uacpi/sleep.c               616
-rw-r--r--  sys/dev/acpi/uacpi/stdlib.c              728
-rw-r--r--  sys/dev/acpi/uacpi/tables.c             1399
-rw-r--r--  sys/dev/acpi/uacpi/types.c              1489
-rw-r--r--  sys/dev/acpi/uacpi/uacpi.c               998
-rw-r--r--  sys/dev/acpi/uacpi/utilities.c          1156
20 files changed, 23545 insertions, 0 deletions
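
For orientation, a minimal sketch (not part of this commit) of how the new glue layer is meant to be consumed: sys/dev/acpi/uacpi.c exports uacpi_init(), which the platform ACPI bring-up path calls once the kernel allocator and timers are available. The acpi_init() wrapper below and its place in boot are assumptions for illustration only.

/*
 * Sketch only: a hypothetical caller of the uacpi_init() routine added
 * by this commit. Assumes dynalloc() and TIMER_GP are already usable.
 */
void
acpi_init(void)
{
    /* ... RSDP/XSDT discovery, MADT parsing, etc. ... */

    /* Bring up the uACPI runtime */
    if (uacpi_init() != 0) {
        kprintf("acpi: uACPI bring-up failed\n");
        return;
    }
}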
diff --git a/sys/dev/acpi/uacpi.c b/sys/dev/acpi/uacpi.c
new file mode 100644
index 0000000..9e5ae6b
--- /dev/null
+++ b/sys/dev/acpi/uacpi.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/spinlock.h>
+#include <sys/proc.h>
+#include <sys/param.h>
+#include <sys/syslog.h>
+#include <sys/panic.h>
+#include <dev/timer.h>
+#include <uacpi/kernel_api.h>
+#include <uacpi/platform/arch_helpers.h>
+#include <uacpi/types.h>
+#include <uacpi/event.h>
+#include <uacpi/sleep.h>
+#include <machine/cdefs.h>
+#include <machine/pio.h>
+#if defined(__x86_64__)
+#include <machine/idt.h>
+#include <machine/ioapic.h>
+#include <machine/intr.h>
+#endif /* __x86_64__ */
+#include <dev/acpi/uacpi.h>
+#include <dev/acpi/acpi.h>
+#include <dev/pci/pci.h>
+#include <vm/dynalloc.h>
+#include <vm/vm.h>
+#include <string.h>
+
+typedef struct {
+ uacpi_io_addr base;
+ uacpi_size length;
+} io_range_t;
+
+void *
+uacpi_kernel_alloc(uacpi_size size)
+{
+ return dynalloc(size);
+}
+
+void
+uacpi_kernel_free(void *mem)
+{
+ dynfree(mem);
+}
+
+uacpi_status
+uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address)
+{
+ paddr_t pa;
+
+ pa = acpi_rsdp();
+ if (pa == 0) {
+ return UACPI_STATUS_NOT_FOUND;
+ }
+
+ *out_rsdp_address = pa;
+ return UACPI_STATUS_OK;
+}
+
+/* TODO: Actual mutex */
+uacpi_handle
+uacpi_kernel_create_mutex(void)
+{
+ struct spinlock *lp;
+
+ lp = dynalloc(sizeof(*lp));
+ if (lp == NULL) {
+ return NULL;
+ }
+ memset(lp, 0, sizeof(*lp));
+ return lp;
+}
+
+void
+uacpi_kernel_free_mutex(uacpi_handle handle)
+{
+ dynfree(handle);
+}
+
+uacpi_status
+uacpi_kernel_acquire_mutex(uacpi_handle handle, [[maybe_unused]] uacpi_u16 timeout)
+{
+ spinlock_acquire((struct spinlock *)handle);
+ return UACPI_STATUS_OK;
+}
+
+void
+uacpi_kernel_release_mutex(uacpi_handle handle)
+{
+ spinlock_release((struct spinlock *)handle);
+}
+
+uacpi_thread_id
+uacpi_kernel_get_thread_id(void)
+{
+ struct proc *td = this_td();
+
+ if (td == NULL) {
+ return 0; /* PID 0 */
+ }
+
+ return &td->pid;
+}
+
+uacpi_status
+uacpi_kernel_handle_firmware_request(uacpi_firmware_request *request)
+{
+ switch (request->type) {
+ case UACPI_FIRMWARE_REQUEST_TYPE_FATAL:
+ panic("uacpi: fatal firmware request\n");
+ break;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_handle
+uacpi_kernel_create_spinlock(void)
+{
+ struct spinlock *lp;
+
+ lp = dynalloc(sizeof(*lp));
+ if (lp == NULL) {
+ return NULL;
+ }
+ memset(lp, 0, sizeof(*lp));
+ return lp;
+}
+
+void
+uacpi_kernel_free_spinlock(uacpi_handle lock)
+{
+ dynfree(lock);
+}
+
+uacpi_cpu_flags
+uacpi_kernel_lock_spinlock(uacpi_handle lock)
+{
+ /* Interrupt state is not tracked here; callers simply get back 0 */
+ spinlock_acquire((struct spinlock *)lock);
+ return 0;
+}
+
+void
+uacpi_kernel_unlock_spinlock(uacpi_handle lock, uacpi_cpu_flags interrupt_state)
+{
+ spinlock_release((struct spinlock *)lock);
+}
+
+uacpi_handle
+uacpi_kernel_create_event(void)
+{
+ size_t *counter;
+
+ counter = dynalloc(sizeof(*counter));
+ if (counter == NULL) {
+ return NULL;
+ }
+
+ *counter = 0;
+ return counter;
+}
+
+void
+uacpi_kernel_free_event(uacpi_handle handle)
+{
+ dynfree(handle);
+}
+
+uacpi_bool
+uacpi_kernel_wait_for_event(uacpi_handle handle, uacpi_u16 timeout)
+{
+ size_t *counter = (size_t *)handle;
+ struct timer tmr;
+ size_t usec_start, usec;
+ size_t elapsed_msec;
+
+ /* Infinite timeout: block until signaled, then consume one count */
+ if (timeout == 0xFFFF) {
+ while (__atomic_load_n(counter, __ATOMIC_SEQ_CST) == 0) {
+ md_pause();
+ }
+ __atomic_fetch_sub(counter, 1, __ATOMIC_SEQ_CST);
+ return UACPI_TRUE;
+ }
+
+ req_timer(TIMER_GP, &tmr);
+ usec_start = tmr.get_time_usec();
+
+ for (;;) {
+ if (__atomic_load_n(counter, __ATOMIC_SEQ_CST) > 0) {
+ __atomic_fetch_sub(counter, 1, __ATOMIC_SEQ_CST);
+ return UACPI_TRUE;
+ }
+
+ usec = tmr.get_time_usec();
+ elapsed_msec = (usec - usec_start) / 1000;
+ if (elapsed_msec >= timeout) {
+ break;
+ }
+
+ md_pause();
+ }
+
+ /* Timed out without being signaled; nothing to consume */
+ return UACPI_FALSE;
+}
+
+void
+uacpi_kernel_signal_event(uacpi_handle handle)
+{
+ __atomic_fetch_add((size_t *)handle, 1, __ATOMIC_SEQ_CST);
+}
+
+void
+uacpi_kernel_reset_event(uacpi_handle handle)
+{
+ __atomic_store_n((size_t *)handle, 0, __ATOMIC_SEQ_CST);
+}
+
+uacpi_status
+uacpi_kernel_install_interrupt_handler(uacpi_u32 irq, uacpi_interrupt_handler fn,
+ uacpi_handle ctx, uacpi_handle *out_irq_handle)
+{
+ int vec;
+
+#if defined(__x86_64__)
+ vec = intr_alloc_vector("acpi", IPL_HIGH);
+ idt_set_desc(vec, IDT_INT_GATE, ISR(fn), IST_HW_IRQ);
+ ioapic_set_vec(irq, vec);
+ ioapic_irq_unmask(irq);
+ return UACPI_STATUS_OK;
+#else
+ return UACPI_STATUS_UNIMPLEMENTED;
+#endif /* __x86_64__ */
+}
+
+uacpi_status
+uacpi_kernel_uninstall_interrupt_handler([[maybe_unused]] uacpi_interrupt_handler fn, uacpi_handle irq_handle)
+{
+ return UACPI_STATUS_UNIMPLEMENTED;
+}
+
+uacpi_status
+uacpi_kernel_schedule_work(uacpi_work_type, uacpi_work_handler, uacpi_handle ctx)
+{
+ return UACPI_STATUS_UNIMPLEMENTED;
+}
+
+uacpi_status
+uacpi_kernel_wait_for_work_completion(void)
+{
+ return UACPI_STATUS_UNIMPLEMENTED;
+}
+
+void uacpi_kernel_stall(uacpi_u8 usec)
+{
+ /* XXX: STUB */
+ (void)usec;
+}
+
+void
+uacpi_kernel_sleep(uacpi_u64 msec)
+{
+ struct timer tmr;
+
+ req_timer(TIMER_GP, &tmr);
+ tmr.msleep(msec);
+}
+
+void *
+uacpi_kernel_map(uacpi_phys_addr addr, [[maybe_unused]] uacpi_size len)
+{
+ return PHYS_TO_VIRT(addr);
+}
+
+void
+uacpi_kernel_unmap([[maybe_unused]] void *addr, [[maybe_unused]] uacpi_size len)
+{
+ /* XXX: no-op */
+ (void)addr;
+ (void)len;
+}
+
+uacpi_status
+uacpi_kernel_io_read8(uacpi_handle handle, uacpi_size offset, uacpi_u8 *out_value)
+{
+ io_range_t *rp = (io_range_t *)handle;
+
+ if (offset >= rp->length) {
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ *out_value = inb(rp->base + offset);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_read16(uacpi_handle handle, uacpi_size offset, uacpi_u16 *out_value)
+{
+ io_range_t *rp = (io_range_t *)handle;
+
+ if (offset >= rp->length) {
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ *out_value = inw(rp->base + offset);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_read32(uacpi_handle handle, uacpi_size offset, uacpi_u32 *out_value)
+{
+ io_range_t *rp = (io_range_t *)handle;
+
+ if (offset >= rp->length) {
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ *out_value = inl(rp->base + offset);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_write8(uacpi_handle handle, uacpi_size offset, uacpi_u8 in_value)
+{
+ io_range_t *rp = (io_range_t *)handle;
+
+ if (offset >= rp->length) {
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ outb(rp->base + offset, in_value);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_write16(uacpi_handle handle, uacpi_size offset, uacpi_u16 in_value)
+{
+ io_range_t *rp = (io_range_t *)handle;
+
+ if (offset >= rp->length) {
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ outw(rp->base + offset, in_value);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_write32(uacpi_handle handle, uacpi_size offset, uacpi_u32 in_value)
+{
+ io_range_t *rp = (io_range_t *)handle;
+
+ if (offset >= rp->length) {
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ outl(rp->base + offset, in_value);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_io_map(uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle)
+{
+ io_range_t *rp;
+
+ rp = dynalloc(sizeof(*rp));
+ if (rp == NULL) {
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ rp->base = base;
+ rp->length = len;
+ *out_handle = rp;
+ return UACPI_STATUS_OK;
+}
+
+void
+uacpi_kernel_io_unmap(uacpi_handle handle)
+{
+ dynfree(handle);
+}
+
+void
+uacpi_kernel_pci_device_close([[maybe_unused]] uacpi_handle handle)
+{
+ /* XXX: no-op */
+ (void)handle;
+}
+
+uacpi_status
+uacpi_kernel_pci_device_open(uacpi_pci_address address, uacpi_handle *out_handle)
+{
+ struct pci_device *devp;
+
+ devp = dynalloc(sizeof(*devp));
+ if (devp == NULL) {
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ devp->segment = address.segment;
+ devp->bus = address.bus;
+ devp->slot = address.device;
+ devp->func = address.function;
+ pci_add_device(devp);
+
+ *out_handle = devp;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_read8(uacpi_handle handle, uacpi_size offset, uacpi_u8 *out_value)
+{
+ struct pci_device *devp = handle;
+ uint32_t v;
+
+ v = pci_readl(devp, offset);
+ *out_value = (v >> ((offset & 3) * 8)) & MASK(8);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_read16(uacpi_handle handle, uacpi_size offset, uacpi_u16 *out_value)
+{
+ struct pci_device *devp = handle;
+ uint32_t v;
+
+ v = pci_readl(devp, offset);
+ *out_value = (v >> ((offset & 2) * 8)) & MASK(16);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_kernel_pci_read32(uacpi_handle handle, uacpi_size offset, uacpi_u32 *out_value)
+{
+ struct pci_device *devp = handle;
+ *out_value = pci_readl(devp, offset);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_write8(uacpi_handle handle, uacpi_size offset, uacpi_u8 in_value)
+{
+ struct pci_device *devp = handle;
+ uint32_t v, shift;
+
+ /* Read-modify-write the 32-bit dword containing this byte */
+ shift = (offset & 3) * 8;
+ v = pci_readl(devp, offset);
+ v &= ~((uint32_t)0xFF << shift);
+ v |= (uint32_t)in_value << shift;
+ pci_writel(devp, offset, v);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_write16(uacpi_handle handle, uacpi_size offset, uacpi_u16 in_value)
+{
+ struct pci_device *devp = handle;
+ uint32_t v, shift;
+
+ /* Read-modify-write the 32-bit dword containing this word */
+ shift = (offset & 2) * 8;
+ v = pci_readl(devp, offset);
+ v &= ~((uint32_t)0xFFFF << shift);
+ v |= (uint32_t)in_value << shift;
+ pci_writel(devp, offset, v);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status
+uacpi_kernel_pci_write32(uacpi_handle handle, uacpi_size offset, uacpi_u32 in_value)
+{
+ struct pci_device *devp = handle;
+
+ pci_writel(devp, offset, in_value);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_u64
+uacpi_kernel_get_nanoseconds_since_boot(void)
+{
+ static uacpi_u64 time = 0;
+
+ /* TODO */
+ time += 1000000;
+ return time;
+}
+
+void
+uacpi_kernel_log(uacpi_log_level level, const uacpi_char *p)
+{
+ kprintf("%s", p); /* don't treat the message as a format string */
+}
+
+int
+uacpi_init(void)
+{
+ uacpi_status ret;
+
+ ret = uacpi_initialize(0);
+ if (uacpi_unlikely_error(ret)) {
+ kprintf("uacpi init error: %s\n", uacpi_status_to_string(ret));
+ return -1;
+ }
+
+ ret = uacpi_namespace_load();
+ if (uacpi_unlikely_error(ret)) {
+ kprintf("uacpi namespace load error: %s\n", uacpi_status_to_string(ret));
+ return -1;
+ }
+
+ ret = uacpi_namespace_initialize();
+ if (uacpi_unlikely_error(ret)) {
+ kprintf("uacpi namespace init error: %s\n", uacpi_status_to_string(ret));
+ return -1;
+ }
+
+ ret = uacpi_finalize_gpe_initialization();
+ if (uacpi_unlikely_error(ret)) {
+ kprintf("uacpi GPE init error: %s\n", uacpi_status_to_string(ret));
+ return -1;
+ }
+
+ return 0;
+}
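
The uacpi_kernel_get_nanoseconds_since_boot() stub above only bumps a static counter. A possible follow-up, sketched here rather than taken from the commit, is to back it with the same TIMER_GP interface already used by uacpi_kernel_sleep() and uacpi_kernel_wait_for_event(); treating get_time_usec() as monotonic from boot is an assumption.

/* Sketch only: derive a nanosecond count from the general-purpose timer. */
uacpi_u64
uacpi_kernel_get_nanoseconds_since_boot(void)
{
    struct timer tmr;

    req_timer(TIMER_GP, &tmr);

    /* Assumes get_time_usec() counts from boot and never goes backwards */
    return tmr.get_time_usec() * 1000;
}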
diff --git a/sys/dev/acpi/uacpi/default_handlers.c b/sys/dev/acpi/uacpi/default_handlers.c
new file mode 100644
index 0000000..32259d6
--- /dev/null
+++ b/sys/dev/acpi/uacpi/default_handlers.c
@@ -0,0 +1,336 @@
+#include <uacpi/internal/opregion.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/helpers.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/io.h>
+#include <uacpi/kernel_api.h>
+#include <uacpi/uacpi.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+#define PCI_ROOT_PNP_ID "PNP0A03"
+#define PCI_EXPRESS_ROOT_PNP_ID "PNP0A08"
+
+static uacpi_namespace_node *find_pci_root(uacpi_namespace_node *node)
+{
+ static const uacpi_char *pci_root_ids[] = {
+ PCI_ROOT_PNP_ID,
+ PCI_EXPRESS_ROOT_PNP_ID,
+ UACPI_NULL
+ };
+ uacpi_namespace_node *parent = node->parent;
+
+ while (parent != uacpi_namespace_root()) {
+ if (uacpi_device_matches_pnp_id(parent, pci_root_ids)) {
+ uacpi_trace(
+ "found a PCI root node %.4s controlling region %.4s\n",
+ parent->name.text, node->name.text
+ );
+ return parent;
+ }
+
+ parent = parent->parent;
+ }
+
+ uacpi_trace_region_error(
+ node, "unable to find PCI root controlling",
+ UACPI_STATUS_NOT_FOUND
+ );
+ return node;
+}
+
+static uacpi_status pci_region_attach(uacpi_region_attach_data *data)
+{
+ uacpi_namespace_node *node, *pci_root, *device;
+ uacpi_pci_address address = { 0 };
+ uacpi_u64 value;
+ uacpi_status ret;
+
+ node = data->region_node;
+ pci_root = find_pci_root(node);
+
+ /*
+ * Find the actual device object that is supposed to be controlling
+ * this operation region.
+ */
+ device = node;
+ while (device) {
+ uacpi_object_type type;
+
+ ret = uacpi_namespace_node_type(device, &type);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (type == UACPI_OBJECT_DEVICE)
+ break;
+
+ device = device->parent;
+ }
+
+ if (uacpi_unlikely(device == UACPI_NULL)) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ uacpi_trace_region_error(
+ node, "unable to find device responsible for", ret
+ );
+ return ret;
+ }
+
+ ret = uacpi_eval_simple_integer(device, "_ADR", &value);
+ if (ret == UACPI_STATUS_OK) {
+ address.function = (value >> 0) & 0xFF;
+ address.device = (value >> 16) & 0xFF;
+ }
+
+ ret = uacpi_eval_simple_integer(pci_root, "_SEG", &value);
+ if (ret == UACPI_STATUS_OK)
+ address.segment = value;
+
+ ret = uacpi_eval_simple_integer(pci_root, "_BBN", &value);
+ if (ret == UACPI_STATUS_OK)
+ address.bus = value;
+
+ uacpi_trace(
+ "detected PCI device %.4s@%04X:%02X:%02X:%01X\n",
+ device->name.text, address.segment, address.bus,
+ address.device, address.function
+ );
+
+ return uacpi_kernel_pci_device_open(address, &data->out_region_context);
+}
+
+static uacpi_status pci_region_detach(uacpi_region_detach_data *data)
+{
+ uacpi_kernel_pci_device_close(data->region_context);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status pci_region_do_rw(
+ uacpi_region_op op, uacpi_region_rw_data *data
+)
+{
+ uacpi_handle dev = data->region_context;
+ uacpi_u8 width;
+ uacpi_size offset;
+
+ offset = data->offset;
+ width = data->byte_width;
+
+ return op == UACPI_REGION_OP_READ ?
+ uacpi_pci_read(dev, offset, width, &data->value) :
+ uacpi_pci_write(dev, offset, width, data->value);
+}
+
+static uacpi_status handle_pci_region(uacpi_region_op op, uacpi_handle op_data)
+{
+ switch (op) {
+ case UACPI_REGION_OP_ATTACH:
+ return pci_region_attach(op_data);
+ case UACPI_REGION_OP_DETACH:
+ return pci_region_detach(op_data);
+ case UACPI_REGION_OP_READ:
+ case UACPI_REGION_OP_WRITE:
+ return pci_region_do_rw(op, op_data);
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+}
+
+struct memory_region_ctx {
+ uacpi_phys_addr phys;
+ uacpi_u8 *virt;
+ uacpi_size size;
+};
+
+static uacpi_status memory_region_attach(uacpi_region_attach_data *data)
+{
+ struct memory_region_ctx *ctx;
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ ctx = uacpi_kernel_alloc(sizeof(*ctx));
+ if (ctx == UACPI_NULL)
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ctx->size = data->generic_info.length;
+
+ // FIXME: this really shouldn't try to map everything at once
+ ctx->phys = data->generic_info.base;
+ ctx->virt = uacpi_kernel_map(ctx->phys, ctx->size);
+
+ if (uacpi_unlikely(ctx->virt == UACPI_NULL)) {
+ ret = UACPI_STATUS_MAPPING_FAILED;
+ uacpi_trace_region_error(data->region_node, "unable to map", ret);
+ uacpi_free(ctx, sizeof(*ctx));
+ goto out;
+ }
+
+ data->out_region_context = ctx;
+out:
+ return ret;
+}
+
+static uacpi_status memory_region_detach(uacpi_region_detach_data *data)
+{
+ struct memory_region_ctx *ctx = data->region_context;
+
+ uacpi_kernel_unmap(ctx->virt, ctx->size);
+ uacpi_free(ctx, sizeof(*ctx));
+ return UACPI_STATUS_OK;
+}
+
+struct io_region_ctx {
+ uacpi_io_addr base;
+ uacpi_handle handle;
+};
+
+static uacpi_status io_region_attach(uacpi_region_attach_data *data)
+{
+ struct io_region_ctx *ctx;
+ uacpi_generic_region_info *info = &data->generic_info;
+ uacpi_status ret;
+
+ ctx = uacpi_kernel_alloc(sizeof(*ctx));
+ if (ctx == UACPI_NULL)
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ctx->base = info->base;
+
+ ret = uacpi_kernel_io_map(ctx->base, info->length, &ctx->handle);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_trace_region_error(
+ data->region_node, "unable to map an IO", ret
+ );
+ uacpi_free(ctx, sizeof(*ctx));
+ return ret;
+ }
+
+ data->out_region_context = ctx;
+ return ret;
+}
+
+static uacpi_status io_region_detach(uacpi_region_detach_data *data)
+{
+ struct io_region_ctx *ctx = data->region_context;
+
+ uacpi_kernel_io_unmap(ctx->handle);
+ uacpi_free(ctx, sizeof(*ctx));
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status memory_region_do_rw(
+ uacpi_region_op op, uacpi_region_rw_data *data
+)
+{
+ struct memory_region_ctx *ctx = data->region_context;
+ uacpi_size offset;
+
+ offset = data->address - ctx->phys;
+
+ return op == UACPI_REGION_OP_READ ?
+ uacpi_system_memory_read(ctx->virt, offset, data->byte_width, &data->value) :
+ uacpi_system_memory_write(ctx->virt, offset, data->byte_width, data->value);
+}
+
+static uacpi_status handle_memory_region(uacpi_region_op op, uacpi_handle op_data)
+{
+ switch (op) {
+ case UACPI_REGION_OP_ATTACH:
+ return memory_region_attach(op_data);
+ case UACPI_REGION_OP_DETACH:
+ return memory_region_detach(op_data);
+ case UACPI_REGION_OP_READ:
+ case UACPI_REGION_OP_WRITE:
+ return memory_region_do_rw(op, op_data);
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+}
+
+static uacpi_status table_data_region_do_rw(
+ uacpi_region_op op, uacpi_region_rw_data *data
+)
+{
+ void *addr = UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)data->offset);
+
+ return op == UACPI_REGION_OP_READ ?
+ uacpi_system_memory_read(addr, 0, data->byte_width, &data->value) :
+ uacpi_system_memory_write(addr, 0, data->byte_width, data->value);
+}
+
+static uacpi_status handle_table_data_region(uacpi_region_op op, uacpi_handle op_data)
+{
+ switch (op) {
+ case UACPI_REGION_OP_ATTACH:
+ case UACPI_REGION_OP_DETACH:
+ return UACPI_STATUS_OK;
+ case UACPI_REGION_OP_READ:
+ case UACPI_REGION_OP_WRITE:
+ return table_data_region_do_rw(op, op_data);
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+}
+
+static uacpi_status io_region_do_rw(
+ uacpi_region_op op, uacpi_region_rw_data *data
+)
+{
+ struct io_region_ctx *ctx = data->region_context;
+ uacpi_u8 width;
+ uacpi_size offset;
+
+ offset = data->offset - ctx->base;
+ width = data->byte_width;
+
+ return op == UACPI_REGION_OP_READ ?
+ uacpi_system_io_read(ctx->handle, offset, width, &data->value) :
+ uacpi_system_io_write(ctx->handle, offset, width, data->value);
+}
+
+static uacpi_status handle_io_region(uacpi_region_op op, uacpi_handle op_data)
+{
+ switch (op) {
+ case UACPI_REGION_OP_ATTACH:
+ return io_region_attach(op_data);
+ case UACPI_REGION_OP_DETACH:
+ return io_region_detach(op_data);
+ case UACPI_REGION_OP_READ:
+ case UACPI_REGION_OP_WRITE:
+ return io_region_do_rw(op, op_data);
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+}
+
+void uacpi_install_default_address_space_handlers(void)
+{
+ uacpi_namespace_node *root;
+
+ root = uacpi_namespace_root();
+
+ uacpi_install_address_space_handler_with_flags(
+ root, UACPI_ADDRESS_SPACE_SYSTEM_MEMORY,
+ handle_memory_region, UACPI_NULL,
+ UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
+ );
+
+ uacpi_install_address_space_handler_with_flags(
+ root, UACPI_ADDRESS_SPACE_SYSTEM_IO,
+ handle_io_region, UACPI_NULL,
+ UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
+ );
+
+ uacpi_install_address_space_handler_with_flags(
+ root, UACPI_ADDRESS_SPACE_PCI_CONFIG,
+ handle_pci_region, UACPI_NULL,
+ UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
+ );
+
+ uacpi_install_address_space_handler_with_flags(
+ root, UACPI_ADDRESS_SPACE_TABLE_DATA,
+ handle_table_data_region, UACPI_NULL,
+ UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
+ );
+}
+
+#endif // !UACPI_BAREBONES_MODE
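
The uacpi_install_address_space_handler_with_flags() pattern above is the same one a driver would later use to claim additional operation region spaces, for example EmbeddedControl once an EC driver exists. A sketch follows; ec_region_handler, its plumbing, and the UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER constant are assumptions for illustration, not part of this commit.

/* Hypothetical EC operation region handler, registered by an EC driver */
static uacpi_status ec_region_handler(uacpi_region_op op, uacpi_handle op_data)
{
    switch (op) {
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
        return UACPI_STATUS_OK;
    case UACPI_REGION_OP_READ:
    case UACPI_REGION_OP_WRITE: {
        uacpi_region_rw_data *rw = op_data;
        /* ... forward rw->offset / rw->value to the EC protocol ... */
        (void)rw;
        return UACPI_STATUS_OK;
    }
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}

/* Called from the hypothetical EC driver's attach routine */
static void ec_install_region_handler(void)
{
    uacpi_install_address_space_handler_with_flags(
        uacpi_namespace_root(), UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER,
        ec_region_handler, UACPI_NULL, UACPI_ADDRESS_SPACE_HANDLER_DEFAULT
    );
}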
diff --git a/sys/dev/acpi/uacpi/event.c b/sys/dev/acpi/uacpi/event.c
new file mode 100644
index 0000000..0c58372
--- /dev/null
+++ b/sys/dev/acpi/uacpi/event.c
@@ -0,0 +1,2449 @@
+#include <uacpi/internal/event.h>
+#include <uacpi/internal/registers.h>
+#include <uacpi/internal/context.h>
+#include <uacpi/internal/io.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/interpreter.h>
+#include <uacpi/internal/notify.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/acpi.h>
+
+#define UACPI_EVENT_DISABLED 0
+#define UACPI_EVENT_ENABLED 1
+
+#if !defined(UACPI_REDUCED_HARDWARE) && !defined(UACPI_BAREBONES_MODE)
+
+static uacpi_handle g_gpe_state_slock;
+static struct uacpi_recursive_lock g_event_lock;
+static uacpi_bool g_gpes_finalized;
+
+struct fixed_event {
+ uacpi_u8 enable_field;
+ uacpi_u8 status_field;
+ uacpi_u16 enable_mask;
+ uacpi_u16 status_mask;
+};
+
+struct fixed_event_handler {
+ uacpi_interrupt_handler handler;
+ uacpi_handle ctx;
+};
+
+static const struct fixed_event fixed_events[UACPI_FIXED_EVENT_MAX + 1] = {
+ [UACPI_FIXED_EVENT_GLOBAL_LOCK] = {
+ .status_field = UACPI_REGISTER_FIELD_GBL_STS,
+ .enable_field = UACPI_REGISTER_FIELD_GBL_EN,
+ .enable_mask = ACPI_PM1_EN_GBL_EN_MASK,
+ .status_mask = ACPI_PM1_STS_GBL_STS_MASK,
+ },
+ [UACPI_FIXED_EVENT_TIMER_STATUS] = {
+ .status_field = UACPI_REGISTER_FIELD_TMR_STS,
+ .enable_field = UACPI_REGISTER_FIELD_TMR_EN,
+ .enable_mask = ACPI_PM1_EN_TMR_EN_MASK,
+ .status_mask = ACPI_PM1_STS_TMR_STS_MASK,
+ },
+ [UACPI_FIXED_EVENT_POWER_BUTTON] = {
+ .status_field = UACPI_REGISTER_FIELD_PWRBTN_STS,
+ .enable_field = UACPI_REGISTER_FIELD_PWRBTN_EN,
+ .enable_mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
+ .status_mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
+ },
+ [UACPI_FIXED_EVENT_SLEEP_BUTTON] = {
+ .status_field = UACPI_REGISTER_FIELD_SLPBTN_STS,
+ .enable_field = UACPI_REGISTER_FIELD_SLPBTN_EN,
+ .enable_mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
+ .status_mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
+ },
+ [UACPI_FIXED_EVENT_RTC] = {
+ .status_field = UACPI_REGISTER_FIELD_RTC_STS,
+ .enable_field = UACPI_REGISTER_FIELD_RTC_EN,
+ .enable_mask = ACPI_PM1_EN_RTC_EN_MASK,
+ .status_mask = ACPI_PM1_STS_RTC_STS_MASK,
+ },
+};
+
+static struct fixed_event_handler
+fixed_event_handlers[UACPI_FIXED_EVENT_MAX + 1];
+
+static uacpi_status initialize_fixed_events(void)
+{
+ uacpi_size i;
+
+ for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) {
+ uacpi_write_register_field(
+ fixed_events[i].enable_field, UACPI_EVENT_DISABLED
+ );
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status set_event(uacpi_u8 event, uacpi_u8 value)
+{
+ uacpi_status ret;
+ uacpi_u64 raw_value;
+ const struct fixed_event *ev = &fixed_events[event];
+
+ ret = uacpi_write_register_field(ev->enable_field, value);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_read_register_field(ev->enable_field, &raw_value);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (raw_value != value) {
+ uacpi_error("failed to %sable fixed event %d\n",
+ value ? "en" : "dis", event);
+ return UACPI_STATUS_HARDWARE_TIMEOUT;
+ }
+
+ uacpi_trace("fixed event %d %sabled successfully\n",
+ event, value ? "en" : "dis");
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ /*
+ * Attempting to enable an event that doesn't have a handler is most likely
+ * an error, don't allow it.
+ */
+ if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL)) {
+ ret = UACPI_STATUS_NO_HANDLER;
+ goto out;
+ }
+
+ ret = set_event(event, UACPI_EVENT_ENABLED);
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = set_event(event, UACPI_EVENT_DISABLED);
+
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event)
+{
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ return uacpi_write_register_field(
+ fixed_events[event].status_field, ACPI_PM1_STS_CLEAR
+ );
+}
+
+static uacpi_interrupt_ret dispatch_fixed_event(
+ const struct fixed_event *ev, uacpi_fixed_event event
+)
+{
+ uacpi_status ret;
+ struct fixed_event_handler *evh = &fixed_event_handlers[event];
+
+ ret = uacpi_write_register_field(ev->status_field, ACPI_PM1_STS_CLEAR);
+ if (uacpi_unlikely_error(ret))
+ return UACPI_INTERRUPT_NOT_HANDLED;
+
+ if (uacpi_unlikely(evh->handler == UACPI_NULL)) {
+ uacpi_warn(
+ "fixed event %d fired but no handler installed, disabling...\n",
+ event
+ );
+ uacpi_write_register_field(ev->enable_field, UACPI_EVENT_DISABLED);
+ return UACPI_INTERRUPT_NOT_HANDLED;
+ }
+
+ return evh->handler(evh->ctx);
+}
+
+static uacpi_interrupt_ret handle_fixed_events(void)
+{
+ uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;
+ uacpi_status ret;
+ uacpi_u64 enable_mask, status_mask;
+ uacpi_size i;
+
+ ret = uacpi_read_register(UACPI_REGISTER_PM1_STS, &status_mask);
+ if (uacpi_unlikely_error(ret))
+ return int_ret;
+
+ ret = uacpi_read_register(UACPI_REGISTER_PM1_EN, &enable_mask);
+ if (uacpi_unlikely_error(ret))
+ return int_ret;
+
+ for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i)
+ {
+ const struct fixed_event *ev = &fixed_events[i];
+
+ if (!(status_mask & ev->status_mask) ||
+ !(enable_mask & ev->enable_mask))
+ continue;
+
+ int_ret |= dispatch_fixed_event(ev, i);
+ }
+
+ return int_ret;
+}
+
+struct gpe_native_handler {
+ uacpi_gpe_handler cb;
+ uacpi_handle ctx;
+
+ /*
+ * Preserved values to be used for state restoration if this handler is
+ * removed at any point.
+ */
+ uacpi_handle previous_handler;
+ uacpi_u8 previous_triggering : 1;
+ uacpi_u8 previous_handler_type : 3;
+ uacpi_u8 previously_enabled : 1;
+};
+
+struct gpe_implicit_notify_handler {
+ struct gpe_implicit_notify_handler *next;
+ uacpi_namespace_node *device;
+};
+
+#define EVENTS_PER_GPE_REGISTER 8
+
+/*
+ * NOTE:
+ * This API and handler types are inspired by ACPICA, let's not reinvent the
+ * wheel and follow a similar path that people ended up finding useful after
+ * years of dealing with ACPI. Obviously credit goes to them for inventing
+ * "implicit notify" and other neat API.
+ */
+enum gpe_handler_type {
+ GPE_HANDLER_TYPE_NONE = 0,
+ GPE_HANDLER_TYPE_AML_HANDLER = 1,
+ GPE_HANDLER_TYPE_NATIVE_HANDLER = 2,
+ GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW = 3,
+ GPE_HANDLER_TYPE_IMPLICIT_NOTIFY = 4,
+};
+
+struct gp_event {
+ union {
+ struct gpe_native_handler *native_handler;
+ struct gpe_implicit_notify_handler *implicit_handler;
+ uacpi_namespace_node *aml_handler;
+ uacpi_handle *any_handler;
+ };
+
+ struct gpe_register *reg;
+ uacpi_u16 idx;
+
+ // "reference count" of the number of times this event has been enabled
+ uacpi_u8 num_users;
+
+ uacpi_u8 handler_type : 3;
+ uacpi_u8 triggering : 1;
+ uacpi_u8 wake : 1;
+ uacpi_u8 block_interrupts : 1;
+};
+
+struct gpe_register {
+ uacpi_mapped_gas status;
+ uacpi_mapped_gas enable;
+
+ uacpi_u8 runtime_mask;
+ uacpi_u8 wake_mask;
+ uacpi_u8 masked_mask;
+ uacpi_u8 current_mask;
+
+ uacpi_u16 base_idx;
+};
+
+struct gpe_block {
+ struct gpe_block *prev, *next;
+
+ /*
+ * Technically this can only refer to \_GPE, but there's also apparently a
+ * "GPE Block Device" with id "ACPI0006", which is not used by anyone. We
+ * still keep it as a possibility that someone might eventually use it, so
+ * it is supported here.
+ */
+ uacpi_namespace_node *device_node;
+
+ struct gpe_register *registers;
+ struct gp_event *events;
+ struct gpe_interrupt_ctx *irq_ctx;
+
+ uacpi_u16 num_registers;
+ uacpi_u16 num_events;
+ uacpi_u16 base_idx;
+};
+
+struct gpe_interrupt_ctx {
+ struct gpe_interrupt_ctx *prev, *next;
+
+ struct gpe_block *gpe_head;
+ uacpi_handle irq_handle;
+ uacpi_u32 irq;
+};
+static struct gpe_interrupt_ctx *g_gpe_interrupt_head;
+
+static uacpi_u8 gpe_get_mask(struct gp_event *event)
+{
+ return 1 << (event->idx - event->reg->base_idx);
+}
+
+enum gpe_state {
+ GPE_STATE_ENABLED,
+ GPE_STATE_ENABLED_CONDITIONALLY,
+ GPE_STATE_DISABLED,
+};
+
+static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state)
+{
+ uacpi_status ret;
+ struct gpe_register *reg = event->reg;
+ uacpi_u64 enable_mask;
+ uacpi_u8 event_bit;
+ uacpi_cpu_flags flags;
+
+ event_bit = gpe_get_mask(event);
+ if (state != GPE_STATE_DISABLED && (reg->masked_mask & event_bit))
+ return UACPI_STATUS_OK;
+
+ if (state == GPE_STATE_ENABLED_CONDITIONALLY) {
+ if (!(reg->current_mask & event_bit))
+ return UACPI_STATUS_OK;
+
+ state = GPE_STATE_ENABLED;
+ }
+
+ flags = uacpi_kernel_lock_spinlock(g_gpe_state_slock);
+
+ ret = uacpi_gas_read_mapped(&reg->enable, &enable_mask);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ switch (state) {
+ case GPE_STATE_ENABLED:
+ enable_mask |= event_bit;
+ break;
+ case GPE_STATE_DISABLED:
+ enable_mask &= ~event_bit;
+ break;
+ default:
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret = uacpi_gas_write_mapped(&reg->enable, enable_mask);
+out:
+ uacpi_kernel_unlock_spinlock(g_gpe_state_slock, flags);
+ return ret;
+}
+
+static uacpi_status clear_gpe(struct gp_event *event)
+{
+ struct gpe_register *reg = event->reg;
+
+ return uacpi_gas_write_mapped(&reg->status, gpe_get_mask(event));
+}
+
+static uacpi_status restore_gpe(struct gp_event *event)
+{
+ uacpi_status ret;
+
+ if (event->triggering == UACPI_GPE_TRIGGERING_LEVEL) {
+ ret = clear_gpe(event);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ ret = set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
+ event->block_interrupts = UACPI_FALSE;
+
+ return ret;
+}
+
+static void async_restore_gpe(uacpi_handle opaque)
+{
+ uacpi_status ret;
+ struct gp_event *event = opaque;
+
+ ret = restore_gpe(event);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to restore GPE(%02X): %s\n",
+ event->idx, uacpi_status_to_string(ret));
+ }
+}
+
+static void async_run_gpe_handler(uacpi_handle opaque)
+{
+ uacpi_status ret;
+ struct gp_event *event = opaque;
+
+ ret = uacpi_namespace_write_lock();
+ if (uacpi_unlikely_error(ret))
+ goto out_no_unlock;
+
+ switch (event->handler_type) {
+ case GPE_HANDLER_TYPE_AML_HANDLER: {
+ uacpi_object *method_obj;
+ uacpi_object_name name;
+
+ method_obj = uacpi_namespace_node_get_object_typed(
+ event->aml_handler, UACPI_OBJECT_METHOD_BIT
+ );
+ if (uacpi_unlikely(method_obj == UACPI_NULL)) {
+ uacpi_error("GPE(%02X) AML handler gone\n", event->idx);
+ break;
+ }
+
+ name = uacpi_namespace_node_name(event->aml_handler);
+ uacpi_trace(
+ "executing GPE(%02X) handler %.4s\n",
+ event->idx, name.text
+ );
+
+ ret = uacpi_execute_control_method(
+ event->aml_handler, method_obj->method, UACPI_NULL, UACPI_NULL
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error(
+ "error while executing GPE(%02X) handler %.4s: %s\n",
+ event->idx, event->aml_handler->name.text,
+ uacpi_status_to_string(ret)
+ );
+ }
+ break;
+ }
+
+ case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: {
+ struct gpe_implicit_notify_handler *handler;
+
+ handler = event->implicit_handler;
+ while (handler) {
+ /*
+ * 2 - Device Wake. Used to notify OSPM that the device has signaled
+ * its wake event, and that OSPM needs to notify OSPM native device
+ * driver for the device.
+ */
+ uacpi_notify_all(handler->device, 2);
+ handler = handler->next;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ uacpi_namespace_write_unlock();
+
+out_no_unlock:
+ /*
+ * We schedule the work as NOTIFICATION to make sure all other notifications
+ * finish before this GPE is re-enabled.
+ */
+ ret = uacpi_kernel_schedule_work(
+ UACPI_WORK_NOTIFICATION, async_restore_gpe, event
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to schedule GPE(%02X) restore: %s\n",
+ event->idx, uacpi_status_to_string(ret));
+ async_restore_gpe(event);
+ }
+}
+
+static uacpi_interrupt_ret dispatch_gpe(
+ uacpi_namespace_node *device_node, struct gp_event *event
+)
+{
+ uacpi_status ret;
+ uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;
+
+ /*
+ * For raw handlers we don't do any management whatsoever, we just let the
+ * handler know a GPE has triggered and let it handle disable/enable as
+ * well as clearing.
+ */
+ if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
+ return event->native_handler->cb(
+ event->native_handler->ctx, device_node, event->idx
+ );
+ }
+
+ ret = set_gpe_state(event, GPE_STATE_DISABLED);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("failed to disable GPE(%02X): %s\n",
+ event->idx, uacpi_status_to_string(ret));
+ return int_ret;
+ }
+
+ event->block_interrupts = UACPI_TRUE;
+
+ if (event->triggering == UACPI_GPE_TRIGGERING_EDGE) {
+ ret = clear_gpe(event);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to clear GPE(%02X): %s\n",
+ event->idx, uacpi_status_to_string(ret));
+ set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
+ return int_ret;
+ }
+ }
+
+ switch (event->handler_type) {
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER:
+ int_ret = event->native_handler->cb(
+ event->native_handler->ctx, device_node, event->idx
+ );
+ if (!(int_ret & UACPI_GPE_REENABLE))
+ break;
+
+ ret = restore_gpe(event);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to restore GPE(%02X): %s\n",
+ event->idx, uacpi_status_to_string(ret));
+ }
+ break;
+
+ case GPE_HANDLER_TYPE_AML_HANDLER:
+ case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY:
+ ret = uacpi_kernel_schedule_work(
+ UACPI_WORK_GPE_EXECUTION, async_run_gpe_handler, event
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_warn(
+ "unable to schedule GPE(%02X) for execution: %s\n",
+ event->idx, uacpi_status_to_string(ret)
+ );
+ }
+ break;
+
+ default:
+ uacpi_warn("GPE(%02X) fired but no handler, keeping disabled\n",
+ event->idx);
+ break;
+ }
+
+ return UACPI_INTERRUPT_HANDLED;
+}
+
+static uacpi_interrupt_ret detect_gpes(struct gpe_block *block)
+{
+ uacpi_status ret;
+ uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;
+ struct gpe_register *reg;
+ struct gp_event *event;
+ uacpi_u64 status, enable;
+ uacpi_size i, j;
+
+ while (block) {
+ for (i = 0; i < block->num_registers; ++i) {
+ reg = &block->registers[i];
+
+ if (!reg->runtime_mask && !reg->wake_mask)
+ continue;
+
+ ret = uacpi_gas_read_mapped(&reg->status, &status);
+ if (uacpi_unlikely_error(ret))
+ return int_ret;
+
+ ret = uacpi_gas_read_mapped(&reg->enable, &enable);
+ if (uacpi_unlikely_error(ret))
+ return int_ret;
+
+ if (status == 0)
+ continue;
+
+ for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) {
+ if (!((status & enable) & (1ull << j)))
+ continue;
+
+ event = &block->events[j + i * EVENTS_PER_GPE_REGISTER];
+ int_ret |= dispatch_gpe(block->device_node, event);
+ }
+ }
+
+ block = block->next;
+ }
+
+ return int_ret;
+}
+
+static uacpi_status maybe_dispatch_gpe(
+ uacpi_namespace_node *gpe_device, struct gp_event *event
+)
+{
+ uacpi_status ret;
+ struct gpe_register *reg = event->reg;
+ uacpi_u64 status;
+
+ ret = uacpi_gas_read_mapped(&reg->status, &status);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (!(status & gpe_get_mask(event)))
+ return ret;
+
+ dispatch_gpe(gpe_device, event);
+ return ret;
+}
+
+static uacpi_interrupt_ret handle_gpes(uacpi_handle opaque)
+{
+ struct gpe_interrupt_ctx *ctx = opaque;
+
+ if (uacpi_unlikely(ctx == UACPI_NULL))
+ return UACPI_INTERRUPT_NOT_HANDLED;
+
+ return detect_gpes(ctx->gpe_head);
+}
+
+static uacpi_status find_or_create_gpe_interrupt_ctx(
+ uacpi_u32 irq, struct gpe_interrupt_ctx **out_ctx
+)
+{
+ uacpi_status ret;
+ struct gpe_interrupt_ctx *entry = g_gpe_interrupt_head;
+
+ while (entry) {
+ if (entry->irq == irq) {
+ *out_ctx = entry;
+ return UACPI_STATUS_OK;
+ }
+
+ entry = entry->next;
+ }
+
+ entry = uacpi_kernel_alloc_zeroed(sizeof(*entry));
+ if (uacpi_unlikely(entry == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ /*
+ * SCI interrupt is installed by other code and is responsible for more
+ * things than just the GPE handling. Don't install it here.
+ */
+ if (irq != g_uacpi_rt_ctx.fadt.sci_int) {
+ ret = uacpi_kernel_install_interrupt_handler(
+ irq, handle_gpes, entry, &entry->irq_handle
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_free(entry, sizeof(*entry));
+ return ret;
+ }
+ }
+
+ entry->irq = irq;
+ entry->next = g_gpe_interrupt_head;
+ g_gpe_interrupt_head = entry;
+
+ *out_ctx = entry;
+ return UACPI_STATUS_OK;
+}
+
+static void gpe_release_implicit_notify_handlers(struct gp_event *event)
+{
+ struct gpe_implicit_notify_handler *handler, *next_handler;
+
+ handler = event->implicit_handler;
+ while (handler) {
+ next_handler = handler->next;
+ uacpi_free(handler, sizeof(*handler));
+ handler = next_handler;
+ }
+
+ event->implicit_handler = UACPI_NULL;
+}
+
+enum gpe_block_action
+{
+ GPE_BLOCK_ACTION_DISABLE_ALL,
+ GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
+ GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
+ GPE_BLOCK_ACTION_CLEAR_ALL,
+};
+
+static uacpi_status gpe_block_apply_action(
+ struct gpe_block *block, enum gpe_block_action action
+)
+{
+ uacpi_status ret;
+ uacpi_size i;
+ uacpi_u8 value;
+ struct gpe_register *reg;
+
+ for (i = 0; i < block->num_registers; ++i) {
+ reg = &block->registers[i];
+
+ switch (action) {
+ case GPE_BLOCK_ACTION_DISABLE_ALL:
+ value = 0;
+ break;
+ case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME:
+ value = reg->runtime_mask & ~reg->masked_mask;
+ break;
+ case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE:
+ value = reg->wake_mask;
+ break;
+ case GPE_BLOCK_ACTION_CLEAR_ALL:
+ ret = uacpi_gas_write_mapped(&reg->status, 0xFF);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ continue;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ reg->current_mask = value;
+ ret = uacpi_gas_write_mapped(&reg->enable, value);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static void gpe_block_mask_safe(struct gpe_block *block)
+{
+ uacpi_size i;
+ struct gpe_register *reg;
+
+ for (i = 0; i < block->num_registers; ++i) {
+ reg = &block->registers[i];
+
+ // No need to flush or do anything if it's not currently enabled
+ if (!reg->current_mask)
+ continue;
+
+ // 1. Mask the GPEs, this makes sure their state is no longer modifiable
+ reg->masked_mask = 0xFF;
+
+ /*
+ * 2. Wait for in-flight work & IRQs to finish, these might already
+ * be past the respective "if (masked)" check and therefore may
+ * try to re-enable a masked GPE.
+ */
+ uacpi_kernel_wait_for_work_completion();
+
+ /*
+ * 3. Now that this GPE's state is unmodifiable and we know that
+ * currently in-flight IRQs will see the masked state, we can
+ * safely disable all events knowing they won't be re-enabled by
+ * a racing IRQ.
+ */
+ uacpi_gas_write_mapped(&reg->enable, 0x00);
+
+ /*
+ * 4. Wait for the last possible IRQ to finish, now that this event is
+ * disabled.
+ */
+ uacpi_kernel_wait_for_work_completion();
+ }
+}
+
+static void uninstall_gpe_block(struct gpe_block *block)
+{
+ if (block->registers != UACPI_NULL) {
+ struct gpe_register *reg;
+ uacpi_size i;
+
+ gpe_block_mask_safe(block);
+
+ for (i = 0; i < block->num_registers; ++i) {
+ reg = &block->registers[i];
+
+ if (reg->enable.total_bit_width)
+ uacpi_unmap_gas_nofree(&reg->enable);
+ if (reg->status.total_bit_width)
+ uacpi_unmap_gas_nofree(&reg->status);
+ }
+ }
+
+ if (block->prev)
+ block->prev->next = block->next;
+
+ if (block->irq_ctx) {
+ struct gpe_interrupt_ctx *ctx = block->irq_ctx;
+
+ // Are we the first GPE block?
+ if (block == ctx->gpe_head) {
+ ctx->gpe_head = ctx->gpe_head->next;
+ } else {
+ struct gpe_block *prev_block = ctx->gpe_head;
+
+ // We're not, do a search
+ while (prev_block) {
+ if (prev_block->next == block) {
+ prev_block->next = block->next;
+ break;
+ }
+
+ prev_block = prev_block->next;
+ }
+ }
+
+ // This GPE block was the last user of this interrupt context, remove it
+ if (ctx->gpe_head == UACPI_NULL) {
+ if (ctx->prev)
+ ctx->prev->next = ctx->next;
+
+ if (ctx->irq != g_uacpi_rt_ctx.fadt.sci_int) {
+ uacpi_kernel_uninstall_interrupt_handler(
+ handle_gpes, ctx->irq_handle
+ );
+ }
+
+ uacpi_free(block->irq_ctx, sizeof(*block->irq_ctx));
+ }
+ }
+
+ if (block->events != UACPI_NULL) {
+ uacpi_size i;
+ struct gp_event *event;
+
+ for (i = 0; i < block->num_events; ++i) {
+ event = &block->events[i];
+
+ switch (event->handler_type) {
+ case GPE_HANDLER_TYPE_NONE:
+ case GPE_HANDLER_TYPE_AML_HANDLER:
+ break;
+
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER:
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW:
+ uacpi_free(event->native_handler,
+ sizeof(*event->native_handler));
+ break;
+
+ case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: {
+ gpe_release_implicit_notify_handlers(event);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ }
+
+ uacpi_free(block->registers,
+ sizeof(*block->registers) * block->num_registers);
+ uacpi_free(block->events,
+ sizeof(*block->events) * block->num_events);
+ uacpi_free(block, sizeof(*block));
+}
+
+static struct gp_event *gpe_from_block(struct gpe_block *block, uacpi_u16 idx)
+{
+ uacpi_u16 offset;
+
+ if (idx < block->base_idx)
+ return UACPI_NULL;
+
+ offset = idx - block->base_idx;
+ if (offset > block->num_events)
+ return UACPI_NULL;
+
+ return &block->events[offset];
+}
+
+struct gpe_match_ctx {
+ struct gpe_block *block;
+ uacpi_u32 matched_count;
+ uacpi_bool post_dynamic_table_load;
+};
+
+static uacpi_iteration_decision do_match_gpe_methods(
+ uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth
+)
+{
+ uacpi_status ret;
+ struct gpe_match_ctx *ctx = opaque;
+ struct gp_event *event;
+ uacpi_u8 triggering;
+ uacpi_u64 idx;
+
+ UACPI_UNUSED(depth);
+
+ if (node->name.text[0] != '_')
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ switch (node->name.text[1]) {
+ case 'L':
+ triggering = UACPI_GPE_TRIGGERING_LEVEL;
+ break;
+ case 'E':
+ triggering = UACPI_GPE_TRIGGERING_EDGE;
+ break;
+ default:
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ ret = uacpi_string_to_integer(&node->name.text[2], 2, UACPI_BASE_HEX, &idx);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_trace("invalid GPE method name %.4s, ignored\n", node->name.text);
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ event = gpe_from_block(ctx->block, idx);
+ if (event == UACPI_NULL)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ switch (event->handler_type) {
+ /*
+ * This had implicit notify configured but this is no longer needed as we
+ * now have an actual AML handler. Free the implicit notify list and switch
+ * this handler to AML mode.
+ */
+ case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY:
+ gpe_release_implicit_notify_handlers(event);
+ UACPI_FALLTHROUGH;
+ case GPE_HANDLER_TYPE_NONE:
+ event->aml_handler = node;
+ event->handler_type = GPE_HANDLER_TYPE_AML_HANDLER;
+ break;
+
+ case GPE_HANDLER_TYPE_AML_HANDLER:
+ // This is okay, since we're re-running the detection code
+ if (!ctx->post_dynamic_table_load) {
+ uacpi_warn(
+ "GPE(%02X) already matched %.4s, skipping %.4s\n",
+ (uacpi_u32)idx, event->aml_handler->name.text, node->name.text
+ );
+ }
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER:
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW:
+ uacpi_trace(
+ "not assigning GPE(%02X) to %.4s, override "
+ "installed by user\n", (uacpi_u32)idx, node->name.text
+ );
+ UACPI_FALLTHROUGH;
+ default:
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ uacpi_trace("assigned GPE(%02X) -> %.4s\n",
+ (uacpi_u32)idx, node->name.text);
+ event->triggering = triggering;
+ ctx->matched_count++;
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+void uacpi_events_match_post_dynamic_table_load(void)
+{
+ struct gpe_match_ctx match_ctx = {
+ .post_dynamic_table_load = UACPI_TRUE,
+ };
+ struct gpe_interrupt_ctx *irq_ctx;
+
+ uacpi_namespace_write_unlock();
+
+ if (uacpi_unlikely_error(uacpi_recursive_lock_acquire(&g_event_lock)))
+ goto out;
+
+ irq_ctx = g_gpe_interrupt_head;
+
+ while (irq_ctx) {
+ match_ctx.block = irq_ctx->gpe_head;
+
+ while (match_ctx.block) {
+ uacpi_namespace_do_for_each_child(
+ match_ctx.block->device_node, do_match_gpe_methods, UACPI_NULL,
+ UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY,
+ UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx
+ );
+ match_ctx.block = match_ctx.block->next;
+ }
+
+ irq_ctx = irq_ctx->next;
+ }
+
+ if (match_ctx.matched_count) {
+ uacpi_info("matched %u additional GPEs post dynamic table load\n",
+ match_ctx.matched_count);
+ }
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ uacpi_namespace_write_lock();
+}
+
+static uacpi_status create_gpe_block(
+ uacpi_namespace_node *device_node, uacpi_u32 irq, uacpi_u16 base_idx,
+ uacpi_u64 address, uacpi_u8 address_space_id, uacpi_u16 num_registers
+)
+{
+ uacpi_status ret = UACPI_STATUS_OUT_OF_MEMORY;
+ struct gpe_match_ctx match_ctx = { 0 };
+ struct gpe_block *block;
+ struct gpe_register *reg;
+ struct gp_event *event;
+ struct acpi_gas tmp_gas = { 0 };
+ uacpi_size i, j;
+
+ tmp_gas.address_space_id = address_space_id;
+ tmp_gas.register_bit_width = 8;
+
+ block = uacpi_kernel_alloc_zeroed(sizeof(*block));
+ if (uacpi_unlikely(block == UACPI_NULL))
+ return ret;
+
+ block->device_node = device_node;
+ block->base_idx = base_idx;
+
+ block->num_registers = num_registers;
+ block->registers = uacpi_kernel_alloc_zeroed(
+ num_registers * sizeof(*block->registers)
+ );
+ if (uacpi_unlikely(block->registers == UACPI_NULL))
+ goto error_out;
+
+ block->num_events = num_registers * EVENTS_PER_GPE_REGISTER;
+ block->events = uacpi_kernel_alloc_zeroed(
+ block->num_events * sizeof(*block->events)
+ );
+ if (uacpi_unlikely(block->events == UACPI_NULL))
+ goto error_out;
+
+ for (reg = block->registers, event = block->events, i = 0;
+ i < num_registers; ++i, ++reg) {
+
+ /*
+ * Initialize this register pair as well as all the events within it.
+ *
+ * Each register has two sub registers: status & enable, 8 bits each.
+ * Each bit corresponds to one event that we initialize below.
+ */
+ reg->base_idx = base_idx + (i * EVENTS_PER_GPE_REGISTER);
+
+
+ tmp_gas.address = address + i;
+ ret = uacpi_map_gas_noalloc(&tmp_gas, &reg->status);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+
+ tmp_gas.address += num_registers;
+ ret = uacpi_map_gas_noalloc(&tmp_gas, &reg->enable);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+
+ for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j, ++event) {
+ event->idx = reg->base_idx + j;
+ event->reg = reg;
+ }
+
+ /*
+ * Disable all GPEs in this register & clear anything that might be
+ * pending from earlier.
+ */
+ ret = uacpi_gas_write_mapped(&reg->enable, 0x00);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+
+ ret = uacpi_gas_write_mapped(&reg->status, 0xFF);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+ }
+
+ ret = find_or_create_gpe_interrupt_ctx(irq, &block->irq_ctx);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+
+ block->next = block->irq_ctx->gpe_head;
+ block->irq_ctx->gpe_head = block;
+ match_ctx.block = block;
+
+ uacpi_namespace_do_for_each_child(
+ device_node, do_match_gpe_methods, UACPI_NULL,
+ UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY,
+ UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx
+ );
+
+ uacpi_trace("initialized GPE block %.4s[%d->%d], %d AML handlers (IRQ %d)\n",
+ device_node->name.text, base_idx, base_idx + block->num_events,
+ match_ctx.matched_count, irq);
+ return UACPI_STATUS_OK;
+
+error_out:
+ uninstall_gpe_block(block);
+ return ret;
+}
+
+typedef uacpi_iteration_decision (*gpe_block_iteration_callback)
+ (struct gpe_block*, uacpi_handle);
+
+static void for_each_gpe_block(
+ gpe_block_iteration_callback cb, uacpi_handle handle
+)
+{
+ uacpi_iteration_decision decision;
+ struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head;
+ struct gpe_block *block;
+
+ while (irq_ctx) {
+ block = irq_ctx->gpe_head;
+
+ while (block) {
+ decision = cb(block, handle);
+ if (decision == UACPI_ITERATION_DECISION_BREAK)
+ return;
+
+ block = block->next;
+ }
+
+ irq_ctx = irq_ctx->next;
+ }
+}
+
+struct gpe_search_ctx {
+ uacpi_namespace_node *gpe_device;
+ uacpi_u16 idx;
+ struct gpe_block *out_block;
+ struct gp_event *out_event;
+};
+
+static uacpi_iteration_decision do_find_gpe(
+ struct gpe_block *block, uacpi_handle opaque
+)
+{
+ struct gpe_search_ctx *ctx = opaque;
+
+ if (block->device_node != ctx->gpe_device)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ ctx->out_block = block;
+ ctx->out_event = gpe_from_block(block, ctx->idx);
+ if (ctx->out_event == UACPI_NULL)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ return UACPI_ITERATION_DECISION_BREAK;
+}
+
+static struct gp_event *get_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ struct gpe_search_ctx ctx = { 0 };
+
+ ctx.gpe_device = gpe_device;
+ ctx.idx = idx;
+
+ for_each_gpe_block(do_find_gpe, &ctx);
+ return ctx.out_event;
+}
+
+static void gp_event_toggle_masks(struct gp_event *event, uacpi_bool set_on)
+{
+ uacpi_u8 this_mask;
+ struct gpe_register *reg = event->reg;
+
+ this_mask = gpe_get_mask(event);
+
+ if (set_on) {
+ reg->runtime_mask |= this_mask;
+ reg->current_mask = reg->runtime_mask;
+ return;
+ }
+
+ reg->runtime_mask &= ~this_mask;
+ reg->current_mask = reg->runtime_mask;
+}
+
+static uacpi_status gpe_remove_user(struct gp_event *event)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (uacpi_unlikely(event->num_users == 0))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (--event->num_users == 0) {
+ gp_event_toggle_masks(event, UACPI_FALSE);
+
+ ret = set_gpe_state(event, GPE_STATE_DISABLED);
+ if (uacpi_unlikely_error(ret)) {
+ gp_event_toggle_masks(event, UACPI_TRUE);
+ event->num_users++;
+ }
+ }
+
+ return ret;
+}
+
+enum event_clear_if_first {
+ EVENT_CLEAR_IF_FIRST_YES,
+ EVENT_CLEAR_IF_FIRST_NO,
+};
+
+static uacpi_status gpe_add_user(
+ struct gp_event *event, enum event_clear_if_first clear_if_first
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (uacpi_unlikely(event->num_users == 0xFF))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (++event->num_users == 1) {
+ if (clear_if_first == EVENT_CLEAR_IF_FIRST_YES)
+ clear_gpe(event);
+
+ gp_event_toggle_masks(event, UACPI_TRUE);
+
+ ret = set_gpe_state(event, GPE_STATE_ENABLED);
+ if (uacpi_unlikely_error(ret)) {
+ gp_event_toggle_masks(event, UACPI_FALSE);
+ event->num_users--;
+ }
+ }
+
+ return ret;
+}
+
+const uacpi_char *uacpi_gpe_triggering_to_string(
+ uacpi_gpe_triggering triggering
+)
+{
+ switch (triggering) {
+ case UACPI_GPE_TRIGGERING_EDGE:
+ return "edge";
+ case UACPI_GPE_TRIGGERING_LEVEL:
+ return "level";
+ default:
+ return "invalid";
+ }
+}
+
+static uacpi_bool gpe_needs_polling(struct gp_event *event)
+{
+ return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE;
+}
+
+static uacpi_status gpe_mask_unmask(
+ struct gp_event *event, uacpi_bool should_mask
+)
+{
+ struct gpe_register *reg;
+ uacpi_u8 mask;
+
+ reg = event->reg;
+ mask = gpe_get_mask(event);
+
+ if (should_mask) {
+ if (reg->masked_mask & mask)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ // 1. Mask the GPE, this makes sure its state is no longer modifiable
+ reg->masked_mask |= mask;
+
+ /*
+ * 2. Wait for in-flight work & IRQs to finish, these might already
+ * be past the respective "if (masked)" check and therefore may
+ * try to re-enable a masked GPE.
+ */
+ uacpi_kernel_wait_for_work_completion();
+
+ /*
+ * 3. Now that this GPE's state is unmodifiable and we know that currently
+ * in-flight IRQs will see the masked state, we can safely disable this
+ * event knowing it won't be re-enabled by a racing IRQ.
+ */
+ set_gpe_state(event, GPE_STATE_DISABLED);
+
+ /*
+ * 4. Wait for the last possible IRQ to finish, now that this event is
+ * disabled.
+ */
+ uacpi_kernel_wait_for_work_completion();
+
+ return UACPI_STATUS_OK;
+ }
+
+ if (!(reg->masked_mask & mask))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ reg->masked_mask &= ~mask;
+ if (!event->block_interrupts && event->num_users)
+ set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
+
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * Safely mask the event before we modify its handlers.
+ *
+ * This makes sure we can't get an IRQ in the middle of modifying this
+ * event's structures.
+ */
+static uacpi_bool gpe_mask_safe(struct gp_event *event)
+{
+ // No need to flush or do anything if it's not currently enabled
+ if (!(event->reg->current_mask & gpe_get_mask(event)))
+ return UACPI_FALSE;
+
+ gpe_mask_unmask(event, UACPI_TRUE);
+ return UACPI_TRUE;
+}
+
+static uacpi_iteration_decision do_initialize_gpe_block(
+ struct gpe_block *block, uacpi_handle opaque
+)
+{
+ uacpi_status ret;
+ uacpi_bool *poll_blocks = opaque;
+ uacpi_size i, j, count_enabled = 0;
+ struct gp_event *event;
+
+ for (i = 0; i < block->num_registers; ++i) {
+ for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) {
+ event = &block->events[j + i * EVENTS_PER_GPE_REGISTER];
+
+ if (event->wake ||
+ event->handler_type != GPE_HANDLER_TYPE_AML_HANDLER)
+ continue;
+
+ ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_warn("failed to enable GPE(%02X): %s\n",
+ event->idx, uacpi_status_to_string(ret));
+ continue;
+ }
+
+ *poll_blocks |= gpe_needs_polling(event);
+ count_enabled++;
+ }
+ }
+
+ if (count_enabled) {
+ uacpi_info(
+ "enabled %zu GPEs in block %.4s@[%d->%d]\n",
+ count_enabled, block->device_node->name.text,
+ block->base_idx, block->base_idx + block->num_events
+ );
+ }
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+uacpi_status uacpi_finalize_gpe_initialization(void)
+{
+ uacpi_status ret;
+ uacpi_bool poll_blocks = UACPI_FALSE;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (g_gpes_finalized)
+ goto out;
+
+ g_gpes_finalized = UACPI_TRUE;
+
+ for_each_gpe_block(do_initialize_gpe_block, &poll_blocks);
+ if (poll_blocks)
+ detect_gpes(g_gpe_interrupt_head->gpe_head);
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+static uacpi_status sanitize_device_and_find_gpe(
+ uacpi_namespace_node **gpe_device, uacpi_u16 idx,
+ struct gp_event **out_event
+)
+{
+ if (*gpe_device == UACPI_NULL) {
+ *gpe_device = uacpi_namespace_get_predefined(
+ UACPI_PREDEFINED_NAMESPACE_GPE
+ );
+ }
+
+ *out_event = get_gpe(*gpe_device, idx);
+ if (*out_event == UACPI_NULL)
+ return UACPI_STATUS_NOT_FOUND;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status do_install_gpe_handler(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+ uacpi_gpe_triggering triggering, enum gpe_handler_type type,
+ uacpi_gpe_handler handler, uacpi_handle ctx
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+ struct gpe_native_handler *native_handler;
+ uacpi_bool did_mask;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER ||
+ event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ native_handler = uacpi_kernel_alloc(sizeof(*native_handler));
+ if (uacpi_unlikely(native_handler == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ native_handler->cb = handler;
+ native_handler->ctx = ctx;
+ native_handler->previous_handler = event->any_handler;
+ native_handler->previous_handler_type = event->handler_type;
+ native_handler->previous_triggering = event->triggering;
+ native_handler->previously_enabled = UACPI_FALSE;
+
+ did_mask = gpe_mask_safe(event);
+
+ if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
+ event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
+ event->num_users != 0) {
+ native_handler->previously_enabled = UACPI_TRUE;
+ gpe_remove_user(event);
+
+ if (uacpi_unlikely(event->triggering != triggering)) {
+ uacpi_warn(
+ "GPE(%02X) user handler claims %s triggering, originally "
+ "configured as %s\n", idx,
+ uacpi_gpe_triggering_to_string(triggering),
+ uacpi_gpe_triggering_to_string(event->triggering)
+ );
+ }
+ }
+
+ event->native_handler = native_handler;
+ event->handler_type = type;
+ event->triggering = triggering;
+
+ if (did_mask)
+ gpe_mask_unmask(event, UACPI_FALSE);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_install_gpe_handler(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+ uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
+ uacpi_handle ctx
+)
+{
+ return do_install_gpe_handler(
+ gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER,
+ handler, ctx
+ );
+}
+
+uacpi_status uacpi_install_gpe_handler_raw(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+ uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
+ uacpi_handle ctx
+)
+{
+ return do_install_gpe_handler(
+ gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW,
+ handler, ctx
+ );
+}
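+
+/*
+ * Illustrative usage (a sketch, not part of the uACPI sources): a host driver
+ * would typically install a native handler and then enable the event. The
+ * callback signature below is assumed from the uacpi_gpe_handler typedef, and
+ * the GPE index 0x13 is purely hypothetical:
+ *
+ *     static uacpi_interrupt_ret ec_gpe_handler(
+ *         uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx)
+ *     {
+ *         // service the device, then report the SCI as handled
+ *         return UACPI_INTERRUPT_HANDLED;
+ *     }
+ *
+ *     static uacpi_status ec_attach_gpe(void)
+ *     {
+ *         uacpi_status ret;
+ *
+ *         ret = uacpi_install_gpe_handler(
+ *             UACPI_NULL, 0x13, UACPI_GPE_TRIGGERING_EDGE,
+ *             ec_gpe_handler, UACPI_NULL
+ *         );
+ *         if (uacpi_unlikely_error(ret))
+ *             return ret;
+ *
+ *         // a native handler counts as a handler, so the GPE can be enabled
+ *         return uacpi_enable_gpe(UACPI_NULL, 0x13);
+ *     }
+ */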
+
+uacpi_status uacpi_uninstall_gpe_handler(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+ uacpi_gpe_handler handler
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+ struct gpe_native_handler *native_handler;
+ uacpi_bool did_mask;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER &&
+ event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ goto out;
+ }
+
+ native_handler = event->native_handler;
+ if (uacpi_unlikely(native_handler->cb != handler)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ did_mask = gpe_mask_safe(event);
+
+ event->aml_handler = native_handler->previous_handler;
+ event->triggering = native_handler->previous_triggering;
+ event->handler_type = native_handler->previous_handler_type;
+
+ if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
+ event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
+ native_handler->previously_enabled) {
+ gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO);
+ }
+
+ uacpi_free(native_handler, sizeof(*native_handler));
+
+ if (did_mask)
+ gpe_mask_unmask(event, UACPI_FALSE);
+
+ if (gpe_needs_polling(event))
+ maybe_dispatch_gpe(gpe_device, event);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_enable_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE)) {
+ ret = UACPI_STATUS_NO_HANDLER;
+ goto out;
+ }
+
+ ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_YES);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ if (gpe_needs_polling(event))
+ maybe_dispatch_gpe(gpe_device, event);
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_disable_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ ret = gpe_remove_user(event);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_clear_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ ret = clear_gpe(event);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+static uacpi_status gpe_suspend_resume(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx, enum gpe_state state
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ event->block_interrupts = state == GPE_STATE_DISABLED;
+ ret = set_gpe_state(event, state);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_suspend_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ return gpe_suspend_resume(gpe_device, idx, GPE_STATE_DISABLED);
+}
+
+uacpi_status uacpi_resume_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ return gpe_suspend_resume(gpe_device, idx, GPE_STATE_ENABLED);
+}
+
+uacpi_status uacpi_finish_handling_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ event = get_gpe(gpe_device, idx);
+ if (uacpi_unlikely(event == UACPI_NULL)) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ goto out;
+ }
+
+ ret = restore_gpe(event);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+static uacpi_status gpe_get_mask_unmask(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool should_mask
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ ret = gpe_mask_unmask(event, should_mask);
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_mask_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ return gpe_get_mask_unmask(gpe_device, idx, UACPI_TRUE);
+}
+
+uacpi_status uacpi_unmask_gpe(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ return gpe_get_mask_unmask(gpe_device, idx, UACPI_FALSE);
+}
+
+uacpi_status uacpi_setup_gpe_for_wake(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx,
+ uacpi_namespace_node *wake_device
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+ uacpi_bool did_mask;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ if (wake_device != UACPI_NULL) {
+ uacpi_bool is_dev = wake_device == uacpi_namespace_root();
+
+ if (!is_dev) {
+ ret = uacpi_namespace_node_is(wake_device, UACPI_OBJECT_DEVICE, &is_dev);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ if (!is_dev)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ did_mask = gpe_mask_safe(event);
+
+ if (wake_device != UACPI_NULL) {
+ switch (event->handler_type) {
+ case GPE_HANDLER_TYPE_NONE:
+ event->handler_type = GPE_HANDLER_TYPE_IMPLICIT_NOTIFY;
+ event->triggering = UACPI_GPE_TRIGGERING_LEVEL;
+ break;
+
+ case GPE_HANDLER_TYPE_AML_HANDLER:
+ /*
+             * An AML handler already exists, and we expect it to call Notify()
+             * as it sees fit. For now, just make sure this event is disabled if
+             * it had previously been enabled automatically during initialization.
+ */
+ gpe_remove_user(event);
+ break;
+
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW:
+ case GPE_HANDLER_TYPE_NATIVE_HANDLER:
+            uacpi_warn(
+                "not configuring implicit notify for GPE(%02X) -> %.4s: "
+                "a user handler is already installed\n", event->idx,
+ wake_device->name.text
+ );
+ break;
+
+ // We will re-check this below
+ case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY:
+ break;
+
+ default:
+ uacpi_warn("invalid GPE(%02X) handler type: %d\n",
+ event->idx, event->handler_type);
+ ret = UACPI_STATUS_INTERNAL_ERROR;
+ goto out_unmask;
+ }
+
+ /*
+         * This GPE has no known AML handler, so we configure it to deliver
+         * implicit notifications to its wake devices whenever the corresponding
+         * GPE fires. Normally that would be the job of a matching AML handler,
+         * but we didn't find one.
+ */
+ if (event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) {
+ struct gpe_implicit_notify_handler *implicit_handler;
+
+ implicit_handler = event->implicit_handler;
+ while (implicit_handler) {
+ if (implicit_handler->device == wake_device) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out_unmask;
+ }
+
+ implicit_handler = implicit_handler->next;
+ }
+
+ implicit_handler = uacpi_kernel_alloc(sizeof(*implicit_handler));
+ if (uacpi_likely(implicit_handler != UACPI_NULL)) {
+ implicit_handler->device = wake_device;
+ implicit_handler->next = event->implicit_handler;
+ event->implicit_handler = implicit_handler;
+ } else {
+ uacpi_warn(
+ "unable to configure implicit wake for GPE(%02X) -> %.4s: "
+ "out of memory\n", event->idx, wake_device->name.text
+ );
+ }
+ }
+ }
+
+ event->wake = UACPI_TRUE;
+
+out_unmask:
+ if (did_mask)
+ gpe_mask_unmask(event, UACPI_FALSE);
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+static uacpi_status gpe_enable_disable_for_wake(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool enabled
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+ struct gpe_register *reg;
+ uacpi_u8 mask;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ if (!event->wake) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ reg = event->reg;
+ mask = gpe_get_mask(event);
+
+ if (enabled)
+ reg->wake_mask |= mask;
+ else
+        reg->wake_mask &= ~mask;
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_enable_gpe_for_wake(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_TRUE);
+}
+
+uacpi_status uacpi_disable_gpe_for_wake(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx
+)
+{
+ return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_FALSE);
+}
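+
+/*
+ * Illustrative wake flow (a sketch, not part of the uACPI sources): a host
+ * would typically associate a GPE with its wake-capable device, arm it, and
+ * switch the GPE blocks to their wake configuration right before sleeping.
+ * The "idx" and "wake_dev" names below are placeholders:
+ *
+ *     uacpi_setup_gpe_for_wake(UACPI_NULL, idx, wake_dev);
+ *     uacpi_enable_gpe_for_wake(UACPI_NULL, idx);
+ *     ...
+ *     uacpi_enable_all_wake_gpes();      // just before entering sleep
+ *     // after resume:
+ *     uacpi_enable_all_runtime_gpes();
+ */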
+
+struct do_for_all_gpes_ctx {
+ enum gpe_block_action action;
+ uacpi_status ret;
+};
+
+static uacpi_iteration_decision do_for_all_gpes(
+ struct gpe_block *block, uacpi_handle opaque
+)
+{
+ struct do_for_all_gpes_ctx *ctx = opaque;
+
+ ctx->ret = gpe_block_apply_action(block, ctx->action);
+ if (uacpi_unlikely_error(ctx->ret))
+ return UACPI_ITERATION_DECISION_BREAK;
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+static uacpi_status for_all_gpes_locked(struct do_for_all_gpes_ctx *ctx)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ for_each_gpe_block(do_for_all_gpes, ctx);
+
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ctx->ret;
+}
+
+uacpi_status uacpi_disable_all_gpes(void)
+{
+ struct do_for_all_gpes_ctx ctx = {
+ .action = GPE_BLOCK_ACTION_DISABLE_ALL,
+ };
+ return for_all_gpes_locked(&ctx);
+}
+
+uacpi_status uacpi_enable_all_runtime_gpes(void)
+{
+ struct do_for_all_gpes_ctx ctx = {
+ .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
+ };
+ return for_all_gpes_locked(&ctx);
+}
+
+uacpi_status uacpi_enable_all_wake_gpes(void)
+{
+ struct do_for_all_gpes_ctx ctx = {
+ .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
+ };
+ return for_all_gpes_locked(&ctx);
+}
+
+static uacpi_status initialize_gpes(void)
+{
+ uacpi_status ret;
+ uacpi_namespace_node *gpe_node;
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+ uacpi_u8 gpe0_regs = 0, gpe1_regs = 0;
+
+ gpe_node = uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_GPE);
+
+ if (fadt->x_gpe0_blk.address && fadt->gpe0_blk_len) {
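+        /* half of the block is status bytes, half enable bytes, hence len / 2 */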
+ gpe0_regs = fadt->gpe0_blk_len / 2;
+
+ ret = create_gpe_block(
+ gpe_node, fadt->sci_int, 0, fadt->x_gpe0_blk.address,
+ fadt->x_gpe0_blk.address_space_id, gpe0_regs
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to create FADT GPE block 0: %s\n",
+ uacpi_status_to_string(ret));
+ }
+ }
+
+ if (fadt->x_gpe1_blk.address && fadt->gpe1_blk_len) {
+ gpe1_regs = fadt->gpe1_blk_len / 2;
+
+ if (uacpi_unlikely((gpe0_regs * EVENTS_PER_GPE_REGISTER) >
+ fadt->gpe1_base)) {
+            uacpi_error(
+                "FADT GPE block 1 [%d->%d] collides with GPE block 0 "
+                "[%d->%d], ignoring\n",
+                fadt->gpe1_base,
+                fadt->gpe1_base + gpe1_regs * EVENTS_PER_GPE_REGISTER,
+                0, gpe0_regs * EVENTS_PER_GPE_REGISTER
+ );
+ gpe1_regs = 0;
+ goto out;
+ }
+
+ ret = create_gpe_block(
+ gpe_node, fadt->sci_int, fadt->gpe1_base, fadt->x_gpe1_blk.address,
+ fadt->x_gpe1_blk.address_space_id, gpe1_regs
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to create FADT GPE block 1: %s\n",
+ uacpi_status_to_string(ret));
+ }
+ }
+
+ if (gpe0_regs == 0 && gpe1_regs == 0)
+ uacpi_trace("platform has no FADT GPE events\n");
+
+out:
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_install_gpe_block(
+ uacpi_namespace_node *gpe_device, uacpi_u64 address,
+ uacpi_address_space address_space, uacpi_u16 num_registers, uacpi_u32 irq
+)
+{
+ uacpi_status ret;
+ uacpi_bool is_dev;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ if (!is_dev)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (uacpi_unlikely(get_gpe(gpe_device, 0) != UACPI_NULL)) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ ret = create_gpe_block(
+ gpe_device, irq, 0, address, address_space, num_registers
+ );
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_uninstall_gpe_block(
+ uacpi_namespace_node *gpe_device
+)
+{
+ uacpi_status ret;
+ uacpi_bool is_dev;
+ struct gpe_search_ctx search_ctx = { 0 };
+
+ search_ctx.idx = 0;
+ search_ctx.gpe_device = gpe_device;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ if (!is_dev)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ for_each_gpe_block(do_find_gpe, &search_ctx);
+ if (search_ctx.out_block == UACPI_NULL) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ goto out;
+ }
+
+ uninstall_gpe_block(search_ctx.out_block);
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+static uacpi_interrupt_ret handle_global_lock(uacpi_handle ctx)
+{
+ uacpi_cpu_flags flags;
+ UACPI_UNUSED(ctx);
+
+ if (uacpi_unlikely(!g_uacpi_rt_ctx.has_global_lock)) {
+ uacpi_warn("platform has no global lock but a release event "
+ "was fired anyway?\n");
+ return UACPI_INTERRUPT_HANDLED;
+ }
+
+ flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
+ if (!g_uacpi_rt_ctx.global_lock_pending) {
+ uacpi_trace("spurious firmware global lock release notification\n");
+ goto out;
+ }
+
+ uacpi_trace("received a firmware global lock release notification\n");
+
+ uacpi_kernel_signal_event(g_uacpi_rt_ctx.global_lock_event);
+ g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
+
+out:
+ uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
+ return UACPI_INTERRUPT_HANDLED;
+}
+
+static uacpi_interrupt_ret handle_sci(uacpi_handle ctx)
+{
+ uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;
+
+ int_ret |= handle_fixed_events();
+ int_ret |= handle_gpes(ctx);
+
+ return int_ret;
+}
+
+uacpi_status uacpi_initialize_events_early(void)
+{
+ uacpi_status ret;
+
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ g_gpe_state_slock = uacpi_kernel_create_spinlock();
+ if (uacpi_unlikely(g_gpe_state_slock == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = uacpi_recursive_lock_init(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = initialize_fixed_events();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_initialize_events(void)
+{
+ uacpi_status ret;
+
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ ret = initialize_gpes();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_kernel_install_interrupt_handler(
+ g_uacpi_rt_ctx.fadt.sci_int, handle_sci, g_gpe_interrupt_head,
+ &g_uacpi_rt_ctx.sci_handle
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error(
+ "unable to install SCI interrupt handler: %s\n",
+ uacpi_status_to_string(ret)
+ );
+ return ret;
+ }
+ g_uacpi_rt_ctx.sci_handle_valid = UACPI_TRUE;
+
+ g_uacpi_rt_ctx.global_lock_event = uacpi_kernel_create_event();
+ if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_event == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ g_uacpi_rt_ctx.global_lock_spinlock = uacpi_kernel_create_spinlock();
+ if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_spinlock == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = uacpi_install_fixed_event_handler(
+ UACPI_FIXED_EVENT_GLOBAL_LOCK, handle_global_lock, UACPI_NULL
+ );
+ if (uacpi_likely_success(ret)) {
+ if (uacpi_unlikely(g_uacpi_rt_ctx.facs == UACPI_NULL)) {
+ uacpi_uninstall_fixed_event_handler(UACPI_FIXED_EVENT_GLOBAL_LOCK);
+ uacpi_warn("platform has global lock but no FACS was provided\n");
+ return ret;
+ }
+ g_uacpi_rt_ctx.has_global_lock = UACPI_TRUE;
+ } else if (ret == UACPI_STATUS_HARDWARE_TIMEOUT) {
+ // has_global_lock remains set to false
+ uacpi_trace("platform has no global lock\n");
+ ret = UACPI_STATUS_OK;
+ }
+
+ return ret;
+}
+
+void uacpi_deinitialize_events(void)
+{
+ struct gpe_interrupt_ctx *ctx, *next_ctx = g_gpe_interrupt_head;
+ uacpi_size i;
+
+ g_gpes_finalized = UACPI_FALSE;
+
+ if (g_uacpi_rt_ctx.sci_handle_valid) {
+ uacpi_kernel_uninstall_interrupt_handler(
+ handle_sci, g_uacpi_rt_ctx.sci_handle
+ );
+ g_uacpi_rt_ctx.sci_handle_valid = UACPI_FALSE;
+ }
+
+ while (next_ctx) {
+ struct gpe_block *block, *next_block;
+
+ ctx = next_ctx;
+ next_ctx = ctx->next;
+
+ next_block = ctx->gpe_head;
+ while (next_block) {
+ block = next_block;
+ next_block = block->next;
+ uninstall_gpe_block(block);
+ }
+ }
+
+ for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) {
+ if (fixed_event_handlers[i].handler)
+ uacpi_uninstall_fixed_event_handler(i);
+ }
+
+ if (g_gpe_state_slock != UACPI_NULL) {
+ uacpi_kernel_free_spinlock(g_gpe_state_slock);
+ g_gpe_state_slock = UACPI_NULL;
+ }
+
+ uacpi_recursive_lock_deinit(&g_event_lock);
+
+ g_gpe_interrupt_head = UACPI_NULL;
+}
+
+uacpi_status uacpi_install_fixed_event_handler(
+ uacpi_fixed_event event, uacpi_interrupt_handler handler,
+ uacpi_handle user
+)
+{
+ uacpi_status ret;
+ struct fixed_event_handler *ev;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ev = &fixed_event_handlers[event];
+
+ if (ev->handler != UACPI_NULL) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ ev->handler = handler;
+ ev->ctx = user;
+
+ ret = set_event(event, UACPI_EVENT_ENABLED);
+ if (uacpi_unlikely_error(ret)) {
+ ev->handler = UACPI_NULL;
+ ev->ctx = UACPI_NULL;
+ }
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_uninstall_fixed_event_handler(
+ uacpi_fixed_event event
+)
+{
+ uacpi_status ret;
+ struct fixed_event_handler *ev;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ev = &fixed_event_handlers[event];
+
+ ret = set_event(event, UACPI_EVENT_DISABLED);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ uacpi_kernel_wait_for_work_completion();
+
+ ev->handler = UACPI_NULL;
+ ev->ctx = UACPI_NULL;
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_fixed_event_info(
+ uacpi_fixed_event event, uacpi_event_info *out_info
+)
+{
+ uacpi_status ret;
+ const struct fixed_event *ev;
+ uacpi_u64 raw_value;
+ uacpi_event_info info = 0;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_NOT_FOUND;
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (fixed_event_handlers[event].handler != UACPI_NULL)
+ info |= UACPI_EVENT_INFO_HAS_HANDLER;
+
+ ev = &fixed_events[event];
+
+ ret = uacpi_read_register_field(ev->enable_field, &raw_value);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+ if (raw_value)
+ info |= UACPI_EVENT_INFO_ENABLED | UACPI_EVENT_INFO_HW_ENABLED;
+
+ ret = uacpi_read_register_field(ev->status_field, &raw_value);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+ if (raw_value)
+ info |= UACPI_EVENT_INFO_HW_STATUS;
+
+ *out_info = info;
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+uacpi_status uacpi_gpe_info(
+ uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_event_info *out_info
+)
+{
+ uacpi_status ret;
+ struct gp_event *event;
+ struct gpe_register *reg;
+ uacpi_u8 mask;
+ uacpi_u64 raw_value;
+ uacpi_event_info info = 0;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ if (event->handler_type != GPE_HANDLER_TYPE_NONE)
+ info |= UACPI_EVENT_INFO_HAS_HANDLER;
+
+ mask = gpe_get_mask(event);
+ reg = event->reg;
+
+ if (reg->runtime_mask & mask)
+ info |= UACPI_EVENT_INFO_ENABLED;
+ if (reg->masked_mask & mask)
+ info |= UACPI_EVENT_INFO_MASKED;
+ if (reg->wake_mask & mask)
+ info |= UACPI_EVENT_INFO_ENABLED_FOR_WAKE;
+
+ ret = uacpi_gas_read_mapped(&reg->enable, &raw_value);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+ if (raw_value & mask)
+ info |= UACPI_EVENT_INFO_HW_ENABLED;
+
+ ret = uacpi_gas_read_mapped(&reg->status, &raw_value);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+ if (raw_value & mask)
+ info |= UACPI_EVENT_INFO_HW_STATUS;
+
+ *out_info = info;
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+#define PM1_STATUS_BITS ( \
+ ACPI_PM1_STS_TMR_STS_MASK | \
+ ACPI_PM1_STS_BM_STS_MASK | \
+ ACPI_PM1_STS_GBL_STS_MASK | \
+ ACPI_PM1_STS_PWRBTN_STS_MASK | \
+ ACPI_PM1_STS_SLPBTN_STS_MASK | \
+ ACPI_PM1_STS_RTC_STS_MASK | \
+ ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK | \
+ ACPI_PM1_STS_WAKE_STS_MASK \
+)
+
+uacpi_status uacpi_clear_all_events(void)
+{
+ uacpi_status ret;
+ struct do_for_all_gpes_ctx ctx = {
+ .action = GPE_BLOCK_ACTION_CLEAR_ALL,
+ };
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ret = uacpi_recursive_lock_acquire(&g_event_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
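+    /*
+     * PM1 status bits are write-one-to-clear, so writing the full mask
+     * acknowledges every pending fixed event.
+     */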
+ ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ for_each_gpe_block(do_for_all_gpes, &ctx);
+ ret = ctx.ret;
+
+out:
+ uacpi_recursive_lock_release(&g_event_lock);
+ return ret;
+}
+
+#endif // !UACPI_REDUCED_HARDWARE && !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/interpreter.c b/sys/dev/acpi/uacpi/interpreter.c
new file mode 100644
index 0000000..8ffb8d5
--- /dev/null
+++ b/sys/dev/acpi/uacpi/interpreter.c
@@ -0,0 +1,6053 @@
+#include <uacpi/internal/types.h>
+#include <uacpi/internal/interpreter.h>
+#include <uacpi/internal/dynamic_array.h>
+#include <uacpi/internal/opcodes.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/context.h>
+#include <uacpi/internal/shareable.h>
+#include <uacpi/internal/tables.h>
+#include <uacpi/internal/helpers.h>
+#include <uacpi/kernel_api.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/opregion.h>
+#include <uacpi/internal/io.h>
+#include <uacpi/internal/notify.h>
+#include <uacpi/internal/resources.h>
+#include <uacpi/internal/event.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/internal/osi.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+enum item_type {
+ ITEM_NONE = 0,
+ ITEM_NAMESPACE_NODE,
+ ITEM_OBJECT,
+ ITEM_EMPTY_OBJECT,
+ ITEM_PACKAGE_LENGTH,
+ ITEM_IMMEDIATE,
+};
+
+struct package_length {
+ uacpi_u32 begin;
+ uacpi_u32 end;
+};
+
+struct item {
+ uacpi_u8 type;
+ union {
+ uacpi_handle handle;
+ uacpi_object *obj;
+ struct uacpi_namespace_node *node;
+ struct package_length pkg;
+ uacpi_u64 immediate;
+ uacpi_u8 immediate_bytes[8];
+ };
+};
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(item_array, struct item, 8)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(item_array, struct item, static)
+
+struct op_context {
+ uacpi_u8 pc;
+ uacpi_bool preempted;
+
+ /*
+ * == 0 -> none
+ * >= 1 -> item[idx - 1]
+ */
+ uacpi_u8 tracked_pkg_idx;
+
+ uacpi_aml_op switched_from;
+
+ const struct uacpi_op_spec *op;
+ struct item_array items;
+};
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(op_context_array, struct op_context, 8)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ op_context_array, struct op_context, static
+)
+
+static struct op_context *op_context_array_one_before_last(
+ struct op_context_array *arr
+)
+{
+ uacpi_size size;
+
+ size = op_context_array_size(arr);
+
+ if (size < 2)
+ return UACPI_NULL;
+
+ return op_context_array_at(arr, size - 2);
+}
+
+enum code_block_type {
+ CODE_BLOCK_IF = 1,
+ CODE_BLOCK_ELSE = 2,
+ CODE_BLOCK_WHILE = 3,
+ CODE_BLOCK_SCOPE = 4,
+};
+
+struct code_block {
+ enum code_block_type type;
+ uacpi_u32 begin, end;
+ union {
+ struct uacpi_namespace_node *node;
+ uacpi_u64 expiration_point;
+ };
+};
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(code_block_array, struct code_block, 8)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ code_block_array, struct code_block, static
+)
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(held_mutexes_array, uacpi_mutex*, 8)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ held_mutexes_array, uacpi_mutex*, static
+)
+
+static uacpi_status held_mutexes_array_push(
+ struct held_mutexes_array *arr, uacpi_mutex *mutex
+)
+{
+ uacpi_mutex **slot;
+
+ slot = held_mutexes_array_alloc(arr);
+ if (uacpi_unlikely(slot == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ *slot = mutex;
+ uacpi_shareable_ref(mutex);
+ return UACPI_STATUS_OK;
+}
+
+static void held_mutexes_array_remove_idx(
+ struct held_mutexes_array *arr, uacpi_size i
+)
+{
+ uacpi_size size;
+
+ size = held_mutexes_array_inline_capacity(arr);
+
+ // Only the dynamic array part is affected
+ if (i >= size) {
+ i -= size;
+ size = arr->size_including_inline - size;
+ size -= i + 1;
+
+ uacpi_memmove(
+ &arr->dynamic_storage[i], &arr->dynamic_storage[i + 1],
+ size * sizeof(arr->inline_storage[0])
+ );
+
+ held_mutexes_array_pop(arr);
+ return;
+ }
+
+ size = UACPI_MIN(held_mutexes_array_inline_capacity(arr),
+ arr->size_including_inline);
+ size -= i + 1;
+ uacpi_memmove(
+ &arr->inline_storage[i], &arr->inline_storage[i + 1],
+ size * sizeof(arr->inline_storage[0])
+ );
+
+ size = held_mutexes_array_size(arr);
+ i = held_mutexes_array_inline_capacity(arr);
+
+ /*
+     * This array has dynamic storage as well; now we have to take the first
+ * dynamic item, move it to the top of inline storage, and then shift all
+ * dynamic items backward by 1 as well.
+ */
+ if (size > i) {
+ arr->inline_storage[i - 1] = arr->dynamic_storage[0];
+ size -= i + 1;
+
+ uacpi_memmove(
+ &arr->dynamic_storage[0], &arr->dynamic_storage[1],
+ size * sizeof(arr->inline_storage[0])
+ );
+ }
+
+ held_mutexes_array_pop(arr);
+}
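+
+/*
+ * Worked example of the shifting above (illustrative): with an inline
+ * capacity of 8 and 10 held mutexes, entries 0-7 live in inline_storage and
+ * entries 8-9 in dynamic_storage. Removing index 3 shifts inline entries 4-7
+ * down by one, moves dynamic_storage[0] into the freed inline slot 7, shifts
+ * the remaining dynamic entries down, and pops the now-unused final slot.
+ */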
+
+enum force_release {
+ FORCE_RELEASE_NO,
+ FORCE_RELEASE_YES,
+};
+
+static uacpi_status held_mutexes_array_remove_and_release(
+ struct held_mutexes_array *arr, uacpi_mutex *mutex,
+ enum force_release force
+)
+{
+ uacpi_mutex *item;
+ uacpi_size i;
+
+ if (uacpi_unlikely(held_mutexes_array_size(arr) == 0))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ item = *held_mutexes_array_last(arr);
+
+ if (uacpi_unlikely(item->sync_level != mutex->sync_level &&
+ force != FORCE_RELEASE_YES)) {
+ uacpi_warn(
+ "ignoring mutex @%p release due to sync level mismatch: %d vs %d\n",
+ mutex, mutex->sync_level, item->sync_level
+ );
+
+ // We still return OK because we don't want to abort because of this
+ return UACPI_STATUS_OK;
+ }
+
+ if (mutex->depth > 1 && force == FORCE_RELEASE_NO) {
+ uacpi_release_aml_mutex(mutex);
+ return UACPI_STATUS_OK;
+ }
+
+ // Fast path for well-behaved AML that releases mutexes in descending order
+ if (uacpi_likely(item == mutex)) {
+ held_mutexes_array_pop(arr);
+ goto do_release;
+ }
+
+ /*
+ * The mutex being released is not the last one acquired, although we did
+ * verify that at least it has the same sync level. Anyway, now we have
+ * to search for it and then remove it from the array while shifting
+ * everything backwards.
+ */
+ i = held_mutexes_array_size(arr);
+ for (;;) {
+ item = *held_mutexes_array_at(arr, --i);
+ if (item == mutex)
+ break;
+
+ if (uacpi_unlikely(i == 0))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ held_mutexes_array_remove_idx(arr, i);
+
+do_release:
+ // This is either a force release, or depth was already 1 to begin with
+ mutex->depth = 1;
+ uacpi_release_aml_mutex(mutex);
+
+ uacpi_mutex_unref(mutex);
+ return UACPI_STATUS_OK;
+}
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(
+ temp_namespace_node_array, uacpi_namespace_node*, 8)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ temp_namespace_node_array, uacpi_namespace_node*, static
+)
+
+static uacpi_status temp_namespace_node_array_push(
+ struct temp_namespace_node_array *arr, uacpi_namespace_node *node
+)
+{
+ uacpi_namespace_node **slot;
+
+ slot = temp_namespace_node_array_alloc(arr);
+ if (uacpi_unlikely(slot == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ *slot = node;
+ return UACPI_STATUS_OK;
+}
+
+struct call_frame {
+ struct uacpi_control_method *method;
+
+ uacpi_object *args[7];
+ uacpi_object *locals[8];
+
+ struct op_context_array pending_ops;
+ struct code_block_array code_blocks;
+ struct temp_namespace_node_array temp_nodes;
+ struct code_block *last_while;
+ uacpi_u64 prev_while_expiration;
+ uacpi_u32 prev_while_code_offset;
+
+ uacpi_u32 code_offset;
+
+ struct uacpi_namespace_node *cur_scope;
+
+ // Only used if the method is serialized
+ uacpi_u8 prev_sync_level;
+};
+
+static void *call_frame_cursor(struct call_frame *frame)
+{
+ return frame->method->code + frame->code_offset;
+}
+
+static uacpi_size call_frame_code_bytes_left(struct call_frame *frame)
+{
+ return frame->method->size - frame->code_offset;
+}
+
+static uacpi_bool call_frame_has_code(struct call_frame *frame)
+{
+ return call_frame_code_bytes_left(frame) > 0;
+}
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(call_frame_array, struct call_frame, 4)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ call_frame_array, struct call_frame, static
+)
+
+static struct call_frame *call_frame_array_one_before_last(
+ struct call_frame_array *arr
+)
+{
+ uacpi_size size;
+
+ size = call_frame_array_size(arr);
+
+ if (size < 2)
+ return UACPI_NULL;
+
+ return call_frame_array_at(arr, size - 2);
+}
+
+// NOTE: Try to keep size under 2 pages
+struct execution_context {
+ uacpi_object *ret;
+ struct call_frame_array call_stack;
+ struct held_mutexes_array held_mutexes;
+
+ struct call_frame *cur_frame;
+ struct code_block *cur_block;
+ const struct uacpi_op_spec *cur_op;
+ struct op_context *prev_op_ctx;
+ struct op_context *cur_op_ctx;
+
+ uacpi_u8 sync_level;
+};
+
+#define AML_READ(ptr, offset) (*(((uacpi_u8*)(ptr)) + offset))
+
+static uacpi_status parse_nameseg(uacpi_u8 *cursor,
+ uacpi_object_name *out_name)
+{
+ if (uacpi_unlikely(!uacpi_is_valid_nameseg(cursor)))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ uacpi_memcpy(&out_name->id, cursor, 4);
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * -------------------------------------------------------------
+ * RootChar := ‘\’
+ * ParentPrefixChar := ‘^’
+ * ‘\’ := 0x5C
+ * ‘^’ := 0x5E
+ * ------------------------------------------------------------
+ * NameSeg := <leadnamechar namechar namechar namechar>
+ * NameString := <rootchar namepath> | <prefixpath namepath>
+ * PrefixPath := Nothing | <’^’ prefixpath>
+ * NamePath := NameSeg | DualNamePath | MultiNamePath | NullName
+ * DualNamePath := DualNamePrefix NameSeg NameSeg
+ * MultiNamePath := MultiNamePrefix SegCount NameSeg(SegCount)
+ */
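+
+/*
+ * Worked example (illustrative): "\_SB_.PCI0" is encoded as the RootChar
+ * 0x5C, the DualNamePrefix 0x2E, then the name segments "_SB_" and "PCI0"
+ * back to back. "^^FOO_" is two ParentPrefixChars (0x5E) followed by a single
+ * name segment, and a lone 0x00 byte is a NullName.
+ */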
+
+static uacpi_status name_string_to_path(
+ struct call_frame *frame, uacpi_size offset,
+ uacpi_char **out_string, uacpi_size *out_size
+)
+{
+ uacpi_size bytes_left, prefix_bytes, nameseg_bytes = 0, namesegs;
+ uacpi_char *base_cursor, *cursor;
+ uacpi_char prev_char;
+
+ bytes_left = frame->method->size - offset;
+ cursor = (uacpi_char*)frame->method->code + offset;
+ base_cursor = cursor;
+ namesegs = 0;
+
+ prefix_bytes = 0;
+ for (;;) {
+ if (uacpi_unlikely(bytes_left == 0))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ prev_char = *cursor;
+
+ switch (prev_char) {
+ case '^':
+ case '\\':
+ prefix_bytes++;
+ cursor++;
+ bytes_left--;
+ break;
+ default:
+ break;
+ }
+
+ if (prev_char != '^')
+ break;
+ }
+
+ // At least a NullName byte is expected here
+ if (uacpi_unlikely(bytes_left == 0))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ namesegs = 0;
+ bytes_left--;
+ switch (*cursor++)
+ {
+ case UACPI_DUAL_NAME_PREFIX:
+ namesegs = 2;
+ break;
+ case UACPI_MULTI_NAME_PREFIX:
+ if (uacpi_unlikely(bytes_left == 0))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ namesegs = *(uacpi_u8*)cursor;
+ if (uacpi_unlikely(namesegs == 0)) {
+ uacpi_error("MultiNamePrefix but SegCount is 0\n");
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+ }
+
+ cursor++;
+ bytes_left--;
+ break;
+ case UACPI_NULL_NAME:
+ break;
+ default:
+ /*
+         * Might be an invalid byte, but assume a single nameseg for now;
+         * the code below will validate it for us.
+ */
+ cursor--;
+ bytes_left++;
+ namesegs = 1;
+ break;
+ }
+
+ if (uacpi_unlikely((namesegs * 4) > bytes_left))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ if (namesegs) {
+ // 4 chars per nameseg
+ nameseg_bytes = namesegs * 4;
+
+ // dot separator for every nameseg
+ nameseg_bytes += namesegs - 1;
+ }
+
+ *out_size = nameseg_bytes + prefix_bytes + 1;
+
+ *out_string = uacpi_kernel_alloc(*out_size);
+ if (*out_string == UACPI_NULL)
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(*out_string, base_cursor, prefix_bytes);
+
+ base_cursor = *out_string;
+ base_cursor += prefix_bytes;
+
+ while (namesegs-- > 0) {
+ uacpi_memcpy(base_cursor, cursor, 4);
+ cursor += 4;
+ base_cursor += 4;
+
+ if (namesegs)
+ *base_cursor++ = '.';
+ }
+
+ *base_cursor = '\0';
+ return UACPI_STATUS_OK;
+}
+
+enum resolve_behavior {
+ RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS,
+ RESOLVE_FAIL_IF_DOESNT_EXIST,
+};
+
+static uacpi_status resolve_name_string(
+ struct call_frame *frame,
+ enum resolve_behavior behavior,
+ struct uacpi_namespace_node **out_node
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ uacpi_u8 *cursor;
+ uacpi_size bytes_left, namesegs = 0;
+ struct uacpi_namespace_node *parent, *cur_node = frame->cur_scope;
+ uacpi_char prev_char = 0;
+ uacpi_bool just_one_nameseg = UACPI_TRUE;
+
+ bytes_left = call_frame_code_bytes_left(frame);
+ cursor = call_frame_cursor(frame);
+
+ for (;;) {
+ if (uacpi_unlikely(bytes_left == 0))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ switch (*cursor) {
+ case '\\':
+ if (prev_char == '^')
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ cur_node = uacpi_namespace_root();
+ break;
+ case '^':
+ // Tried to go behind root
+ if (uacpi_unlikely(cur_node == uacpi_namespace_root()))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ cur_node = cur_node->parent;
+ break;
+ default:
+ break;
+ }
+
+ prev_char = *cursor;
+
+ switch (prev_char) {
+ case '^':
+ case '\\':
+ just_one_nameseg = UACPI_FALSE;
+ cursor++;
+ bytes_left--;
+ break;
+ default:
+ break;
+ }
+
+ if (prev_char != '^')
+ break;
+ }
+
+ // At least a NullName byte is expected here
+ if (uacpi_unlikely(bytes_left == 0))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ bytes_left--;
+ switch (*cursor++)
+ {
+ case UACPI_DUAL_NAME_PREFIX:
+ namesegs = 2;
+ just_one_nameseg = UACPI_FALSE;
+ break;
+ case UACPI_MULTI_NAME_PREFIX:
+ if (uacpi_unlikely(bytes_left == 0))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ namesegs = *cursor;
+ if (uacpi_unlikely(namesegs == 0)) {
+ uacpi_error("MultiNamePrefix but SegCount is 0\n");
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+ }
+
+ cursor++;
+ bytes_left--;
+ just_one_nameseg = UACPI_FALSE;
+ break;
+ case UACPI_NULL_NAME:
+ if (behavior == RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS ||
+ just_one_nameseg)
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ goto out;
+ default:
+ /*
+         * Might be an invalid byte, but assume a single nameseg for now;
+         * the code below will validate it for us.
+ */
+ cursor--;
+ bytes_left++;
+ namesegs = 1;
+ break;
+ }
+
+ if (uacpi_unlikely((namesegs * 4) > bytes_left))
+ return UACPI_STATUS_AML_INVALID_NAMESTRING;
+
+ for (; namesegs; cursor += 4, namesegs--) {
+ uacpi_object_name name;
+
+ ret = parse_nameseg(cursor, &name);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ parent = cur_node;
+ cur_node = uacpi_namespace_node_find_sub_node(parent, name);
+
+ switch (behavior) {
+ case RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS:
+ if (namesegs == 1) {
+ if (cur_node) {
+ cur_node = UACPI_NULL;
+ ret = UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS;
+ goto out;
+ }
+
+ // Create the node and link to parent but don't install YET
+ cur_node = uacpi_namespace_node_alloc(name);
+ if (uacpi_unlikely(cur_node == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ cur_node->parent = parent;
+ }
+ break;
+ case RESOLVE_FAIL_IF_DOESNT_EXIST:
+ if (just_one_nameseg) {
+ while (!cur_node && parent != uacpi_namespace_root()) {
+ cur_node = parent;
+ parent = cur_node->parent;
+
+ cur_node = uacpi_namespace_node_find_sub_node(parent, name);
+ }
+ }
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (cur_node == UACPI_NULL) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ break;
+ }
+ }
+
+out:
+ cursor += namesegs * 4;
+ frame->code_offset = cursor - frame->method->code;
+
+ if (uacpi_likely_success(ret) && behavior == RESOLVE_FAIL_IF_DOESNT_EXIST)
+ uacpi_shareable_ref(cur_node);
+
+ *out_node = cur_node;
+ return ret;
+}
+
+static uacpi_status do_install_node_item(struct call_frame *frame,
+ struct item *item)
+{
+ uacpi_status ret;
+
+ ret = uacpi_namespace_node_install(item->node->parent, item->node);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (!frame->method->named_objects_persist)
+ ret = temp_namespace_node_array_push(&frame->temp_nodes, item->node);
+
+ if (uacpi_likely_success(ret))
+ item->node = UACPI_NULL;
+
+ return ret;
+}
+
+static uacpi_u8 peek_next_op(struct call_frame *frame, uacpi_aml_op *out_op)
+{
+ uacpi_aml_op op;
+ uacpi_size bytes_left;
+ uacpi_u8 length = 0;
+ uacpi_u8 *cursor;
+ struct code_block *block;
+
+ block = code_block_array_last(&frame->code_blocks);
+ bytes_left = block->end - frame->code_offset;
+ if (bytes_left == 0)
+ return 0;
+
+ cursor = call_frame_cursor(frame);
+
+ op = AML_READ(cursor, length++);
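+    /*
+     * Extended opcodes are a two-byte sequence: the extension prefix byte
+     * (0x5B) followed by the second opcode byte.
+     */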
+ if (op == UACPI_EXT_PREFIX) {
+ if (uacpi_unlikely(bytes_left < 2))
+ return 0;
+
+ op <<= 8;
+ op |= AML_READ(cursor, length++);
+ }
+
+ *out_op = op;
+ return length;
+}
+
+static uacpi_status get_op(struct execution_context *ctx)
+{
+ uacpi_aml_op op;
+ uacpi_u8 length;
+
+ length = peek_next_op(ctx->cur_frame, &op);
+ if (uacpi_unlikely(length == 0))
+ return UACPI_STATUS_AML_BAD_ENCODING;
+
+ ctx->cur_frame->code_offset += length;
+ g_uacpi_rt_ctx.opcodes_executed++;
+
+ ctx->cur_op = uacpi_get_op_spec(op);
+ if (uacpi_unlikely(ctx->cur_op->properties & UACPI_OP_PROPERTY_RESERVED)) {
+ uacpi_error(
+ "invalid opcode '%s' encountered in bytestream\n",
+ ctx->cur_op->name
+ );
+ return UACPI_STATUS_AML_INVALID_OPCODE;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_buffer(struct execution_context *ctx)
+{
+ struct package_length *pkg;
+ uacpi_u8 *src;
+ uacpi_object *dst, *declared_size;
+ uacpi_u32 buffer_size, init_size, aml_offset;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ aml_offset = item_array_at(&op_ctx->items, 2)->immediate;
+ src = ctx->cur_frame->method->code;
+ src += aml_offset;
+
+ pkg = &item_array_at(&op_ctx->items, 0)->pkg;
+ init_size = pkg->end - aml_offset;
+
+ // TODO: do package bounds checking at parse time
+ if (uacpi_unlikely(pkg->end > ctx->cur_frame->method->size))
+ return UACPI_STATUS_AML_BAD_ENCODING;
+
+ declared_size = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (uacpi_unlikely(declared_size->integer > 0xE0000000)) {
+ uacpi_error(
+ "buffer is too large (%"UACPI_PRIu64"), assuming corrupted "
+ "bytestream\n", UACPI_FMT64(declared_size->integer)
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ if (uacpi_unlikely(declared_size->integer == 0)) {
+ uacpi_error("attempted to create an empty buffer\n");
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ buffer_size = declared_size->integer;
+ if (uacpi_unlikely(init_size > buffer_size)) {
+ uacpi_error(
+ "too many buffer initializers: %u (size is %u)\n",
+ init_size, buffer_size
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ dst = item_array_at(&op_ctx->items, 3)->obj;
+ dst->buffer->data = uacpi_kernel_alloc(buffer_size);
+ if (uacpi_unlikely(dst->buffer->data == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ dst->buffer->size = buffer_size;
+
+ uacpi_memcpy_zerout(dst->buffer->data, src, buffer_size, init_size);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_string(struct execution_context *ctx)
+{
+ struct call_frame *frame = ctx->cur_frame;
+ uacpi_object *obj;
+
+ uacpi_char *string;
+ uacpi_size length, max_bytes;
+
+ obj = item_array_last(&ctx->cur_op_ctx->items)->obj;
+ string = call_frame_cursor(frame);
+
+ // TODO: sanitize string for valid UTF-8
+ max_bytes = call_frame_code_bytes_left(frame);
+ length = uacpi_strnlen(string, max_bytes);
+
+ if (uacpi_unlikely((length == max_bytes) || (string[length++] != 0x00)))
+ return UACPI_STATUS_AML_BAD_ENCODING;
+
+ obj->buffer->text = uacpi_kernel_alloc(length);
+ if (uacpi_unlikely(obj->buffer->text == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(obj->buffer->text, string, length);
+ obj->buffer->size = length;
+ frame->code_offset += length;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_package(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_package *package;
+ uacpi_u32 num_elements, num_defined_elements, i;
+
+ /*
+ * Layout of items here:
+ * [0] -> Package length, not interesting
+ * [1] -> Immediate or integer object, depending on PackageOp/VarPackageOp
+ * [2..N-2] -> AML pc+Package element pairs
+ * [N-1] -> The resulting package object that we're constructing
+ */
+ package = item_array_last(&op_ctx->items)->obj->package;
+
+ // 1. Detect how many elements we have, do sanity checking
+ if (op_ctx->op->code == UACPI_AML_OP_VarPackageOp) {
+ uacpi_object *var_num_elements;
+
+ var_num_elements = item_array_at(&op_ctx->items, 1)->obj;
+ if (uacpi_unlikely(var_num_elements->integer > 0xE0000000)) {
+ uacpi_error(
+ "package is too large (%"UACPI_PRIu64"), assuming "
+ "corrupted bytestream\n", UACPI_FMT64(var_num_elements->integer)
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+ num_elements = var_num_elements->integer;
+ } else {
+ num_elements = item_array_at(&op_ctx->items, 1)->immediate;
+ }
+
+ num_defined_elements = (item_array_size(&op_ctx->items) - 3) / 2;
+ if (uacpi_unlikely(num_defined_elements > num_elements)) {
+ uacpi_warn(
+ "too many package initializers: %u, truncating to %u\n",
+ num_defined_elements, num_elements
+ );
+
+ num_defined_elements = num_elements;
+ }
+
+ // 2. Create every object in the package, start as uninitialized
+ if (uacpi_unlikely(!uacpi_package_fill(package, num_elements,
+ UACPI_PREALLOC_OBJECTS_YES)))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ // 3. Go through every defined object and copy it into the package
+ for (i = 0; i < num_defined_elements; ++i) {
+ uacpi_size base_pkg_index;
+ uacpi_status ret;
+ struct item *item;
+ uacpi_object *obj;
+
+ base_pkg_index = (i * 2) + 2;
+ item = item_array_at(&op_ctx->items, base_pkg_index + 1);
+ obj = item->obj;
+
+ if (obj != UACPI_NULL && obj->type == UACPI_OBJECT_REFERENCE) {
+ /*
+ * For named objects we don't actually need the object itself, but
+             * simply the path to it. Oftentimes, objects referenced by the
+             * package are not defined until later, so it's not possible to
+ * resolve them. For uniformity and to follow the behavior of NT,
+ * simply convert the name string to a path string object to be
+ * resolved later when actually needed.
+ */
+ if (obj->flags == UACPI_REFERENCE_KIND_NAMED) {
+ uacpi_object_unref(obj);
+ item->obj = UACPI_NULL;
+ obj = UACPI_NULL;
+ } else {
+ obj = uacpi_unwrap_internal_reference(obj);
+ }
+ }
+
+ if (obj == UACPI_NULL) {
+ uacpi_size length;
+ uacpi_char *path;
+
+ obj = uacpi_create_object(UACPI_OBJECT_STRING);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = name_string_to_path(
+ ctx->cur_frame,
+ item_array_at(&op_ctx->items, base_pkg_index)->immediate,
+ &path, &length
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ obj->flags = UACPI_STRING_KIND_PATH;
+ obj->buffer->text = path;
+ obj->buffer->size = length;
+
+ item->obj = obj;
+ item->type = ITEM_OBJECT;
+ }
+
+ ret = uacpi_object_assign(package->objects[i], obj,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_size sizeof_int(void)
+{
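+    // revision 1 tables (ACPI 1.0) use 32-bit AML integers, later revisions
+    // use 64-bit ones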
+ return g_uacpi_rt_ctx.is_rev1 ? 4 : 8;
+}
+
+static uacpi_status get_object_storage(
+ uacpi_object *obj, uacpi_data_view *out_buf, uacpi_bool include_null
+)
+{
+ switch (obj->type) {
+ case UACPI_OBJECT_INTEGER:
+ out_buf->length = sizeof_int();
+ out_buf->data = &obj->integer;
+ break;
+ case UACPI_OBJECT_STRING:
+ out_buf->length = obj->buffer->size;
+ if (out_buf->length && !include_null)
+ out_buf->length--;
+
+ out_buf->text = obj->buffer->text;
+ break;
+ case UACPI_OBJECT_BUFFER:
+ if (obj->buffer->size == 0) {
+ out_buf->bytes = UACPI_NULL;
+ out_buf->length = 0;
+ break;
+ }
+
+ out_buf->length = obj->buffer->size;
+ out_buf->bytes = obj->buffer->data;
+ break;
+ case UACPI_OBJECT_REFERENCE:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ default:
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_u8 *buffer_index_cursor(uacpi_buffer_index *buf_idx)
+{
+ uacpi_u8 *out_cursor;
+
+ out_cursor = buf_idx->buffer->data;
+ out_cursor += buf_idx->idx;
+
+ return out_cursor;
+}
+
+static void write_buffer_index(uacpi_buffer_index *buf_idx,
+ uacpi_data_view *src_buf)
+{
+ uacpi_memcpy_zerout(buffer_index_cursor(buf_idx), src_buf->bytes,
+ 1, src_buf->length);
+}
+
+/*
+ * The word "implicit cast" here is only because it's called that in
+ * the specification. In reality, we just copy one buffer to another
+ * because that's what NT does.
+ */
+static uacpi_status object_assign_with_implicit_cast(
+ uacpi_object *dst, uacpi_object *src, uacpi_data_view *wtr_response
+)
+{
+ uacpi_status ret;
+ uacpi_data_view src_buf;
+
+ ret = get_object_storage(src, &src_buf, UACPI_FALSE);
+ if (uacpi_unlikely_error(ret))
+ goto out_bad_cast;
+
+ switch (dst->type) {
+ case UACPI_OBJECT_INTEGER:
+ case UACPI_OBJECT_STRING:
+ case UACPI_OBJECT_BUFFER: {
+ uacpi_data_view dst_buf;
+
+ ret = get_object_storage(dst, &dst_buf, UACPI_FALSE);
+ if (uacpi_unlikely_error(ret))
+ goto out_bad_cast;
+
+ uacpi_memcpy_zerout(
+ dst_buf.bytes, src_buf.bytes, dst_buf.length, src_buf.length
+ );
+ break;
+ }
+
+ case UACPI_OBJECT_BUFFER_FIELD:
+ uacpi_write_buffer_field(
+ &dst->buffer_field, src_buf.bytes, src_buf.length
+ );
+ break;
+
+ case UACPI_OBJECT_FIELD_UNIT:
+ return uacpi_write_field_unit(
+ dst->field_unit, src_buf.bytes, src_buf.length,
+ wtr_response
+ );
+
+ case UACPI_OBJECT_BUFFER_INDEX:
+ write_buffer_index(&dst->buffer_index, &src_buf);
+ break;
+
+ default:
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ goto out_bad_cast;
+ }
+
+ return ret;
+
+out_bad_cast:
+ uacpi_error(
+ "attempted to perform an invalid implicit cast (%s -> %s)\n",
+ uacpi_object_type_to_string(src->type),
+ uacpi_object_type_to_string(dst->type)
+ );
+ return ret;
+}
+
+enum argx_or_localx {
+ ARGX,
+ LOCALX,
+};
+
+static uacpi_status handle_arg_or_local(
+ struct execution_context *ctx,
+ uacpi_size idx, enum argx_or_localx type
+)
+{
+ uacpi_object **src;
+ struct item *dst;
+ enum uacpi_reference_kind kind;
+
+ if (type == ARGX) {
+ src = &ctx->cur_frame->args[idx];
+ kind = UACPI_REFERENCE_KIND_ARG;
+ } else {
+ src = &ctx->cur_frame->locals[idx];
+ kind = UACPI_REFERENCE_KIND_LOCAL;
+ }
+
+ if (*src == UACPI_NULL) {
+ uacpi_object *default_value;
+
+ default_value = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(default_value == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ *src = uacpi_create_internal_reference(kind, default_value);
+ if (uacpi_unlikely(*src == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_object_unref(default_value);
+ }
+
+ dst = item_array_last(&ctx->cur_op_ctx->items);
+ dst->obj = *src;
+ dst->type = ITEM_OBJECT;
+ uacpi_object_ref(dst->obj);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_local(struct execution_context *ctx)
+{
+ uacpi_size idx;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ idx = op_ctx->op->code - UACPI_AML_OP_Local0Op;
+ return handle_arg_or_local(ctx, idx, LOCALX);
+}
+
+static uacpi_status handle_arg(struct execution_context *ctx)
+{
+ uacpi_size idx;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ idx = op_ctx->op->code - UACPI_AML_OP_Arg0Op;
+ return handle_arg_or_local(ctx, idx, ARGX);
+}
+
+static uacpi_status handle_named_object(struct execution_context *ctx)
+{
+ struct uacpi_namespace_node *src;
+ struct item *dst;
+
+ src = item_array_at(&ctx->cur_op_ctx->items, 0)->node;
+ dst = item_array_at(&ctx->cur_op_ctx->items, 1);
+
+ dst->obj = src->object;
+ dst->type = ITEM_OBJECT;
+ uacpi_object_ref(dst->obj);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_create_alias(struct execution_context *ctx)
+{
+ uacpi_namespace_node *src, *dst;
+
+ src = item_array_at(&ctx->cur_op_ctx->items, 0)->node;
+ dst = item_array_at(&ctx->cur_op_ctx->items, 1)->node;
+
+ dst->object = src->object;
+ dst->flags = UACPI_NAMESPACE_NODE_FLAG_ALIAS;
+ uacpi_object_ref(dst->object);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_create_op_region(struct execution_context *ctx)
+{
+ uacpi_namespace_node *node;
+ uacpi_object *obj;
+ uacpi_operation_region *op_region;
+ uacpi_u64 region_end;
+
+ node = item_array_at(&ctx->cur_op_ctx->items, 0)->node;
+ obj = item_array_at(&ctx->cur_op_ctx->items, 4)->obj;
+ op_region = obj->op_region;
+
+ op_region->space = item_array_at(&ctx->cur_op_ctx->items, 1)->immediate;
+ op_region->offset = item_array_at(&ctx->cur_op_ctx->items, 2)->obj->integer;
+ op_region->length = item_array_at(&ctx->cur_op_ctx->items, 3)->obj->integer;
+ region_end = op_region->offset + op_region->length;
+
+ if (uacpi_unlikely(op_region->length == 0)) {
+ // Don't abort here, as long as it's never accessed we don't care
+ uacpi_warn("unusable/empty operation region %.4s\n", node->name.text);
+ } else if (uacpi_unlikely(op_region->offset > region_end)) {
+ uacpi_error(
+ "invalid operation region %.4s bounds: offset=0x%"UACPI_PRIX64
+ " length=0x%"UACPI_PRIX64"\n", node->name.text,
+ UACPI_FMT64(op_region->offset), UACPI_FMT64(op_region->length)
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ if (op_region->space == UACPI_ADDRESS_SPACE_PCC && op_region->offset > 255) {
+ uacpi_warn(
+ "invalid PCC operation region %.4s subspace %"UACPI_PRIX64"\n",
+ node->name.text, UACPI_FMT64(op_region->offset)
+ );
+ }
+
+ node->object = uacpi_create_internal_reference(
+ UACPI_REFERENCE_KIND_NAMED, obj
+ );
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_initialize_opregion_node(node);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status table_id_error(
+ const uacpi_char *opcode, const uacpi_char *arg,
+ uacpi_buffer *str
+)
+{
+ uacpi_error("%s: invalid %s '%s'\n", opcode, arg, str->text);
+ return UACPI_STATUS_AML_BAD_ENCODING;
+}
+
+static void report_table_id_find_error(
+ const uacpi_char *opcode, struct uacpi_table_identifiers *id,
+ uacpi_status ret
+)
+{
+ uacpi_error(
+ "%s: unable to find table '%.4s' (OEM ID '%.6s', "
+ "OEM Table ID '%.8s'): %s\n",
+ opcode, id->signature.text, id->oemid, id->oem_table_id,
+ uacpi_status_to_string(ret)
+ );
+}
+
+static uacpi_status build_table_id(
+ const uacpi_char *opcode,
+ struct uacpi_table_identifiers *out_id,
+ uacpi_buffer *signature, uacpi_buffer *oem_id,
+ uacpi_buffer *oem_table_id
+)
+{
+ if (uacpi_unlikely(signature->size != (sizeof(uacpi_object_name) + 1)))
+ return table_id_error(opcode, "SignatureString", signature);
+
+ uacpi_memcpy(out_id->signature.text, signature->text,
+ sizeof(uacpi_object_name));
+
+ if (uacpi_unlikely(oem_id->size > (sizeof(out_id->oemid) + 1)))
+ return table_id_error(opcode, "OemIDString", oem_id);
+
+ uacpi_memcpy_zerout(
+ out_id->oemid, oem_id->text,
+ sizeof(out_id->oemid), oem_id->size ? oem_id->size - 1 : 0
+ );
+
+ if (uacpi_unlikely(oem_table_id->size > (sizeof(out_id->oem_table_id) + 1)))
+ return table_id_error(opcode, "OemTableIDString", oem_table_id);
+
+ uacpi_memcpy_zerout(
+ out_id->oem_table_id, oem_table_id->text,
+ sizeof(out_id->oem_table_id),
+ oem_table_id->size ? oem_table_id->size - 1 : 0
+ );
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_create_data_region(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct item_array *items = &ctx->cur_op_ctx->items;
+ struct uacpi_table_identifiers table_id;
+ uacpi_table table;
+ uacpi_namespace_node *node;
+ uacpi_object *obj;
+ uacpi_operation_region *op_region;
+
+ node = item_array_at(items, 0)->node;
+
+ ret = build_table_id(
+ "DataTableRegion", &table_id,
+ item_array_at(items, 1)->obj->buffer,
+ item_array_at(items, 2)->obj->buffer,
+ item_array_at(items, 3)->obj->buffer
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_table_find(&table_id, &table);
+ if (uacpi_unlikely_error(ret)) {
+ report_table_id_find_error("DataTableRegion", &table_id, ret);
+ return ret;
+ }
+
+ obj = item_array_at(items, 4)->obj;
+ op_region = obj->op_region;
+ op_region->space = UACPI_ADDRESS_SPACE_TABLE_DATA;
+ op_region->offset = table.virt_addr;
+ op_region->length = table.hdr->length;
+ op_region->table_idx = table.index;
+
+ node->object = uacpi_create_internal_reference(
+ UACPI_REFERENCE_KIND_NAMED, obj
+ );
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_initialize_opregion_node(node);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_bool is_dynamic_table_load(enum uacpi_table_load_cause cause)
+{
+ return cause != UACPI_TABLE_LOAD_CAUSE_INIT;
+}
+
+static void prepare_table_load(
+ void *ptr, enum uacpi_table_load_cause cause, uacpi_control_method *in_method
+)
+{
+ struct acpi_dsdt *dsdt = ptr;
+ enum uacpi_log_level log_level = UACPI_LOG_TRACE;
+ const uacpi_char *log_prefix = "load of";
+
+ if (is_dynamic_table_load(cause)) {
+ log_prefix = cause == UACPI_TABLE_LOAD_CAUSE_HOST ?
+ "host-invoked load of" : "dynamic load of";
+ log_level = UACPI_LOG_INFO;
+ }
+
+ uacpi_log_lvl(
+ log_level, "%s "UACPI_PRI_TBL_HDR"\n",
+ log_prefix, UACPI_FMT_TBL_HDR(&dsdt->hdr)
+ );
+
+ in_method->code = dsdt->definition_block;
+ in_method->size = dsdt->hdr.length - sizeof(dsdt->hdr);
+ in_method->named_objects_persist = UACPI_TRUE;
+}
+
+static uacpi_status do_load_table(
+ uacpi_namespace_node *parent, struct acpi_sdt_hdr *tbl,
+ enum uacpi_table_load_cause cause
+)
+{
+ struct uacpi_control_method method = { 0 };
+ uacpi_status ret;
+
+ prepare_table_load(tbl, cause, &method);
+
+ ret = uacpi_execute_control_method(parent, &method, UACPI_NULL, UACPI_NULL);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (is_dynamic_table_load(cause))
+ uacpi_events_match_post_dynamic_table_load();
+
+ return ret;
+}
+
+static uacpi_status handle_load_table(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct item_array *items = &ctx->cur_op_ctx->items;
+ struct item *root_node_item;
+ struct uacpi_table_identifiers table_id;
+ uacpi_table table;
+ uacpi_buffer *root_path, *param_path;
+ uacpi_control_method *method;
+ uacpi_namespace_node *root_node, *param_node = UACPI_NULL;
+
+ /*
+ * If we already have the last true/false object loaded, this is a second
+ * invocation of this handler. For the second invocation we want to detect
+ * new AML GPE handlers that might've been loaded, as well as potentially
+ * remove the target.
+ */
+ if (item_array_size(items) == 12) {
+ uacpi_size idx;
+ struct uacpi_table tmp_table = { 0 };
+
+ idx = item_array_at(items, 2)->immediate;
+ tmp_table.index = idx;
+ uacpi_table_unref(&tmp_table);
+
+ /*
+ * If this load failed, remove the target that was provided via
+ * ParameterPathString so that it doesn't get stored to.
+ */
+ if (uacpi_unlikely(item_array_at(items, 11)->obj->integer == 0)) {
+ uacpi_object *target;
+
+ target = item_array_at(items, 3)->obj;
+ if (target != UACPI_NULL) {
+ uacpi_object_unref(target);
+ item_array_at(items, 3)->obj = UACPI_NULL;
+ }
+
+ return UACPI_STATUS_OK;
+ }
+
+ uacpi_events_match_post_dynamic_table_load();
+ return UACPI_STATUS_OK;
+ }
+
+ ret = build_table_id(
+ "LoadTable", &table_id,
+ item_array_at(items, 5)->obj->buffer,
+ item_array_at(items, 6)->obj->buffer,
+ item_array_at(items, 7)->obj->buffer
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ root_path = item_array_at(items, 8)->obj->buffer;
+ param_path = item_array_at(items, 9)->obj->buffer;
+ root_node_item = item_array_at(items, 0);
+
+ if (root_path->size > 1) {
+ ret = uacpi_namespace_node_resolve(
+ ctx->cur_frame->cur_scope, root_path->text, UACPI_SHOULD_LOCK_NO,
+ UACPI_MAY_SEARCH_ABOVE_PARENT_YES, UACPI_PERMANENT_ONLY_NO,
+ &root_node
+ );
+ if (uacpi_unlikely_error(ret)) {
+ table_id_error("LoadTable", "RootPathString", root_path);
+ if (ret == UACPI_STATUS_NOT_FOUND)
+ ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE;
+ return ret;
+ }
+ } else {
+ root_node = uacpi_namespace_root();
+ }
+
+ root_node_item->node = root_node;
+ root_node_item->type = ITEM_NAMESPACE_NODE;
+ uacpi_shareable_ref(root_node);
+
+ if (param_path->size > 1) {
+ struct item *param_item;
+
+ ret = uacpi_namespace_node_resolve(
+ root_node, param_path->text, UACPI_SHOULD_LOCK_NO,
+ UACPI_MAY_SEARCH_ABOVE_PARENT_YES, UACPI_PERMANENT_ONLY_NO,
+ &param_node
+ );
+ if (uacpi_unlikely_error(ret)) {
+ table_id_error("LoadTable", "ParameterPathString", root_path);
+ if (ret == UACPI_STATUS_NOT_FOUND)
+ ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE;
+ return ret;
+ }
+
+ param_item = item_array_at(items, 3);
+ param_item->obj = param_node->object;
+ uacpi_object_ref(param_item->obj);
+ param_item->type = ITEM_OBJECT;
+ }
+
+ ret = uacpi_table_find(&table_id, &table);
+ if (uacpi_unlikely_error(ret)) {
+ report_table_id_find_error("LoadTable", &table_id, ret);
+ return ret;
+ }
+ uacpi_table_mark_as_loaded(table.index);
+
+ item_array_at(items, 2)->immediate = table.index;
+ method = item_array_at(items, 1)->obj->method;
+ prepare_table_load(table.hdr, UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP, method);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_load(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct item_array *items = &ctx->cur_op_ctx->items;
+ uacpi_table table;
+ uacpi_control_method *method;
+ uacpi_object *src;
+ struct acpi_sdt_hdr *src_table = UACPI_NULL;
+ void *table_buffer;
+ uacpi_size declared_size;
+ uacpi_bool unmap_src = UACPI_FALSE;
+
+ /*
+ * If we already have the last true/false object loaded, this is a second
+ * invocation of this handler. For the second invocation we simply want to
+ * detect new AML GPE handlers that might've been loaded.
+ * We do this only if table load was successful though.
+ */
+ if (item_array_size(items) == 5) {
+ if (item_array_at(items, 4)->obj->integer != 0)
+ uacpi_events_match_post_dynamic_table_load();
+ return UACPI_STATUS_OK;
+ }
+
+ src = item_array_at(items, 2)->obj;
+
+ switch (src->type) {
+ case UACPI_OBJECT_OPERATION_REGION: {
+ uacpi_operation_region *op_region;
+
+ op_region = src->op_region;
+ if (uacpi_unlikely(
+ op_region->space != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY
+ )) {
+ uacpi_error("Load: operation region is not SystemMemory\n");
+ goto error_out;
+ }
+
+ if (uacpi_unlikely(op_region->length < sizeof(struct acpi_sdt_hdr))) {
+ uacpi_error(
+ "Load: operation region is too small: %"UACPI_PRIu64"\n",
+ UACPI_FMT64(op_region->length)
+ );
+ goto error_out;
+ }
+
+ src_table = uacpi_kernel_map(op_region->offset, op_region->length);
+ if (uacpi_unlikely(src_table == UACPI_NULL)) {
+ uacpi_error(
+ "Load: failed to map operation region "
+ "0x%016"UACPI_PRIX64" -> 0x%016"UACPI_PRIX64"\n",
+ UACPI_FMT64(op_region->offset),
+ UACPI_FMT64(op_region->offset + op_region->length)
+ );
+ goto error_out;
+ }
+
+ unmap_src = UACPI_TRUE;
+ declared_size = op_region->length;
+ break;
+ }
+
+ case UACPI_OBJECT_BUFFER: {
+ uacpi_buffer *buffer;
+
+ buffer = src->buffer;
+ if (buffer->size < sizeof(struct acpi_sdt_hdr)) {
+ uacpi_error(
+ "Load: buffer is too small: %zu\n",
+ buffer->size
+ );
+ goto error_out;
+ }
+
+ src_table = buffer->data;
+ declared_size = buffer->size;
+ break;
+ }
+
+ default:
+ uacpi_error(
+ "Load: invalid argument '%s', expected "
+ "Buffer/Field/OperationRegion\n",
+ uacpi_object_type_to_string(src->type)
+ );
+ goto error_out;
+ }
+
+ if (uacpi_unlikely(src_table->length > declared_size)) {
+ uacpi_error(
+ "Load: table size %u is larger than the declared size %zu\n",
+ src_table->length, declared_size
+ );
+ goto error_out;
+ }
+
+ if (uacpi_unlikely(src_table->length < sizeof(struct acpi_sdt_hdr))) {
+ uacpi_error("Load: table size %u is too small\n", src_table->length);
+ goto error_out;
+ }
+
+ table_buffer = uacpi_kernel_alloc(src_table->length);
+ if (uacpi_unlikely(table_buffer == UACPI_NULL))
+ goto error_out;
+
+ uacpi_memcpy(table_buffer, src_table, src_table->length);
+
+ if (unmap_src) {
+ uacpi_kernel_unmap(src_table, declared_size);
+ unmap_src = UACPI_FALSE;
+ }
+
+ ret = uacpi_table_install_with_origin(
+ table_buffer, UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL, &table
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_free(table_buffer, src_table->length);
+
+ if (ret != UACPI_STATUS_OVERRIDDEN)
+ goto error_out;
+ }
+ uacpi_table_mark_as_loaded(table.index);
+
+ item_array_at(items, 0)->node = uacpi_namespace_root();
+
+ method = item_array_at(items, 1)->obj->method;
+ prepare_table_load(table.ptr, UACPI_TABLE_LOAD_CAUSE_LOAD_OP, method);
+
+ return UACPI_STATUS_OK;
+
+error_out:
+ if (unmap_src && src_table)
+ uacpi_kernel_unmap(src_table, declared_size);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_execute_table(void *tbl, enum uacpi_table_load_cause cause)
+{
+ uacpi_status ret;
+
+ ret = uacpi_namespace_write_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = do_load_table(uacpi_namespace_root(), tbl, cause);
+
+ uacpi_namespace_write_unlock();
+ return ret;
+}
+
+static uacpi_u32 get_field_length(struct item *item)
+{
+ struct package_length *pkg = &item->pkg;
+ return pkg->end - pkg->begin;
+}
+
+struct field_specific_data {
+ uacpi_namespace_node *region;
+ struct uacpi_field_unit *field0;
+ struct uacpi_field_unit *field1;
+ uacpi_u64 value;
+};
+
+static uacpi_status ensure_is_a_field_unit(uacpi_namespace_node *node,
+ uacpi_field_unit **out_field)
+{
+ uacpi_object *obj;
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (obj->type != UACPI_OBJECT_FIELD_UNIT) {
+ uacpi_error(
+ "invalid argument: '%.4s' is not a field unit (%s)\n",
+ node->name.text, uacpi_object_type_to_string(obj->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ *out_field = obj->field_unit;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status ensure_is_an_op_region(uacpi_namespace_node *node,
+ uacpi_namespace_node **out_node)
+{
+ uacpi_object *obj;
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (obj->type != UACPI_OBJECT_OPERATION_REGION) {
+ uacpi_error(
+ "invalid argument: '%.4s' is not an operation region (%s)\n",
+ node->name.text, uacpi_object_type_to_string(obj->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ *out_node = node;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_create_field(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_namespace_node *node;
+ uacpi_object *obj, *connection_obj = UACPI_NULL;
+ struct field_specific_data field_data = { 0 };
+ uacpi_size i = 1, bit_offset = 0;
+ uacpi_u32 length, pin_offset = 0;
+
+ uacpi_u8 raw_value, access_type, lock_rule, update_rule;
+ uacpi_u8 access_attrib = 0, access_length = 0;
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_FieldOp:
+ node = item_array_at(&op_ctx->items, i++)->node;
+ ret = ensure_is_an_op_region(node, &field_data.region);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ break;
+
+ case UACPI_AML_OP_BankFieldOp:
+ node = item_array_at(&op_ctx->items, i++)->node;
+ ret = ensure_is_an_op_region(node, &field_data.region);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ node = item_array_at(&op_ctx->items, i++)->node;
+ ret = ensure_is_a_field_unit(node, &field_data.field0);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ field_data.value = item_array_at(&op_ctx->items, i++)->obj->integer;
+ break;
+
+ case UACPI_AML_OP_IndexFieldOp:
+ node = item_array_at(&op_ctx->items, i++)->node;
+ ret = ensure_is_a_field_unit(node, &field_data.field0);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ node = item_array_at(&op_ctx->items, i++)->node;
+ ret = ensure_is_a_field_unit(node, &field_data.field1);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ break;
+
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ /*
+ * ByteData
+ * bit 0-3: AccessType
+ * 0 AnyAcc
+ * 1 ByteAcc
+ * 2 WordAcc
+ * 3 DWordAcc
+ * 4 QWordAcc
+ * 5 BufferAcc
+ * 6 Reserved
+ * 7-15 Reserved
+ * bit 4: LockRule
+ * 0 NoLock
+ * 1 Lock
+ * bit 5-6: UpdateRule
+ * 0 Preserve
+ * 1 WriteAsOnes
+ * 2 WriteAsZeros
+ * bit 7: Reserved (must be 0)
+ */
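+ /*
+ * Worked example (illustrative): a flags byte of 0x31 (binary 0011 0001)
+ * decodes with the masks below to AccessType 1 (ByteAcc), LockRule 1
+ * (Lock) and UpdateRule 1 (WriteAsOnes).
+ */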
+ raw_value = item_array_at(&op_ctx->items, i++)->immediate;
+ access_type = (raw_value >> 0) & 0xF;
+ lock_rule = (raw_value >> 4) & 0x1;
+ update_rule = (raw_value >> 5) & 0x3;
+
+ while (i < item_array_size(&op_ctx->items)) {
+ struct item *item;
+ item = item_array_at(&op_ctx->items, i++);
+
+ // An actual field object
+ if (item->type == ITEM_NAMESPACE_NODE) {
+ uacpi_field_unit *field;
+
+ length = get_field_length(item_array_at(&op_ctx->items, i++));
+ node = item->node;
+
+ obj = item_array_at(&op_ctx->items, i++)->obj;
+ field = obj->field_unit;
+
+ field->update_rule = update_rule;
+ field->lock_rule = lock_rule;
+ field->attributes = access_attrib;
+ field->access_length = access_length;
+
+ /*
+ * 0 AnyAcc
+ * 1 ByteAcc
+ * 2 WordAcc
+ * 3 DWordAcc
+ * 4 QWordAcc
+ * 5 BufferAcc
+ * 6 Reserved
+ * 7-15 Reserved
+ */
+ switch (access_type) {
+ case 0:
+ // TODO: optimize to calculate best access strategy
+ UACPI_FALLTHROUGH;
+ case 1:
+ case 5:
+ field->access_width_bytes = 1;
+ break;
+ case 2:
+ field->access_width_bytes = 2;
+ break;
+ case 3:
+ field->access_width_bytes = 4;
+ break;
+ case 4:
+ field->access_width_bytes = 8;
+ break;
+ default:
+ uacpi_error("invalid field '%.4s' access type %d\n",
+ node->name.text, access_type);
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ field->bit_length = length;
+ field->pin_offset = pin_offset;
+
+ // FIXME: overflow, OOB, etc checks
+ field->byte_offset = UACPI_ALIGN_DOWN(
+ bit_offset / 8,
+ field->access_width_bytes,
+ uacpi_u32
+ );
+
+ field->bit_offset_within_first_byte =
+ bit_offset & ((field->access_width_bytes * 8) - 1);
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_FieldOp:
+ field->region = field_data.region;
+ uacpi_shareable_ref(field->region);
+
+ field->kind = UACPI_FIELD_UNIT_KIND_NORMAL;
+ break;
+
+ case UACPI_AML_OP_BankFieldOp:
+ field->bank_region = field_data.region;
+ uacpi_shareable_ref(field->bank_region);
+
+ field->bank_selection = field_data.field0;
+ uacpi_shareable_ref(field->bank_selection);
+
+ field->bank_value = field_data.value;
+ field->kind = UACPI_FIELD_UNIT_KIND_BANK;
+ break;
+
+ case UACPI_AML_OP_IndexFieldOp:
+ field->index = field_data.field0;
+ uacpi_shareable_ref(field->index);
+
+ field->data = field_data.field1;
+ uacpi_shareable_ref(field->data);
+
+ field->kind = UACPI_FIELD_UNIT_KIND_INDEX;
+ break;
+
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ field->connection = connection_obj;
+ if (field->connection)
+ uacpi_object_ref(field->connection);
+
+ node->object = uacpi_create_internal_reference(
+ UACPI_REFERENCE_KIND_NAMED, obj
+ );
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = do_install_node_item(ctx->cur_frame, item);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ bit_offset += length;
+ pin_offset += length;
+ continue;
+ }
+
+ // All other stuff
+ switch ((int)item->immediate) {
+ // ReservedField := 0x00 PkgLength
+ case 0x00:
+ length = get_field_length(item_array_at(&op_ctx->items, i++));
+ bit_offset += length;
+ pin_offset += length;
+ break;
+
+ // AccessField := 0x01 AccessType AccessAttrib
+ // ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib AccessLength
+ case 0x01:
+ case 0x03:
+ raw_value = item_array_at(&op_ctx->items, i++)->immediate;
+
+ access_type = raw_value & 0xF;
+ access_attrib = (raw_value >> 6) & 0x3;
+
+ raw_value = item_array_at(&op_ctx->items, i++)->immediate;
+
+ /*
+ * Bits 7:6
+ * 0 = AccessAttrib = Normal Access Attributes
+ * 1 = AccessAttrib = AttribBytes (x)
+ * 2 = AccessAttrib = AttribRawBytes (x)
+ * 3 = AccessAttrib = AttribRawProcessBytes (x)
+ * x is encoded as bits 0:7 of the AccessAttrib byte.
+ */
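+ /*
+ * Illustrative decode: an AccessType byte of 0x41 selects ByteAcc with
+ * bits 7:6 = 1 (AttribBytes), so the following byte is consumed as the
+ * access length rather than as a normal access attribute.
+ */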
+ if (access_attrib) {
+ switch (access_attrib) {
+ case 1:
+ access_attrib = UACPI_ACCESS_ATTRIBUTE_BYTES;
+ break;
+ case 2:
+ access_attrib = UACPI_ACCESS_ATTRIBUTE_RAW_BYTES;
+ break;
+ case 3:
+ access_attrib = UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES;
+ break;
+ }
+
+ access_length = raw_value;
+ } else { // Normal access attributes
+ access_attrib = raw_value;
+ }
+
+ if (item->immediate == 3)
+ access_length = item_array_at(&op_ctx->items, i++)->immediate;
+ break;
+
+ // ConnectField := <0x02 NameString> | <0x02 BufferData>
+ case 0x02:
+ connection_obj = item_array_at(&op_ctx->items, i++)->obj;
+ pin_offset = 0;
+ break;
+
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static void truncate_number_if_needed(uacpi_object *obj)
+{
+ if (!g_uacpi_rt_ctx.is_rev1)
+ return;
+
+ obj->integer &= 0xFFFFFFFF;
+}
+
+static uacpi_u64 ones(void)
+{
+ return g_uacpi_rt_ctx.is_rev1 ? 0xFFFFFFFF : 0xFFFFFFFFFFFFFFFF;
+}
+
+static uacpi_status method_get_ret_target(struct execution_context *ctx,
+ uacpi_object **out_operand)
+{
+ uacpi_size depth;
+
+ // Check if we're targeting the previous call frame
+ depth = call_frame_array_size(&ctx->call_stack);
+ if (depth > 1) {
+ struct op_context *op_ctx;
+ struct call_frame *frame;
+
+ frame = call_frame_array_at(&ctx->call_stack, depth - 2);
+ depth = op_context_array_size(&frame->pending_ops);
+
+ // OK, no one wants the return value at the call site. Discard it.
+ if (!depth) {
+ *out_operand = UACPI_NULL;
+ return UACPI_STATUS_OK;
+ }
+
+ op_ctx = op_context_array_at(&frame->pending_ops, depth - 1);
+
+ /*
+ * Prevent the table that is being dynamically loaded from attempting to
+ * return a value to the caller. This is unlikely to ever be encountered
+ * in the wild, but we should still guard against the possibility.
+ */
+ if (uacpi_unlikely(op_ctx->op->code == UACPI_AML_OP_LoadOp ||
+ op_ctx->op->code == UACPI_AML_OP_LoadTableOp)) {
+ *out_operand = UACPI_NULL;
+ return UACPI_STATUS_OK;
+ }
+
+ *out_operand = item_array_last(&op_ctx->items)->obj;
+ return UACPI_STATUS_OK;
+ }
+
+ return UACPI_STATUS_NOT_FOUND;
+}
+
+static uacpi_status method_get_ret_object(struct execution_context *ctx,
+ uacpi_object **out_obj)
+{
+ uacpi_status ret;
+
+ ret = method_get_ret_target(ctx, out_obj);
+ if (ret == UACPI_STATUS_NOT_FOUND) {
+ *out_obj = ctx->ret;
+ return UACPI_STATUS_OK;
+ }
+ if (ret != UACPI_STATUS_OK || *out_obj == UACPI_NULL)
+ return ret;
+
+ *out_obj = uacpi_unwrap_internal_reference(*out_obj);
+ return UACPI_STATUS_OK;
+}
+
+static struct code_block *find_last_block(struct code_block_array *blocks,
+ enum code_block_type type)
+{
+ uacpi_size i;
+
+ i = code_block_array_size(blocks);
+ while (i-- > 0) {
+ struct code_block *block;
+
+ block = code_block_array_at(blocks, i);
+ if (block->type == type)
+ return block;
+ }
+
+ return UACPI_NULL;
+}
+
+static void update_scope(struct call_frame *frame)
+{
+ struct code_block *block;
+
+ block = find_last_block(&frame->code_blocks, CODE_BLOCK_SCOPE);
+ if (block == UACPI_NULL) {
+ frame->cur_scope = uacpi_namespace_root();
+ return;
+ }
+
+ frame->cur_scope = block->node;
+}
+
+static uacpi_status begin_block_execution(struct execution_context *ctx)
+{
+ struct call_frame *cur_frame = ctx->cur_frame;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ struct package_length *pkg;
+ struct code_block *block;
+
+ block = code_block_array_alloc(&cur_frame->code_blocks);
+ if (uacpi_unlikely(block == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ pkg = &item_array_at(&op_ctx->items, 0)->pkg;
+
+ // Disarm the tracked package so that we don't skip the Scope
+ op_ctx->tracked_pkg_idx = 0;
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_IfOp:
+ block->type = CODE_BLOCK_IF;
+ break;
+ case UACPI_AML_OP_ElseOp:
+ block->type = CODE_BLOCK_ELSE;
+ break;
+ case UACPI_AML_OP_WhileOp:
+ block->type = CODE_BLOCK_WHILE;
+
+ if (pkg->begin == cur_frame->prev_while_code_offset) {
+ uacpi_u64 cur_ticks;
+
+ cur_ticks = uacpi_kernel_get_nanoseconds_since_boot();
+
+ if (uacpi_unlikely(cur_ticks > block->expiration_point)) {
+ uacpi_error("loop time out after running for %u seconds\n",
+ g_uacpi_rt_ctx.loop_timeout_seconds);
+ code_block_array_pop(&cur_frame->code_blocks);
+ return UACPI_STATUS_AML_LOOP_TIMEOUT;
+ }
+
+ block->expiration_point = cur_frame->prev_while_expiration;
+ } else {
+ /*
+ * Calculate the expiration point for this loop.
+ * If a loop is executed past this point, it will get aborted.
+ */
+ block->expiration_point = uacpi_kernel_get_nanoseconds_since_boot();
+ block->expiration_point +=
+ g_uacpi_rt_ctx.loop_timeout_seconds * UACPI_NANOSECONDS_PER_SEC;
+ }
+ break;
+ case UACPI_AML_OP_ScopeOp:
+ case UACPI_AML_OP_DeviceOp:
+ case UACPI_AML_OP_ProcessorOp:
+ case UACPI_AML_OP_PowerResOp:
+ case UACPI_AML_OP_ThermalZoneOp:
+ block->type = CODE_BLOCK_SCOPE;
+ block->node = item_array_at(&op_ctx->items, 1)->node;
+ break;
+ default:
+ code_block_array_pop(&cur_frame->code_blocks);
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ // -1 because we want to re-evaluate at the start of the op next time
+ block->begin = pkg->begin - 1;
+ block->end = pkg->end;
+ ctx->cur_block = block;
+
+ cur_frame->last_while = find_last_block(&cur_frame->code_blocks,
+ CODE_BLOCK_WHILE);
+ update_scope(cur_frame);
+ return UACPI_STATUS_OK;
+}
+
+static void frame_reset_post_end_block(struct execution_context *ctx,
+ enum code_block_type type)
+{
+ struct call_frame *frame = ctx->cur_frame;
+
+ if (type == CODE_BLOCK_WHILE) {
+ struct code_block *block = ctx->cur_block;
+
+ // + 1 here to skip the WhileOp and get to the PkgLength
+ frame->prev_while_code_offset = block->begin + 1;
+ frame->prev_while_expiration = block->expiration_point;
+ }
+
+ code_block_array_pop(&frame->code_blocks);
+ ctx->cur_block = code_block_array_last(&frame->code_blocks);
+
+ if (type == CODE_BLOCK_WHILE) {
+ frame->last_while = find_last_block(&frame->code_blocks, type);
+ } else if (type == CODE_BLOCK_SCOPE) {
+ update_scope(frame);
+ }
+}
+
+static void debug_store_no_recurse(const uacpi_char *prefix, uacpi_object *src)
+{
+ switch (src->type) {
+ case UACPI_OBJECT_UNINITIALIZED:
+ uacpi_trace("%s Uninitialized\n", prefix);
+ break;
+ case UACPI_OBJECT_STRING:
+ uacpi_trace("%s String => \"%s\"\n", prefix, src->buffer->text);
+ break;
+ case UACPI_OBJECT_INTEGER:
+ if (g_uacpi_rt_ctx.is_rev1) {
+ uacpi_trace(
+ "%s Integer => 0x%08X\n", prefix, (uacpi_u32)src->integer
+ );
+ } else {
+ uacpi_trace(
+ "%s Integer => 0x%016"UACPI_PRIX64"\n", prefix,
+ UACPI_FMT64(src->integer)
+ );
+ }
+ break;
+ case UACPI_OBJECT_REFERENCE:
+ uacpi_trace("%s Reference @%p => %p\n", prefix, src, src->inner_object);
+ break;
+ case UACPI_OBJECT_PACKAGE:
+ uacpi_trace(
+ "%s Package @%p (%p) (%zu elements)\n",
+ prefix, src, src->package, src->package->count
+ );
+ break;
+ case UACPI_OBJECT_BUFFER:
+ uacpi_trace(
+ "%s Buffer @%p (%p) (%zu bytes)\n",
+ prefix, src, src->buffer, src->buffer->size
+ );
+ break;
+ case UACPI_OBJECT_OPERATION_REGION:
+ uacpi_trace(
+ "%s OperationRegion (ASID %d) 0x%016"UACPI_PRIX64
+ " -> 0x%016"UACPI_PRIX64"\n", prefix,
+ src->op_region->space, UACPI_FMT64(src->op_region->offset),
+ UACPI_FMT64(src->op_region->offset + src->op_region->length)
+ );
+ break;
+ case UACPI_OBJECT_POWER_RESOURCE:
+ uacpi_trace(
+ "%s Power Resource %d %d\n",
+ prefix, src->power_resource.system_level,
+ src->power_resource.resource_order
+ );
+ break;
+ case UACPI_OBJECT_PROCESSOR:
+ uacpi_trace(
+ "%s Processor[%d] 0x%08X (%d)\n",
+ prefix, src->processor->id, src->processor->block_address,
+ src->processor->block_length
+ );
+ break;
+ case UACPI_OBJECT_BUFFER_INDEX:
+ uacpi_trace(
+ "%s Buffer Index %p[%zu] => 0x%02X\n",
+ prefix, src->buffer_index.buffer->data, src->buffer_index.idx,
+ *buffer_index_cursor(&src->buffer_index)
+ );
+ break;
+ case UACPI_OBJECT_MUTEX:
+ uacpi_trace(
+ "%s Mutex @%p (%p => %p) sync level %d\n",
+ prefix, src, src->mutex, src->mutex->handle,
+ src->mutex->sync_level
+ );
+ break;
+ case UACPI_OBJECT_METHOD:
+ uacpi_trace("%s Method @%p (%p)\n", prefix, src, src->method);
+ break;
+ default:
+ uacpi_trace(
+ "%s %s @%p\n",
+ prefix, uacpi_object_type_to_string(src->type), src
+ );
+ }
+}
+
+static uacpi_status debug_store(uacpi_object *src)
+{
+ /*
+ * Don't bother running the body if the current log level is not set to trace.
+ * All DebugOp logging is done as TRACE exclusively.
+ */
+ if (!uacpi_should_log(UACPI_LOG_TRACE))
+ return UACPI_STATUS_OK;
+
+ src = uacpi_unwrap_internal_reference(src);
+
+ debug_store_no_recurse("[AML DEBUG]", src);
+
+ if (src->type == UACPI_OBJECT_PACKAGE) {
+ uacpi_package *pkg = src->package;
+ uacpi_size i;
+
+ for (i = 0; i < pkg->count; ++i) {
+ uacpi_object *obj = pkg->objects[i];
+ if (obj->type == UACPI_OBJECT_REFERENCE &&
+ obj->flags == UACPI_REFERENCE_KIND_PKG_INDEX)
+ obj = obj->inner_object;
+
+ debug_store_no_recurse("Element:", obj);
+ }
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * NOTE: this function returns the innermost reference, i.e. the parent
+ * of the final non-reference object
+ */
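+/*
+ * For example, unwinding a chain LocalX reference -> Named reference ->
+ * Integer returns the Named reference, whose inner_object is the Integer.
+ */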
+static uacpi_object *reference_unwind(uacpi_object *obj)
+{
+ uacpi_object *parent = obj;
+
+ while (obj) {
+ if (obj->type != UACPI_OBJECT_REFERENCE)
+ return parent;
+
+ parent = obj;
+ obj = parent->inner_object;
+ }
+
+ // This should be unreachable
+ return UACPI_NULL;
+}
+
+static uacpi_iteration_decision opregion_try_detach_from_parent(
+ void *user, uacpi_namespace_node *node, uacpi_u32 node_depth
+)
+{
+ uacpi_object *target_object = user;
+ UACPI_UNUSED(node_depth);
+
+ if (node->object == target_object) {
+ uacpi_opregion_uninstall_handler(node);
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+static void object_replace_child(uacpi_object *parent, uacpi_object *new_child)
+{
+ if (parent->flags == UACPI_REFERENCE_KIND_NAMED &&
+ uacpi_object_is(parent->inner_object, UACPI_OBJECT_OPERATION_REGION)) {
+
+ /*
+ * We're doing a CopyObject or similar to a namespace node that is an
+ * operation region. Try to find the parent node and manually detach
+ * the handler.
+ */
+ opregion_try_detach_from_parent(parent, uacpi_namespace_root(), 0);
+ uacpi_namespace_do_for_each_child(
+ uacpi_namespace_root(), opregion_try_detach_from_parent, UACPI_NULL,
+ UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY,
+ UACPI_SHOULD_LOCK_NO, UACPI_PERMANENT_ONLY_NO, parent
+ );
+ }
+
+ uacpi_object_detach_child(parent);
+ uacpi_object_attach_child(parent, new_child);
+}
+
+/*
+ * Breakdown of what happens here:
+ *
+ * CopyObject(..., Obj) where Obj is:
+ * 1. LocalX -> Overwrite LocalX.
+ * 2. NAME -> Overwrite NAME.
+ * 3. ArgX -> Overwrite ArgX unless ArgX is a reference, in that case
+ * overwrite the referenced object.
+ * 4. RefOf -> Not allowed here.
+ * 5. Index -> Overwrite Object stored at the index.
+ */
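+/*
+ * For illustration, with the rules above CopyObject(123, Local0)
+ * overwrites Local0 itself, while CopyObject(123, Arg0) first follows
+ * Arg0 if it holds a reference and overwrites the referenced object
+ * instead.
+ */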
+static uacpi_status copy_object_to_reference(uacpi_object *dst,
+ uacpi_object *src)
+{
+ uacpi_status ret;
+ uacpi_object *src_obj, *new_obj;
+
+ switch (dst->flags) {
+ case UACPI_REFERENCE_KIND_ARG: {
+ uacpi_object *referenced_obj;
+
+ referenced_obj = uacpi_unwrap_internal_reference(dst);
+ if (referenced_obj->type == UACPI_OBJECT_REFERENCE) {
+ dst = reference_unwind(referenced_obj);
+ break;
+ }
+
+ UACPI_FALLTHROUGH;
+ }
+ case UACPI_REFERENCE_KIND_LOCAL:
+ case UACPI_REFERENCE_KIND_PKG_INDEX:
+ case UACPI_REFERENCE_KIND_NAMED:
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ src_obj = uacpi_unwrap_internal_reference(src);
+
+ new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(new_obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = uacpi_object_assign(new_obj, src_obj,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ object_replace_child(dst, new_obj);
+ uacpi_object_unref(new_obj);
+
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * if Store(..., Obj) where Obj is:
+ * 1. LocalX/Index -> OVERWRITE unless the object is a reference, in that
+ * case store to the referenced object _with_ implicit
+ * cast.
+ * 2. ArgX -> OVERWRITE unless the object is a reference, in that
+ * case OVERWRITE the referenced object.
+ * 3. NAME -> Store with implicit cast.
+ * 4. RefOf -> Not allowed here.
+ */
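+/*
+ * For illustration, with the rules above Store(0x10, Local0) replaces
+ * whatever Local0 holds, whereas Store(0x10, NAME) where NAME is e.g. a
+ * field unit performs an implicit-cast write into the existing object.
+ */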
+static uacpi_status store_to_reference(
+ uacpi_object *dst, uacpi_object *src, uacpi_data_view *wtr_response
+)
+{
+ uacpi_object *src_obj;
+ uacpi_bool overwrite = UACPI_FALSE;
+
+ switch (dst->flags) {
+ case UACPI_REFERENCE_KIND_LOCAL:
+ case UACPI_REFERENCE_KIND_ARG:
+ case UACPI_REFERENCE_KIND_PKG_INDEX: {
+ uacpi_object *referenced_obj;
+
+ if (dst->flags == UACPI_REFERENCE_KIND_PKG_INDEX)
+ referenced_obj = dst->inner_object;
+ else
+ referenced_obj = uacpi_unwrap_internal_reference(dst);
+
+ if (referenced_obj->type == UACPI_OBJECT_REFERENCE) {
+ overwrite = dst->flags == UACPI_REFERENCE_KIND_ARG;
+ dst = reference_unwind(referenced_obj);
+ break;
+ }
+
+ overwrite = UACPI_TRUE;
+ break;
+ }
+ case UACPI_REFERENCE_KIND_NAMED:
+ dst = reference_unwind(dst);
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ src_obj = uacpi_unwrap_internal_reference(src);
+ overwrite |= dst->inner_object->type == UACPI_OBJECT_UNINITIALIZED;
+
+ if (overwrite) {
+ uacpi_status ret;
+ uacpi_object *new_obj;
+
+ new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(new_obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = uacpi_object_assign(new_obj, src_obj,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_object_unref(new_obj);
+ return ret;
+ }
+
+ object_replace_child(dst, new_obj);
+ uacpi_object_unref(new_obj);
+ return UACPI_STATUS_OK;
+ }
+
+ return object_assign_with_implicit_cast(
+ dst->inner_object, src_obj, wtr_response
+ );
+}
+
+static uacpi_status handle_ref_or_deref_of(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *dst, *src;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+
+ if (op_ctx->op->code == UACPI_AML_OP_CondRefOfOp)
+ dst = item_array_at(&op_ctx->items, 2)->obj;
+ else
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (op_ctx->op->code == UACPI_AML_OP_DerefOfOp) {
+ uacpi_bool was_a_reference = UACPI_FALSE;
+
+ if (src->type == UACPI_OBJECT_REFERENCE) {
+ was_a_reference = UACPI_TRUE;
+
+ /*
+ * Explicit dereferencing [DerefOf] behavior:
+ * Simply grabs the bottom-most object that is not a reference.
+ * This mimics the behavior of NT Acpi.sys: any DerefOf fetches
+ * the bottom-most reference. Note that this is different from
+ * ACPICA where DerefOf dereferences one level.
+ */
+ src = reference_unwind(src)->inner_object;
+ }
+
+ if (src->type == UACPI_OBJECT_BUFFER_INDEX) {
+ uacpi_buffer_index *buf_idx = &src->buffer_index;
+
+ dst->type = UACPI_OBJECT_INTEGER;
+ uacpi_memcpy_zerout(
+ &dst->integer, buffer_index_cursor(buf_idx),
+ sizeof(dst->integer), 1
+ );
+ return UACPI_STATUS_OK;
+ }
+
+ if (!was_a_reference) {
+ uacpi_error(
+ "invalid DerefOf argument: %s, expected a reference\n",
+ uacpi_object_type_to_string(src->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return uacpi_object_assign(dst, src,
+ UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY);
+ }
+
+ dst->type = UACPI_OBJECT_REFERENCE;
+ dst->inner_object = src;
+ uacpi_object_ref(src);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status do_binary_math(
+ uacpi_object *arg0, uacpi_object *arg1,
+ uacpi_object *tgt0, uacpi_object *tgt1,
+ uacpi_aml_op op
+)
+{
+ uacpi_u64 lhs, rhs, res;
+ uacpi_bool should_negate = UACPI_FALSE;
+
+ lhs = arg0->integer;
+ rhs = arg1->integer;
+
+ switch (op)
+ {
+ case UACPI_AML_OP_AddOp:
+ res = lhs + rhs;
+ break;
+ case UACPI_AML_OP_SubtractOp:
+ res = lhs - rhs;
+ break;
+ case UACPI_AML_OP_MultiplyOp:
+ res = lhs * rhs;
+ break;
+ case UACPI_AML_OP_ShiftLeftOp:
+ case UACPI_AML_OP_ShiftRightOp:
+ if (rhs <= (g_uacpi_rt_ctx.is_rev1 ? 31 : 63)) {
+ if (op == UACPI_AML_OP_ShiftLeftOp)
+ res = lhs << rhs;
+ else
+ res = lhs >> rhs;
+ } else {
+ res = 0;
+ }
+ break;
+ case UACPI_AML_OP_NandOp:
+ should_negate = UACPI_TRUE;
+ UACPI_FALLTHROUGH;
+ case UACPI_AML_OP_AndOp:
+ res = rhs & lhs;
+ break;
+ case UACPI_AML_OP_NorOp:
+ should_negate = UACPI_TRUE;
+ UACPI_FALLTHROUGH;
+ case UACPI_AML_OP_OrOp:
+ res = rhs | lhs;
+ break;
+ case UACPI_AML_OP_XorOp:
+ res = rhs ^ lhs;
+ break;
+ case UACPI_AML_OP_DivideOp:
+ if (uacpi_unlikely(rhs == 0)) {
+ uacpi_error("attempted to divide by zero\n");
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+ tgt1->integer = lhs / rhs;
+ res = lhs % rhs;
+ break;
+ case UACPI_AML_OP_ModOp:
+ if (uacpi_unlikely(rhs == 0)) {
+ uacpi_error("attempted to calculate modulo of zero\n");
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+ res = lhs % rhs;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (should_negate)
+ res = ~res;
+
+ tgt0->integer = res;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_binary_math(struct execution_context *ctx)
+{
+ uacpi_object *arg0, *arg1, *tgt0, *tgt1;
+ struct item_array *items = &ctx->cur_op_ctx->items;
+ uacpi_aml_op op = ctx->cur_op_ctx->op->code;
+
+ arg0 = item_array_at(items, 0)->obj;
+ arg1 = item_array_at(items, 1)->obj;
+
+ if (op == UACPI_AML_OP_DivideOp) {
+ tgt0 = item_array_at(items, 4)->obj;
+ tgt1 = item_array_at(items, 5)->obj;
+ } else {
+ tgt0 = item_array_at(items, 3)->obj;
+ tgt1 = UACPI_NULL;
+ }
+
+ return do_binary_math(arg0, arg1, tgt0, tgt1, op);
+}
+
+static uacpi_status handle_unary_math(struct execution_context *ctx)
+{
+ uacpi_object *arg, *tgt;
+ struct item_array *items = &ctx->cur_op_ctx->items;
+ uacpi_aml_op op = ctx->cur_op_ctx->op->code;
+
+ arg = item_array_at(items, 0)->obj;
+ tgt = item_array_at(items, 2)->obj;
+
+ switch (op) {
+ case UACPI_AML_OP_NotOp:
+ tgt->integer = ~arg->integer;
+ truncate_number_if_needed(tgt);
+ break;
+ case UACPI_AML_OP_FindSetRightBitOp:
+ tgt->integer = uacpi_bit_scan_forward(arg->integer);
+ break;
+ case UACPI_AML_OP_FindSetLeftBitOp:
+ tgt->integer = uacpi_bit_scan_backward(arg->integer);
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status ensure_valid_idx(uacpi_object *obj, uacpi_size idx,
+ uacpi_size src_size)
+{
+ if (uacpi_likely(idx < src_size))
+ return UACPI_STATUS_OK;
+
+ uacpi_error(
+ "invalid index %zu, %s@%p has %zu elements\n",
+ idx, uacpi_object_type_to_string(obj->type), obj, src_size
+ );
+ return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
+}
+
+static uacpi_status handle_index(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *src;
+ struct item *dst;
+ uacpi_size idx;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ idx = item_array_at(&op_ctx->items, 1)->obj->integer;
+ dst = item_array_at(&op_ctx->items, 3);
+
+ switch (src->type) {
+ case UACPI_OBJECT_BUFFER:
+ case UACPI_OBJECT_STRING: {
+ uacpi_buffer_index *buf_idx;
+ uacpi_data_view buf;
+ get_object_storage(src, &buf, UACPI_FALSE);
+
+ ret = ensure_valid_idx(src, idx, buf.length);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ dst->type = ITEM_OBJECT;
+ dst->obj = uacpi_create_object(UACPI_OBJECT_BUFFER_INDEX);
+ if (uacpi_unlikely(dst->obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ buf_idx = &dst->obj->buffer_index;
+ buf_idx->idx = idx;
+ buf_idx->buffer = src->buffer;
+ uacpi_shareable_ref(buf_idx->buffer);
+
+ break;
+ }
+ case UACPI_OBJECT_PACKAGE: {
+ uacpi_package *pkg = src->package;
+ uacpi_object *obj;
+
+ ret = ensure_valid_idx(src, idx, pkg->count);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ /*
+ * Lazily transform the package element into an internal reference
+ * to itself of type PKG_INDEX. This is needed to support stuff like
+ * CopyObject(..., Index(pkg, X)) where the new object must be
+ * propagated to anyone else with a currently alive index object.
+ *
+ * Sidenote: Yes, IndexOp is not a SimpleName, so technically it is
+ * illegal to CopyObject to it. However, yet again we fall
+ * victim to the NT ACPI driver implementation, which allows
+ * it just fine.
+ */
+ obj = pkg->objects[idx];
+ if (obj->type != UACPI_OBJECT_REFERENCE ||
+ obj->flags != UACPI_REFERENCE_KIND_PKG_INDEX) {
+
+ obj = uacpi_create_internal_reference(
+ UACPI_REFERENCE_KIND_PKG_INDEX, obj
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ pkg->objects[idx] = obj;
+ uacpi_object_unref(obj->inner_object);
+ }
+
+ dst->obj = obj;
+ dst->type = ITEM_OBJECT;
+ uacpi_object_ref(dst->obj);
+ break;
+ }
+ default:
+ uacpi_error(
+ "invalid argument for Index: %s, "
+ "expected String/Buffer/Package\n",
+ uacpi_object_type_to_string(src->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_u64 object_to_integer(const uacpi_object *obj,
+ uacpi_size max_buffer_bytes)
+{
+ uacpi_u64 dst;
+
+ switch (obj->type) {
+ case UACPI_OBJECT_INTEGER:
+ dst = obj->integer;
+ break;
+ case UACPI_OBJECT_BUFFER: {
+ uacpi_size bytes;
+ bytes = UACPI_MIN(max_buffer_bytes, obj->buffer->size);
+ uacpi_memcpy_zerout(&dst, obj->buffer->data, sizeof(dst), bytes);
+ break;
+ }
+ case UACPI_OBJECT_STRING:
+ uacpi_string_to_integer(
+ obj->buffer->text, obj->buffer->size, UACPI_BASE_AUTO, &dst
+ );
+ break;
+ default:
+ dst = 0;
+ break;
+ }
+
+ return dst;
+}
+
+static uacpi_status integer_to_string(
+ uacpi_u64 integer, uacpi_buffer *str, uacpi_bool is_hex
+)
+{
+ int repr_len;
+ uacpi_char int_buf[21];
+ uacpi_size final_size;
+
+ repr_len = uacpi_snprintf(
+ int_buf, sizeof(int_buf),
+ is_hex ? "%"UACPI_PRIX64 : "%"UACPI_PRIu64,
+ UACPI_FMT64(integer)
+ );
+ if (uacpi_unlikely(repr_len < 0))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ // 0x prefix + repr + \0
+ final_size = (is_hex ? 2 : 0) + repr_len + 1;
+
+ str->data = uacpi_kernel_alloc(final_size);
+ if (uacpi_unlikely(str->data == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ if (is_hex) {
+ str->text[0] = '0';
+ str->text[1] = 'x';
+ }
+ uacpi_memcpy(str->text + (is_hex ? 2 : 0), int_buf, repr_len + 1);
+ str->size = final_size;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status buffer_to_string(
+ uacpi_buffer *buf, uacpi_buffer *str, uacpi_bool is_hex
+)
+{
+ int repr_len;
+ uacpi_char int_buf[5];
+ uacpi_size i, final_size;
+ uacpi_char *cursor;
+
+ if (is_hex) {
+ final_size = 4 * buf->size;
+ } else {
+ final_size = 0;
+
+ for (i = 0; i < buf->size; ++i) {
+ uacpi_u8 value = ((uacpi_u8*)buf->data)[i];
+
+ if (value < 10)
+ final_size += 1;
+ else if (value < 100)
+ final_size += 2;
+ else
+ final_size += 3;
+ }
+ }
+
+ // A comma after every value except the last
+ final_size += buf->size - 1;
+
+ // Null terminator
+ final_size += 1;
+
+ str->data = uacpi_kernel_alloc(final_size);
+ if (uacpi_unlikely(str->data == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ cursor = str->data;
+
+ for (i = 0; i < buf->size; ++i) {
+ repr_len = uacpi_snprintf(
+ int_buf, sizeof(int_buf),
+ is_hex ? "0x%02X" : "%d",
+ ((uacpi_u8*)buf->data)[i]
+ );
+ if (uacpi_unlikely(repr_len < 0)) {
+ uacpi_free(str->data, final_size);
+ str->data = UACPI_NULL;
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ uacpi_memcpy(cursor, int_buf, repr_len + 1);
+ cursor += repr_len;
+
+ if (i != buf->size - 1)
+ *cursor++ = ',';
+ }
+
+ str->size = final_size;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status do_make_empty_object(uacpi_buffer *buf,
+ uacpi_bool is_string)
+{
+ buf->text = uacpi_kernel_alloc_zeroed(sizeof(uacpi_char));
+ if (uacpi_unlikely(buf->text == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ if (is_string)
+ buf->size = sizeof(uacpi_char);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status make_null_string(uacpi_buffer *buf)
+{
+ return do_make_empty_object(buf, UACPI_TRUE);
+}
+
+static uacpi_status make_null_buffer(uacpi_buffer *buf)
+{
+ /*
+ * Allocate at least 1 byte just to be safe,
+ * even for empty buffers. We still set the
+ * size to 0 though.
+ */
+ return do_make_empty_object(buf, UACPI_FALSE);
+}
+
+static uacpi_status handle_to(struct execution_context *ctx)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *src, *dst;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ dst = item_array_at(&op_ctx->items, 2)->obj;
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_ToIntegerOp:
+ // NT always takes the first 8 bytes, even for revision 1
+ dst->integer = object_to_integer(src, 8);
+ break;
+
+ case UACPI_AML_OP_ToHexStringOp:
+ case UACPI_AML_OP_ToDecimalStringOp: {
+ uacpi_bool is_hex = op_ctx->op->code == UACPI_AML_OP_ToHexStringOp;
+
+ if (src->type == UACPI_OBJECT_INTEGER) {
+ ret = integer_to_string(src->integer, dst->buffer, is_hex);
+ break;
+ } else if (src->type == UACPI_OBJECT_BUFFER) {
+ if (uacpi_unlikely(src->buffer->size == 0))
+ return make_null_string(dst->buffer);
+
+ ret = buffer_to_string(src->buffer, dst->buffer, is_hex);
+ break;
+ }
+ UACPI_FALLTHROUGH;
+ }
+ case UACPI_AML_OP_ToBufferOp: {
+ uacpi_data_view buf;
+ uacpi_u8 *dst_buf;
+
+ ret = get_object_storage(src, &buf, UACPI_TRUE);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (uacpi_unlikely(buf.length == 0))
+ return make_null_buffer(dst->buffer);
+
+ dst_buf = uacpi_kernel_alloc(buf.length);
+ if (uacpi_unlikely(dst_buf == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(dst_buf, buf.bytes, buf.length);
+ dst->buffer->data = dst_buf;
+ dst->buffer->size = buf.length;
+ break;
+ }
+
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return ret;
+}
+
+static uacpi_status handle_to_string(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_buffer *src_buf, *dst_buf;
+ uacpi_size req_len, len;
+
+ src_buf = item_array_at(&op_ctx->items, 0)->obj->buffer;
+ req_len = item_array_at(&op_ctx->items, 1)->obj->integer;
+ dst_buf = item_array_at(&op_ctx->items, 3)->obj->buffer;
+
+ len = UACPI_MIN(req_len, src_buf->size);
+ if (uacpi_unlikely(len == 0))
+ return make_null_string(dst_buf);
+
+ len = uacpi_strnlen(src_buf->text, len);
+
+ dst_buf->text = uacpi_kernel_alloc(len + 1);
+ if (uacpi_unlikely(dst_buf->text == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(dst_buf->text, src_buf->data, len);
+ dst_buf->text[len] = '\0';
+ dst_buf->size = len + 1;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_mid(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *src, *dst;
+ uacpi_data_view src_buf;
+ uacpi_buffer *dst_buf;
+ uacpi_size idx, len;
+ uacpi_bool is_string;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ if (uacpi_unlikely(src->type != UACPI_OBJECT_STRING &&
+ src->type != UACPI_OBJECT_BUFFER)) {
+ uacpi_error(
+ "invalid argument for Mid: %s, expected String/Buffer\n",
+ uacpi_object_type_to_string(src->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ idx = item_array_at(&op_ctx->items, 1)->obj->integer;
+ len = item_array_at(&op_ctx->items, 2)->obj->integer;
+ dst = item_array_at(&op_ctx->items, 4)->obj;
+ dst_buf = dst->buffer;
+
+ is_string = src->type == UACPI_OBJECT_STRING;
+ get_object_storage(src, &src_buf, UACPI_FALSE);
+
+ if (uacpi_unlikely(src_buf.length == 0 || idx >= src_buf.length ||
+ len == 0)) {
+ if (src->type == UACPI_OBJECT_STRING) {
+ dst->type = UACPI_OBJECT_STRING;
+ return make_null_string(dst_buf);
+ }
+
+ return make_null_buffer(dst_buf);
+ }
+
+ // Guaranteed to be at least 1 here
+ len = UACPI_MIN(len, src_buf.length - idx);
+
+ dst_buf->data = uacpi_kernel_alloc(len + is_string);
+ if (uacpi_unlikely(dst_buf->data == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(dst_buf->data, (uacpi_u8*)src_buf.bytes + idx, len);
+ dst_buf->size = len;
+
+ if (is_string) {
+ dst_buf->text[dst_buf->size++] = '\0';
+ dst->type = UACPI_OBJECT_STRING;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_concatenate(struct execution_context *ctx)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *arg0, *arg1, *dst;
+ uacpi_u8 *dst_buf;
+ uacpi_size buf_size = 0;
+
+ arg0 = item_array_at(&op_ctx->items, 0)->obj;
+ arg1 = item_array_at(&op_ctx->items, 1)->obj;
+ dst = item_array_at(&op_ctx->items, 3)->obj;
+
+ switch (arg0->type) {
+ case UACPI_OBJECT_INTEGER: {
+ uacpi_u64 arg1_as_int;
+ uacpi_size int_size;
+
+ int_size = sizeof_int();
+ buf_size = int_size * 2;
+
+ dst_buf = uacpi_kernel_alloc(buf_size);
+ if (uacpi_unlikely(dst_buf == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ arg1_as_int = object_to_integer(arg1, 8);
+
+ uacpi_memcpy(dst_buf, &arg0->integer, int_size);
+ uacpi_memcpy(dst_buf + int_size, &arg1_as_int, int_size);
+ break;
+ }
+ case UACPI_OBJECT_BUFFER: {
+ uacpi_buffer *arg0_buf = arg0->buffer;
+ uacpi_data_view arg1_buf = { 0 };
+
+ get_object_storage(arg1, &arg1_buf, UACPI_TRUE);
+ buf_size = arg0_buf->size + arg1_buf.length;
+
+ dst_buf = uacpi_kernel_alloc(buf_size);
+ if (uacpi_unlikely(dst_buf == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(dst_buf, arg0_buf->data, arg0_buf->size);
+ uacpi_memcpy(dst_buf + arg0_buf->size, arg1_buf.bytes, arg1_buf.length);
+ break;
+ }
+ case UACPI_OBJECT_STRING: {
+ uacpi_char int_buf[17];
+ void *arg1_ptr;
+ uacpi_size arg0_size, arg1_size;
+ uacpi_buffer *arg0_buf = arg0->buffer;
+
+ switch (arg1->type) {
+ case UACPI_OBJECT_INTEGER: {
+ int size;
+ size = uacpi_snprintf(int_buf, sizeof(int_buf), "%"UACPI_PRIx64,
+ UACPI_FMT64(arg1->integer));
+ if (size < 0)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ arg1_ptr = int_buf;
+ arg1_size = size + 1;
+ break;
+ }
+ case UACPI_OBJECT_STRING:
+ arg1_ptr = arg1->buffer->data;
+ arg1_size = arg1->buffer->size;
+ break;
+ case UACPI_OBJECT_BUFFER: {
+ uacpi_buffer tmp_buf;
+
+ ret = buffer_to_string(arg1->buffer, &tmp_buf, UACPI_TRUE);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ arg1_ptr = tmp_buf.data;
+ arg1_size = tmp_buf.size;
+ break;
+ }
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ arg0_size = arg0_buf->size ? arg0_buf->size - 1 : arg0_buf->size;
+ buf_size = arg0_size + arg1_size;
+
+ dst_buf = uacpi_kernel_alloc(buf_size);
+ if (uacpi_unlikely(dst_buf == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto cleanup;
+ }
+
+ uacpi_memcpy(dst_buf, arg0_buf->data, arg0_size);
+ uacpi_memcpy(dst_buf + arg0_size, arg1_ptr, arg1_size);
+ dst->type = UACPI_OBJECT_STRING;
+
+ cleanup:
+ if (arg1->type == UACPI_OBJECT_BUFFER)
+ uacpi_free(arg1_ptr, arg1_size);
+ break;
+ }
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (uacpi_likely_success(ret)) {
+ dst->buffer->data = dst_buf;
+ dst->buffer->size = buf_size;
+ }
+ return ret;
+}
+
+static uacpi_status handle_concatenate_res(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_data_view buffer;
+ uacpi_object *arg0, *arg1, *dst;
+ uacpi_u8 *dst_buf;
+ uacpi_size dst_size, arg0_size, arg1_size;
+
+ arg0 = item_array_at(&op_ctx->items, 0)->obj;
+ arg1 = item_array_at(&op_ctx->items, 1)->obj;
+ dst = item_array_at(&op_ctx->items, 3)->obj;
+
+ uacpi_buffer_to_view(arg0->buffer, &buffer);
+ ret = uacpi_find_aml_resource_end_tag(buffer, &arg0_size);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ uacpi_buffer_to_view(arg1->buffer, &buffer);
+ ret = uacpi_find_aml_resource_end_tag(buffer, &arg1_size);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ dst_size = arg0_size + arg1_size + sizeof(struct acpi_resource_end_tag);
+
+ dst_buf = uacpi_kernel_alloc(dst_size);
+ if (uacpi_unlikely(dst_buf == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ dst->buffer->data = dst_buf;
+ dst->buffer->size = dst_size;
+
+ uacpi_memcpy(dst_buf, arg0->buffer->data, arg0_size);
+ uacpi_memcpy(dst_buf + arg0_size, arg1->buffer->data, arg1_size);
+
+ /*
+ * Small item (0), End Tag (0x0F), length 1
+ * Leave the checksum as 0
+ */
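+ /*
+ * With the standard encoding (small descriptor, item name 0x0F in bits
+ * 6:3, length 1 in bits 2:0) the two trailing bytes end up as 0x79 0x00.
+ */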
+ dst_buf[dst_size - 2] =
+ (ACPI_RESOURCE_END_TAG << ACPI_SMALL_ITEM_NAME_IDX) |
+ (sizeof(struct acpi_resource_end_tag) - 1);
+ dst_buf[dst_size - 1] = 0;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_sizeof(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *src, *dst;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (uacpi_likely(src->type == UACPI_OBJECT_REFERENCE))
+ src = reference_unwind(src)->inner_object;
+
+ switch (src->type) {
+ case UACPI_OBJECT_STRING:
+ case UACPI_OBJECT_BUFFER: {
+ uacpi_data_view buf;
+ get_object_storage(src, &buf, UACPI_FALSE);
+
+ dst->integer = buf.length;
+ break;
+ }
+
+ case UACPI_OBJECT_PACKAGE:
+ dst->integer = src->package->count;
+ break;
+
+ default:
+ uacpi_error(
+ "invalid argument for Sizeof: %s, "
+ "expected String/Buffer/Package\n",
+ uacpi_object_type_to_string(src->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_object_type(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *src, *dst;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (uacpi_likely(src->type == UACPI_OBJECT_REFERENCE))
+ src = reference_unwind(src)->inner_object;
+
+ dst->integer = src->type;
+ if (dst->integer == UACPI_OBJECT_BUFFER_INDEX)
+ dst->integer = UACPI_OBJECT_BUFFER_FIELD;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_timer(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *dst;
+
+ dst = item_array_at(&op_ctx->items, 0)->obj;
+ dst->integer = uacpi_kernel_get_nanoseconds_since_boot() / 100;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_stall_or_sleep(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_u64 time;
+
+ time = item_array_at(&op_ctx->items, 0)->obj->integer;
+
+ if (op_ctx->op->code == UACPI_AML_OP_SleepOp) {
+ /*
+ * ACPICA doesn't allow sleeps longer than 2 seconds,
+ * so we shouldn't either.
+ */
+ if (time > 2000)
+ time = 2000;
+
+ uacpi_namespace_write_unlock();
+ uacpi_kernel_sleep(time);
+ uacpi_namespace_write_lock();
+ } else {
+ // Spec says this must evaluate to a ByteData
+ if (time > 0xFF)
+ time = 0xFF;
+ uacpi_kernel_stall(time);
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_bcd(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_u64 src, dst = 0;
+ uacpi_size i;
+ uacpi_object *dst_obj;
+
+ src = item_array_at(&op_ctx->items, 0)->obj->integer;
+ dst_obj = item_array_at(&op_ctx->items, 2)->obj;
+ i = 64;
+
+ /*
+ * NOTE: ACPICA just errors out for invalid BCD, but NT allows it just fine.
+ * FromBCD matches NT behavior 1:1 even for invalid BCD, but ToBCD
+ * produces different results when the input is too large.
+ */
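+ /*
+ * Worked example: FromBCD(0x1234) yields 1234, and ToBCD(1234) packs the
+ * decimal digits back into nibbles, yielding 0x1234.
+ */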
+ if (op_ctx->op->code == UACPI_AML_OP_FromBCDOp) {
+ do {
+ i -= 4;
+ dst *= 10;
+ dst += (src >> i) & 0xF;
+ } while (i);
+ } else {
+ while (src != 0) {
+ dst >>= 4;
+ i -= 4;
+ dst |= (src % 10) << 60;
+ src /= 10;
+ }
+
+ dst >>= (i % 64);
+ }
+
+ dst_obj->integer = dst;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_unload(struct execution_context *ctx)
+{
+ UACPI_UNUSED(ctx);
+
+ /*
+ * Technically this doesn't exist in the wild: from the dumps that I have,
+ * the only user of the Unload opcode is the Surface Pro 3, which triggers
+ * an unload of some I2C-related table in response to some event.
+ *
+ * This op has long been deprecated by the specification, both because it
+ * hasn't really been used by anyone and because it introduces an enormous
+ * layer of complexity that no driver is really prepared to deal with
+ * (namely, namespace nodes disappearing from under its feet).
+ *
+ * Just pretend we have actually unloaded whatever the AML asked for. If it
+ * ever tries to re-load this table, that will just skip opcodes that create
+ * already existing objects, which should be good enough and mostly
+ * transparent to the AML.
+ */
+ uacpi_warn("refusing to unload a table from AML\n");
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_logical_not(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *src, *dst;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+
+ dst->type = UACPI_OBJECT_INTEGER;
+ dst->integer = src->integer ? 0 : ones();
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_bool handle_logical_equality(uacpi_object *lhs, uacpi_object *rhs)
+{
+ uacpi_bool res = UACPI_FALSE;
+
+ if (lhs->type == UACPI_OBJECT_STRING || lhs->type == UACPI_OBJECT_BUFFER) {
+ res = lhs->buffer->size == rhs->buffer->size;
+
+ if (res && lhs->buffer->size) {
+ res = uacpi_memcmp(
+ lhs->buffer->data,
+ rhs->buffer->data,
+ lhs->buffer->size
+ ) == 0;
+ }
+ } else if (lhs->type == UACPI_OBJECT_INTEGER) {
+ res = lhs->integer == rhs->integer;
+ }
+
+ return res;
+}
+
+static uacpi_bool handle_logical_less_or_greater(
+ uacpi_aml_op op, uacpi_object *lhs, uacpi_object *rhs
+)
+{
+ if (lhs->type == UACPI_OBJECT_STRING || lhs->type == UACPI_OBJECT_BUFFER) {
+ int res;
+ uacpi_buffer *lhs_buf, *rhs_buf;
+
+ lhs_buf = lhs->buffer;
+ rhs_buf = rhs->buffer;
+
+ res = uacpi_memcmp(lhs_buf->data, rhs_buf->data,
+ UACPI_MIN(lhs_buf->size, rhs_buf->size));
+ if (res == 0) {
+ if (lhs_buf->size < rhs_buf->size)
+ res = -1;
+ else if (lhs_buf->size > rhs_buf->size)
+ res = 1;
+ }
+
+ if (op == UACPI_AML_OP_LLessOp)
+ return res < 0;
+
+ return res > 0;
+ }
+
+ if (op == UACPI_AML_OP_LLessOp)
+ return lhs->integer < rhs->integer;
+
+ return lhs->integer > rhs->integer;
+}
+
+static uacpi_status handle_binary_logic(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_aml_op op = op_ctx->op->code;
+ uacpi_object *lhs, *rhs, *dst;
+ uacpi_bool res;
+
+ lhs = item_array_at(&op_ctx->items, 0)->obj;
+ rhs = item_array_at(&op_ctx->items, 1)->obj;
+ dst = item_array_at(&op_ctx->items, 2)->obj;
+
+ switch (op) {
+ case UACPI_AML_OP_LEqualOp:
+ case UACPI_AML_OP_LLessOp:
+ case UACPI_AML_OP_LGreaterOp:
+ // TODO: typecheck at parse time
+ if (lhs->type != rhs->type) {
+ uacpi_error(
+ "don't know how to do a logical comparison of '%s' and '%s'\n",
+ uacpi_object_type_to_string(lhs->type),
+ uacpi_object_type_to_string(rhs->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ if (op == UACPI_AML_OP_LEqualOp)
+ res = handle_logical_equality(lhs, rhs);
+ else
+ res = handle_logical_less_or_greater(op, lhs, rhs);
+ break;
+ default: {
+ uacpi_u64 lhs_int, rhs_int;
+
+ // NT only looks at the first 4 bytes of a buffer
+ lhs_int = object_to_integer(lhs, 4);
+ rhs_int = object_to_integer(rhs, 4);
+
+ if (op == UACPI_AML_OP_LandOp)
+ res = lhs_int && rhs_int;
+ else
+ res = lhs_int || rhs_int;
+ break;
+ }
+ }
+
+ dst->integer = res ? ones() : 0;
+ return UACPI_STATUS_OK;
+}
+
+enum match_op {
+ MTR = 0,
+ MEQ = 1,
+ MLE = 2,
+ MLT = 3,
+ MGE = 4,
+ MGT = 5,
+};
+
+static uacpi_bool match_one(enum match_op op, uacpi_u64 lhs, uacpi_u64 rhs)
+{
+ switch (op) {
+ case MTR:
+ return UACPI_TRUE;
+ case MEQ:
+ return lhs == rhs;
+ case MLE:
+ return lhs <= rhs;
+ case MLT:
+ return lhs < rhs;
+ case MGE:
+ return lhs >= rhs;
+ case MGT:
+ return lhs > rhs;
+ default:
+ return UACPI_FALSE;
+ }
+}
+
+static uacpi_status handle_match(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_package *pkg;
+ uacpi_u64 operand0, operand1, start_idx, i;
+ enum match_op mop0, mop1;
+ uacpi_object *dst;
+
+ pkg = item_array_at(&op_ctx->items, 0)->obj->package;
+ mop0 = item_array_at(&op_ctx->items, 1)->immediate;
+ operand0 = item_array_at(&op_ctx->items, 2)->obj->integer;
+ mop1 = item_array_at(&op_ctx->items, 3)->immediate;
+ operand1 = item_array_at(&op_ctx->items, 4)->obj->integer;
+ start_idx = item_array_at(&op_ctx->items, 5)->obj->integer;
+ dst = item_array_at(&op_ctx->items, 6)->obj;
+
+ for (i = start_idx; i < pkg->count; ++i) {
+ uacpi_object *obj = pkg->objects[i];
+
+ if (obj->type != UACPI_OBJECT_INTEGER)
+ continue;
+
+ if (match_one(mop0, obj->integer, operand0) &&
+ match_one(mop1, obj->integer, operand1))
+ break;
+ }
+
+ if (i < pkg->count)
+ dst->integer = i;
+ else
+ dst->integer = ones();
+
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * PkgLength :=
+ * PkgLeadByte |
+ * <pkgleadbyte bytedata> |
+ * <pkgleadbyte bytedata bytedata> | <pkgleadbyte bytedata bytedata bytedata>
+ * PkgLeadByte :=
+ * <bit 7-6: bytedata count that follows (0-3)>
+ * <bit 5-4: only used if pkglength <= 63>
+ * <bit 3-0: least significant package length nybble>
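+ *
+ * For example, a lead byte of 0x48 (one extra byte, low nybble 0x8)
+ * followed by 0x12 encodes a package length of 0x8 | (0x12 << 4) = 0x128.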
+ */
+static uacpi_status parse_package_length(struct call_frame *frame,
+ struct package_length *out_pkg)
+{
+ uacpi_u32 left, size;
+ uacpi_u8 *data, marker_length;
+
+ out_pkg->begin = frame->code_offset;
+ marker_length = 1;
+
+ left = call_frame_code_bytes_left(frame);
+ if (uacpi_unlikely(left < 1))
+ return UACPI_STATUS_AML_BAD_ENCODING;
+
+ data = call_frame_cursor(frame);
+ marker_length += *data >> 6;
+
+ if (uacpi_unlikely(left < marker_length))
+ return UACPI_STATUS_AML_BAD_ENCODING;
+
+ switch (marker_length) {
+ case 1:
+ size = *data & 0x3F;
+ break;
+ case 2:
+ case 3:
+ case 4: {
+ uacpi_u32 temp_byte = 0;
+
+ size = *data & 0xF;
+ uacpi_memcpy(&temp_byte, data + 1, marker_length - 1);
+
+ // marker_length - 1 is at most 3, so this shift is safe
+ size |= temp_byte << 4;
+ break;
+ }
+ }
+
+ frame->code_offset += marker_length;
+
+ out_pkg->end = out_pkg->begin + size;
+ if (uacpi_unlikely(out_pkg->end < out_pkg->begin)) {
+ uacpi_error(
+ "PkgLength overflow: start=%u, size=%u\n", out_pkg->begin, size
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * ByteData
+ * // bit 0-2: ArgCount (0-7)
+ * // bit 3: SerializeFlag
+ * // 0 NotSerialized
+ * // 1 Serialized
+ * // bit 4-7: SyncLevel (0x00-0x0f)
+ */
+static void init_method_flags(uacpi_control_method *method, uacpi_u8 flags_byte)
+{
+ method->args = flags_byte & 0x7;
+ method->is_serialized = (flags_byte >> 3) & 1;
+ method->sync_level = flags_byte >> 4;
+}
+
+static uacpi_status handle_create_method(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ struct uacpi_control_method *this_method, *method;
+ struct package_length *pkg;
+ struct uacpi_namespace_node *node;
+ struct uacpi_object *dst;
+ uacpi_u32 method_begin_offset, method_size;
+
+ this_method = ctx->cur_frame->method;
+ pkg = &item_array_at(&op_ctx->items, 0)->pkg;
+ node = item_array_at(&op_ctx->items, 1)->node;
+ method_begin_offset = item_array_at(&op_ctx->items, 3)->immediate;
+
+ if (uacpi_unlikely(pkg->end < pkg->begin ||
+ pkg->end < method_begin_offset ||
+ pkg->end > this_method->size)) {
+ uacpi_error(
+ "invalid method %.4s bounds [%u..%u] (parent size is %u)\n",
+ node->name.text, method_begin_offset, pkg->end, this_method->size
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ dst = item_array_at(&op_ctx->items, 4)->obj;
+
+ method = dst->method;
+ method_size = pkg->end - method_begin_offset;
+
+ if (method_size) {
+ method->code = uacpi_kernel_alloc(method_size);
+ if (uacpi_unlikely(method->code == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(
+ method->code,
+ ctx->cur_frame->method->code + method_begin_offset,
+ method_size
+ );
+ method->size = method_size;
+ method->owns_code = 1;
+ }
+
+ init_method_flags(method, item_array_at(&op_ctx->items, 2)->immediate);
+
+ node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED,
+ dst);
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_create_mutex_or_event(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_namespace_node *node;
+ uacpi_object *dst;
+
+ node = item_array_at(&op_ctx->items, 0)->node;
+
+ if (op_ctx->op->code == UACPI_AML_OP_MutexOp) {
+ dst = item_array_at(&op_ctx->items, 2)->obj;
+
+ // bits 0-3: SyncLevel (0x00-0x0f), bits 4-7: Reserved (must be 0)
+ dst->mutex->sync_level = item_array_at(&op_ctx->items, 1)->immediate;
+ dst->mutex->sync_level &= 0xF;
+ } else {
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+ }
+
+ node->object = uacpi_create_internal_reference(
+ UACPI_REFERENCE_KIND_NAMED,
+ dst
+ );
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_event_ctl(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *obj;
+
+ obj = uacpi_unwrap_internal_reference(
+ item_array_at(&op_ctx->items, 0)->obj
+ );
+ if (uacpi_unlikely(obj->type != UACPI_OBJECT_EVENT)) {
+ uacpi_error(
+ "%s: invalid argument '%s', expected an Event object\n",
+ op_ctx->op->name, uacpi_object_type_to_string(obj->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ switch (op_ctx->op->code)
+ {
+ case UACPI_AML_OP_SignalOp:
+ uacpi_kernel_signal_event(obj->event->handle);
+ break;
+ case UACPI_AML_OP_ResetOp:
+ uacpi_kernel_reset_event(obj->event->handle);
+ break;
+ case UACPI_AML_OP_WaitOp: {
+ uacpi_u64 timeout;
+ uacpi_bool ret;
+
+ timeout = item_array_at(&op_ctx->items, 1)->obj->integer;
+ if (timeout > 0xFFFF)
+ timeout = 0xFFFF;
+
+ uacpi_namespace_write_unlock();
+ ret = uacpi_kernel_wait_for_event(obj->event->handle, timeout);
+ uacpi_namespace_write_lock();
+
+ /*
+ * The return value here is inverted, we return 0 for success and Ones
+ * for timeout and everything else.
+ */
+ if (ret)
+ item_array_at(&op_ctx->items, 2)->obj->integer = 0;
+ break;
+ }
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_mutex_ctl(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_object *obj;
+
+ obj = uacpi_unwrap_internal_reference(
+ item_array_at(&op_ctx->items, 0)->obj
+ );
+ if (uacpi_unlikely(obj->type != UACPI_OBJECT_MUTEX)) {
+ uacpi_error(
+ "%s: invalid argument '%s', expected a Mutex object\n",
+ op_ctx->op->name, uacpi_object_type_to_string(obj->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ switch (op_ctx->op->code)
+ {
+ case UACPI_AML_OP_AcquireOp: {
+ uacpi_u64 timeout;
+ uacpi_u64 *return_value;
+ uacpi_status ret;
+
+ return_value = &item_array_at(&op_ctx->items, 2)->obj->integer;
+
+ if (uacpi_unlikely(ctx->sync_level > obj->mutex->sync_level)) {
+ uacpi_warn(
+ "ignoring attempt to acquire mutex @%p with a lower sync level "
+ "(%d < %d)\n", obj->mutex, obj->mutex->sync_level,
+ ctx->sync_level
+ );
+ break;
+ }
+
+ timeout = item_array_at(&op_ctx->items, 1)->immediate;
+ if (timeout > 0xFFFF)
+ timeout = 0xFFFF;
+
+ if (uacpi_this_thread_owns_aml_mutex(obj->mutex)) {
+ ret = uacpi_acquire_aml_mutex(obj->mutex, timeout);
+ if (uacpi_likely_success(ret))
+ *return_value = 0;
+ break;
+ }
+
+ ret = uacpi_acquire_aml_mutex(obj->mutex, timeout);
+ if (uacpi_unlikely_error(ret))
+ break;
+
+ ret = held_mutexes_array_push(&ctx->held_mutexes, obj->mutex);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_release_aml_mutex(obj->mutex);
+ return ret;
+ }
+
+ ctx->sync_level = obj->mutex->sync_level;
+ *return_value = 0;
+ break;
+ }
+
+ case UACPI_AML_OP_ReleaseOp: {
+ uacpi_status ret;
+
+ if (!uacpi_this_thread_owns_aml_mutex(obj->mutex)) {
+ uacpi_warn(
+ "attempted to release not-previously-acquired mutex object "
+ "@%p (%p)\n", obj->mutex, obj->mutex->handle
+ );
+ break;
+ }
+
+ ret = held_mutexes_array_remove_and_release(
+ &ctx->held_mutexes, obj->mutex,
+ FORCE_RELEASE_NO
+ );
+ if (uacpi_likely_success(ret)) {
+ uacpi_mutex **last_mutex;
+
+ last_mutex = held_mutexes_array_last(&ctx->held_mutexes);
+ if (last_mutex == UACPI_NULL) {
+ ctx->sync_level = 0;
+ break;
+ }
+
+ ctx->sync_level = (*last_mutex)->sync_level;
+ }
+ break;
+ }
+
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_notify(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ struct uacpi_namespace_node *node;
+ uacpi_u64 value;
+
+ node = item_array_at(&op_ctx->items, 0)->node;
+ value = item_array_at(&op_ctx->items, 1)->obj->integer;
+
+ ret = uacpi_notify_all(node, value);
+ if (uacpi_likely_success(ret))
+ return ret;
+
+ if (ret == UACPI_STATUS_NO_HANDLER) {
+ const uacpi_char *path;
+
+ path = uacpi_namespace_node_generate_absolute_path(node);
+ uacpi_warn(
+ "ignoring firmware Notify(%s, 0x%"UACPI_PRIX64") request, "
+ "no listeners\n", path, UACPI_FMT64(value)
+ );
+ uacpi_free_dynamic_string(path);
+
+ return UACPI_STATUS_OK;
+ }
+
+ if (ret == UACPI_STATUS_INVALID_ARGUMENT) {
+ uacpi_error("Notify() called on an invalid object %.4s\n",
+ node->name.text);
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return ret;
+}
+
+static uacpi_status handle_firmware_request(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_firmware_request req = { 0 };
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_BreakPointOp:
+ req.type = UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT;
+ req.breakpoint.ctx = ctx;
+ break;
+ case UACPI_AML_OP_FatalOp:
+ req.type = UACPI_FIRMWARE_REQUEST_TYPE_FATAL;
+ req.fatal.type = item_array_at(&op_ctx->items, 0)->immediate;
+ req.fatal.code = item_array_at(&op_ctx->items, 1)->immediate;
+ req.fatal.arg = item_array_at(&op_ctx->items, 2)->obj->integer;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ uacpi_namespace_write_unlock();
+ uacpi_kernel_handle_firmware_request(&req);
+ uacpi_namespace_write_lock();
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_create_named(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ struct uacpi_namespace_node *node;
+ uacpi_object *src;
+
+ node = item_array_at(&op_ctx->items, 0)->node;
+ src = item_array_at(&op_ctx->items, 1)->obj;
+
+ node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED,
+ src);
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
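+/*
+ * Buffer fields wider than the integer size of the current table revision
+ * (32 bits for rev1, 64 otherwise), as well as fields created via
+ * CreateField (force_buffer), are read back as buffers; everything else
+ * is read as an integer.
+ */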
+static uacpi_object_type buffer_field_get_read_type(
+ struct uacpi_buffer_field *field
+)
+{
+ if (field->bit_length > (g_uacpi_rt_ctx.is_rev1 ? 32u : 64u) ||
+ field->force_buffer)
+ return UACPI_OBJECT_BUFFER;
+
+ return UACPI_OBJECT_INTEGER;
+}
+
+static uacpi_status field_get_read_type(
+ uacpi_object *obj, uacpi_object_type *out_type
+)
+{
+ if (obj->type == UACPI_OBJECT_BUFFER_FIELD) {
+ *out_type = buffer_field_get_read_type(&obj->buffer_field);
+ return UACPI_STATUS_OK;
+ }
+
+ return uacpi_field_unit_get_read_type(obj->field_unit, out_type);
+}
+
+static uacpi_status field_byte_size(
+ uacpi_object *obj, uacpi_size *out_size
+)
+{
+ uacpi_size bit_length;
+
+ if (obj->type == UACPI_OBJECT_BUFFER_FIELD) {
+ bit_length = obj->buffer_field.bit_length;
+ } else {
+ uacpi_status ret;
+
+ ret = uacpi_field_unit_get_bit_length(obj->field_unit, &bit_length);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ *out_size = uacpi_round_up_bits_to_bytes(bit_length);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_field_read(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ struct uacpi_namespace_node *node;
+ uacpi_object *src_obj, *dst_obj;
+ uacpi_size dst_size;
+ void *dst = UACPI_NULL;
+ uacpi_data_view wtr_response = { 0 };
+
+ node = item_array_at(&op_ctx->items, 0)->node;
+ src_obj = uacpi_namespace_node_get_object(node);
+ dst_obj = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (op_ctx->op->code == UACPI_AML_OP_InternalOpReadFieldAsBuffer) {
+ uacpi_buffer *buf;
+
+ ret = field_byte_size(src_obj, &dst_size);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (dst_size != 0) {
+ buf = dst_obj->buffer;
+
+ dst = uacpi_kernel_alloc_zeroed(dst_size);
+ if (dst == UACPI_NULL)
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ buf->data = dst;
+ buf->size = dst_size;
+ }
+ } else {
+ dst = &dst_obj->integer;
+ dst_size = sizeof(uacpi_u64);
+ }
+
+ if (src_obj->type == UACPI_OBJECT_BUFFER_FIELD) {
+ uacpi_read_buffer_field(&src_obj->buffer_field, dst);
+ return UACPI_STATUS_OK;
+ }
+
+ ret = uacpi_read_field_unit(
+ src_obj->field_unit, dst, dst_size, &wtr_response
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (wtr_response.data != UACPI_NULL) {
+ uacpi_buffer *buf;
+
+ buf = dst_obj->buffer;
+ buf->data = wtr_response.data;
+ buf->size = wtr_response.length;
+ }
+
+ return ret;
+}
+
+static uacpi_status handle_create_buffer_field(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ struct uacpi_namespace_node *node;
+ uacpi_buffer *src_buf;
+ uacpi_object *field_obj;
+ uacpi_buffer_field *field;
+
+ /*
+ * Layout of items here:
+ * [0] -> Type checked source buffer object
+ * [1] -> Byte/bit index integer object
+     * [2] (if CreateField) -> bit length integer object
+ * [3] (2 if not CreateField) -> the new namespace node
+ * [4] (3 if not CreateField) -> the buffer field object we're creating here
+ */
+ src_buf = item_array_at(&op_ctx->items, 0)->obj->buffer;
+
+ if (op_ctx->op->code == UACPI_AML_OP_CreateFieldOp) {
+ uacpi_object *idx_obj, *len_obj;
+
+ idx_obj = item_array_at(&op_ctx->items, 1)->obj;
+ len_obj = item_array_at(&op_ctx->items, 2)->obj;
+ node = item_array_at(&op_ctx->items, 3)->node;
+ field_obj = item_array_at(&op_ctx->items, 4)->obj;
+ field = &field_obj->buffer_field;
+
+ field->bit_index = idx_obj->integer;
+
+ if (uacpi_unlikely(!len_obj->integer ||
+ len_obj->integer > 0xFFFFFFFF)) {
+ uacpi_error("invalid bit field length (%u)\n", field->bit_length);
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ field->bit_length = len_obj->integer;
+ field->force_buffer = UACPI_TRUE;
+ } else {
+ uacpi_object *idx_obj;
+
+ idx_obj = item_array_at(&op_ctx->items, 1)->obj;
+ node = item_array_at(&op_ctx->items, 2)->node;
+ field_obj = item_array_at(&op_ctx->items, 3)->obj;
+ field = &field_obj->buffer_field;
+
+ field->bit_index = idx_obj->integer;
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_CreateBitFieldOp:
+ field->bit_length = 1;
+ break;
+ case UACPI_AML_OP_CreateByteFieldOp:
+ field->bit_length = 8;
+ break;
+ case UACPI_AML_OP_CreateWordFieldOp:
+ field->bit_length = 16;
+ break;
+ case UACPI_AML_OP_CreateDWordFieldOp:
+ field->bit_length = 32;
+ break;
+ case UACPI_AML_OP_CreateQWordFieldOp:
+ field->bit_length = 64;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (op_ctx->op->code != UACPI_AML_OP_CreateBitFieldOp)
+ field->bit_index *= 8;
+ }
+
+ if (uacpi_unlikely((field->bit_index + field->bit_length) >
+ src_buf->size * 8)) {
+ uacpi_error(
+ "invalid buffer field: bits [%zu..%zu], buffer size is %zu bytes\n",
+ field->bit_index, field->bit_index + field->bit_length,
+ src_buf->size
+ );
+ return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
+ }
+
+ field->backing = src_buf;
+ uacpi_shareable_ref(field->backing);
+ node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED,
+ field_obj);
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_control_flow(struct execution_context *ctx)
+{
+ struct call_frame *frame = ctx->cur_frame;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ if (uacpi_unlikely(frame->last_while == UACPI_NULL)) {
+ uacpi_error(
+ "attempting to %s outside of a While block\n",
+ op_ctx->op->code == UACPI_AML_OP_BreakOp ? "Break" : "Continue"
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
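+    /*
+     * Unwind every block nested inside the innermost While, then either
+     * jump past its end (Break) or back to its start (Continue).
+     */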
+ for (;;) {
+ if (ctx->cur_block != frame->last_while) {
+ frame_reset_post_end_block(ctx, ctx->cur_block->type);
+ continue;
+ }
+
+ if (op_ctx->op->code == UACPI_AML_OP_BreakOp)
+ frame->code_offset = ctx->cur_block->end;
+ else
+ frame->code_offset = ctx->cur_block->begin;
+ frame_reset_post_end_block(ctx, ctx->cur_block->type);
+ break;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status create_named_scope(struct op_context *op_ctx)
+{
+ uacpi_namespace_node *node;
+ uacpi_object *obj;
+
+ node = item_array_at(&op_ctx->items, 1)->node;
+ obj = item_array_last(&op_ctx->items)->obj;
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_ProcessorOp: {
+ uacpi_processor *proc = obj->processor;
+ proc->id = item_array_at(&op_ctx->items, 2)->immediate;
+ proc->block_address = item_array_at(&op_ctx->items, 3)->immediate;
+ proc->block_length = item_array_at(&op_ctx->items, 4)->immediate;
+ break;
+ }
+
+ case UACPI_AML_OP_PowerResOp: {
+ uacpi_power_resource *power_res = &obj->power_resource;
+ power_res->system_level = item_array_at(&op_ctx->items, 2)->immediate;
+ power_res->resource_order = item_array_at(&op_ctx->items, 3)->immediate;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED,
+ obj);
+ if (uacpi_unlikely(node->object == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_code_block(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ switch (op_ctx->op->code) {
+ case UACPI_AML_OP_ProcessorOp:
+ case UACPI_AML_OP_PowerResOp:
+ case UACPI_AML_OP_ThermalZoneOp:
+ case UACPI_AML_OP_DeviceOp: {
+ uacpi_status ret;
+
+ ret = create_named_scope(op_ctx);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ UACPI_FALLTHROUGH;
+ }
+ case UACPI_AML_OP_ScopeOp:
+ case UACPI_AML_OP_IfOp:
+ case UACPI_AML_OP_ElseOp:
+ case UACPI_AML_OP_WhileOp: {
+ break;
+ }
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return begin_block_execution(ctx);
+}
+
+static uacpi_status handle_return(struct execution_context *ctx)
+{
+ uacpi_status ret;
+ uacpi_object *dst = UACPI_NULL;
+
+ ctx->cur_frame->code_offset = ctx->cur_frame->method->size;
+ ret = method_get_ret_object(ctx, &dst);
+
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ if (dst == UACPI_NULL)
+ return UACPI_STATUS_OK;
+
+ /*
+     * It should be possible to move the object here if the method returns
+     * a literal, e.g. Return(Buffer { ... }); otherwise we have to make a
+     * deep copy just to be safe.
+ */
+ return uacpi_object_assign(
+ dst,
+ item_array_at(&ctx->cur_op_ctx->items, 0)->obj,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
+ );
+}
+
+static void refresh_ctx_pointers(struct execution_context *ctx)
+{
+ struct call_frame *frame = ctx->cur_frame;
+
+ if (frame == UACPI_NULL) {
+ ctx->cur_op_ctx = UACPI_NULL;
+ ctx->prev_op_ctx = UACPI_NULL;
+ ctx->cur_block = UACPI_NULL;
+ return;
+ }
+
+ ctx->cur_op_ctx = op_context_array_last(&frame->pending_ops);
+ ctx->prev_op_ctx = op_context_array_one_before_last(&frame->pending_ops);
+ ctx->cur_block = code_block_array_last(&frame->code_blocks);
+}
+
+static uacpi_bool ctx_has_non_preempted_op(struct execution_context *ctx)
+{
+ return ctx->cur_op_ctx && !ctx->cur_op_ctx->preempted;
+}
+
+enum op_trace_action_type {
+ OP_TRACE_ACTION_BEGIN,
+ OP_TRACE_ACTION_RESUME,
+ OP_TRACE_ACTION_END,
+};
+
+static const uacpi_char *const op_trace_action_types[3] = {
+ [OP_TRACE_ACTION_BEGIN] = "BEGIN",
+ [OP_TRACE_ACTION_RESUME] = "RESUME",
+ [OP_TRACE_ACTION_END] = "END",
+};
+
+static inline void trace_op(
+ const struct uacpi_op_spec *op, enum op_trace_action_type action
+)
+{
+ uacpi_debug(
+ "%s OP '%s' (0x%04X)\n",
+ op_trace_action_types[action], op->name, op->code
+ );
+}
+
+static inline void trace_pop(uacpi_u8 pop)
+{
+ uacpi_debug(" pOP: %s (0x%02X)\n", uacpi_parse_op_to_string(pop), pop);
+}
+
+static uacpi_status frame_push_args(struct call_frame *frame,
+ struct op_context *op_ctx)
+{
+ uacpi_size i;
+
+ /*
+ * MethodCall items:
+ * items[0] -> method namespace node
+ * items[1] -> immediate that was used for parsing the arguments
+     * items[2..2+nargs-1] -> method arguments
+ * items[-1] -> return value object
+ *
+ * Here we only care about the arguments though.
+ */
+ for (i = 2; i < item_array_size(&op_ctx->items) - 1; i++) {
+ uacpi_object *src, *dst;
+
+ src = item_array_at(&op_ctx->items, i)->obj;
+
+ dst = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_ARG, src);
+ if (uacpi_unlikely(dst == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ frame->args[i - 2] = dst;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status frame_setup_base_scope(struct call_frame *frame,
+ uacpi_namespace_node *scope,
+ uacpi_control_method *method)
+{
+ struct code_block *block;
+
+ block = code_block_array_alloc(&frame->code_blocks);
+ if (uacpi_unlikely(block == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ block->type = CODE_BLOCK_SCOPE;
+ block->node = scope;
+ block->begin = 0;
+ block->end = method->size;
+ frame->method = method;
+ frame->cur_scope = scope;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status push_new_frame(struct execution_context *ctx,
+ struct call_frame **out_frame)
+{
+ struct call_frame_array *call_stack = &ctx->call_stack;
+ struct call_frame *prev_frame;
+
+ *out_frame = call_frame_array_calloc(call_stack);
+ if (uacpi_unlikely(*out_frame == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ /*
+ * Allocating a new frame might have reallocated the dynamic buffer so our
+ * execution_context members might now be pointing to freed memory.
+ * Refresh them here.
+ */
+ prev_frame = call_frame_array_one_before_last(call_stack);
+ ctx->cur_frame = prev_frame;
+ refresh_ctx_pointers(ctx);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_bool maybe_end_block(struct execution_context *ctx)
+{
+ struct code_block *block = ctx->cur_block;
+ struct call_frame *cur_frame = ctx->cur_frame;
+
+ if (!block)
+ return UACPI_FALSE;
+ if (cur_frame->code_offset != block->end)
+ return UACPI_FALSE;
+
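+    // While blocks loop: jump back to the beginning so that the predicate
+    // is re-evaluated on the next pass.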
+ if (block->type == CODE_BLOCK_WHILE)
+ cur_frame->code_offset = block->begin;
+
+ frame_reset_post_end_block(ctx, block->type);
+ return UACPI_TRUE;
+}
+
+static uacpi_status store_to_target(
+ uacpi_object *dst, uacpi_object *src, uacpi_data_view *wtr_response
+)
+{
+ uacpi_status ret;
+
+ switch (dst->type) {
+ case UACPI_OBJECT_DEBUG:
+ ret = debug_store(src);
+ break;
+ case UACPI_OBJECT_REFERENCE:
+ ret = store_to_reference(dst, src, wtr_response);
+ break;
+
+ case UACPI_OBJECT_BUFFER_INDEX:
+ src = uacpi_unwrap_internal_reference(src);
+ ret = object_assign_with_implicit_cast(dst, src, wtr_response);
+ break;
+
+ case UACPI_OBJECT_INTEGER:
+ // NULL target
+ if (dst->integer == 0) {
+ ret = UACPI_STATUS_OK;
+ break;
+ }
+ UACPI_FALLTHROUGH;
+ default:
+ uacpi_error("attempted to store to an invalid target: %s\n",
+ uacpi_object_type_to_string(dst->type));
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return ret;
+}
+
+static uacpi_status handle_copy_object_or_store(struct execution_context *ctx)
+{
+ uacpi_object *src, *dst;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (op_ctx->op->code == UACPI_AML_OP_StoreOp) {
+ uacpi_status ret;
+ uacpi_data_view wtr_response = { 0 };
+
+ ret = store_to_target(dst, src, &wtr_response);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ /*
+ * This was a write-then-read field access since we got a response
+ * buffer back from this store. Now we have to return this buffer
+ * as a prvalue from the StoreOp so that it can be used by AML to
+ * retrieve the response.
+ */
+ if (wtr_response.data != UACPI_NULL) {
+ uacpi_object *wtr_response_obj;
+
+ wtr_response_obj = uacpi_create_object(UACPI_OBJECT_BUFFER);
+ if (uacpi_unlikely(wtr_response_obj == UACPI_NULL)) {
+ uacpi_free(wtr_response.data, wtr_response.length);
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ wtr_response_obj->buffer->data = wtr_response.data;
+ wtr_response_obj->buffer->size = wtr_response.length;
+
+ uacpi_object_unref(src);
+ item_array_at(&op_ctx->items, 0)->obj = wtr_response_obj;
+ }
+
+ return ret;
+ }
+
+ if (dst->type != UACPI_OBJECT_REFERENCE)
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+
+ return copy_object_to_reference(dst, src);
+}
+
+static uacpi_status handle_inc_dec(struct execution_context *ctx)
+{
+ uacpi_object *src, *dst;
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+ uacpi_bool field_allowed = UACPI_FALSE;
+ uacpi_object_type true_src_type;
+ uacpi_status ret;
+
+ src = item_array_at(&op_ctx->items, 0)->obj;
+ dst = item_array_at(&op_ctx->items, 1)->obj;
+
+ if (src->type == UACPI_OBJECT_REFERENCE) {
+ /*
+ * Increment/Decrement are the only two operators that modify the value
+ * in-place, thus we need very specific dereference rules here.
+ *
+ * Reading buffer fields & field units is only allowed if we were passed
+ * a namestring directly as opposed to some nested reference chain
+ * containing a field at the bottom.
+ */
+ if (src->flags == UACPI_REFERENCE_KIND_NAMED)
+ field_allowed = src->inner_object->type != UACPI_OBJECT_REFERENCE;
+
+ src = reference_unwind(src)->inner_object;
+ } // else buffer index
+
+ true_src_type = src->type;
+
+ switch (true_src_type) {
+ case UACPI_OBJECT_INTEGER:
+ dst->integer = src->integer;
+ break;
+ case UACPI_OBJECT_FIELD_UNIT:
+ case UACPI_OBJECT_BUFFER_FIELD:
+ if (uacpi_unlikely(!field_allowed))
+ goto out_bad_type;
+
+ ret = field_get_read_type(src, &true_src_type);
+ if (uacpi_unlikely_error(ret))
+ goto out_bad_type;
+ if (true_src_type != UACPI_OBJECT_INTEGER)
+ goto out_bad_type;
+
+ if (src->type == UACPI_OBJECT_FIELD_UNIT) {
+ ret = uacpi_read_field_unit(
+ src->field_unit, &dst->integer, sizeof_int(),
+ UACPI_NULL
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ } else {
+ uacpi_read_buffer_field(&src->buffer_field, &dst->integer);
+ }
+ break;
+ case UACPI_OBJECT_BUFFER_INDEX:
+ dst->integer = *buffer_index_cursor(&src->buffer_index);
+ break;
+ default:
+ goto out_bad_type;
+ }
+
+ if (op_ctx->op->code == UACPI_AML_OP_IncrementOp)
+ dst->integer++;
+ else
+ dst->integer--;
+
+ return UACPI_STATUS_OK;
+
+out_bad_type:
+ uacpi_error("Increment/Decrement: invalid object type '%s'\n",
+ uacpi_object_type_to_string(true_src_type));
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+}
+
+static uacpi_status enter_method(
+ struct execution_context *ctx, struct call_frame *new_frame,
+ uacpi_control_method *method
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ uacpi_shareable_ref(method);
+
+ if (!method->is_serialized)
+ return ret;
+
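+    /*
+     * Serialized methods are guarded by an implicit mutex created lazily
+     * at the method's declared sync level; acquire it unless this thread
+     * already holds it, then raise the current sync level.
+     */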
+ if (uacpi_unlikely(ctx->sync_level > method->sync_level)) {
+ uacpi_error(
+ "cannot invoke method @%p, sync level %d is too low "
+ "(current is %d)\n",
+ method, method->sync_level, ctx->sync_level
+ );
+ return UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH;
+ }
+
+ if (method->mutex == UACPI_NULL) {
+ method->mutex = uacpi_create_mutex();
+ if (uacpi_unlikely(method->mutex == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ method->mutex->sync_level = method->sync_level;
+ }
+
+ if (!uacpi_this_thread_owns_aml_mutex(method->mutex)) {
+ ret = uacpi_acquire_aml_mutex(method->mutex, 0xFFFF);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = held_mutexes_array_push(&ctx->held_mutexes, method->mutex);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_release_aml_mutex(method->mutex);
+ return ret;
+ }
+ }
+
+ new_frame->prev_sync_level = ctx->sync_level;
+ ctx->sync_level = method->sync_level;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status push_op(struct execution_context *ctx)
+{
+ struct call_frame *frame = ctx->cur_frame;
+ struct op_context *op_ctx;
+
+ op_ctx = op_context_array_calloc(&frame->pending_ops);
+ if (op_ctx == UACPI_NULL)
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ op_ctx->op = ctx->cur_op;
+ refresh_ctx_pointers(ctx);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_bool pop_item(struct op_context *op_ctx)
+{
+ struct item *item;
+
+ if (item_array_size(&op_ctx->items) == 0)
+ return UACPI_FALSE;
+
+ item = item_array_last(&op_ctx->items);
+
+ if (item->type == ITEM_OBJECT)
+ uacpi_object_unref(item->obj);
+
+ if (item->type == ITEM_NAMESPACE_NODE)
+ uacpi_namespace_node_unref(item->node);
+
+ item_array_pop(&op_ctx->items);
+ return UACPI_TRUE;
+}
+
+static void pop_op(struct execution_context *ctx)
+{
+ struct call_frame *frame = ctx->cur_frame;
+ struct op_context *cur_op_ctx = ctx->cur_op_ctx;
+
+ while (pop_item(cur_op_ctx));
+
+ item_array_clear(&cur_op_ctx->items);
+ op_context_array_pop(&frame->pending_ops);
+ refresh_ctx_pointers(ctx);
+}
+
+static void call_frame_clear(struct call_frame *frame)
+{
+ uacpi_size i;
+ op_context_array_clear(&frame->pending_ops);
+ code_block_array_clear(&frame->code_blocks);
+
+ while (temp_namespace_node_array_size(&frame->temp_nodes) != 0) {
+ uacpi_namespace_node *node;
+
+ node = *temp_namespace_node_array_last(&frame->temp_nodes);
+ uacpi_namespace_node_uninstall(node);
+ temp_namespace_node_array_pop(&frame->temp_nodes);
+ }
+ temp_namespace_node_array_clear(&frame->temp_nodes);
+
+ for (i = 0; i < 7; ++i)
+ uacpi_object_unref(frame->args[i]);
+ for (i = 0; i < 8; ++i)
+ uacpi_object_unref(frame->locals[i]);
+
+ uacpi_method_unref(frame->method);
+}
+
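+/*
+ * Maps each parse op to the kind of item it pushes onto the current op
+ * context's item array; ops not listed here (ITEM_NONE) produce nothing.
+ */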
+static uacpi_u8 parse_op_generates_item[0x100] = {
+ [UACPI_PARSE_OP_SIMPLE_NAME] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_SUPERNAME] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_TERM_ARG] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_OPERAND] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_STRING] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_COMPUTATIONAL_DATA] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_TARGET] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_PKGLEN] = ITEM_PACKAGE_LENGTH,
+ [UACPI_PARSE_OP_TRACKED_PKGLEN] = ITEM_PACKAGE_LENGTH,
+ [UACPI_PARSE_OP_CREATE_NAMESTRING] = ITEM_NAMESPACE_NODE,
+ [UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE,
+ [UACPI_PARSE_OP_EXISTING_NAMESTRING] = ITEM_NAMESPACE_NODE,
+ [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL] = ITEM_NAMESPACE_NODE,
+ [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE,
+ [UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_LOAD_INLINE_IMM] = ITEM_IMMEDIATE,
+ [UACPI_PARSE_OP_LOAD_ZERO_IMM] = ITEM_IMMEDIATE,
+ [UACPI_PARSE_OP_LOAD_IMM] = ITEM_IMMEDIATE,
+ [UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_LOAD_FALSE_OBJECT] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_LOAD_TRUE_OBJECT] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_OBJECT_ALLOC] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_OBJECT_ALLOC_TYPED] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC] = ITEM_EMPTY_OBJECT,
+ [UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY] = ITEM_OBJECT,
+ [UACPI_PARSE_OP_RECORD_AML_PC] = ITEM_IMMEDIATE,
+};
+
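+/*
+ * Each op spec carries a small byte-code program (decode_ops) describing
+ * how to parse and execute that AML opcode; ctx->pc indexes the next byte,
+ * with larger programs stored out of line.
+ */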
+static const uacpi_u8 *op_decode_cursor(const struct op_context *ctx)
+{
+ const struct uacpi_op_spec *spec = ctx->op;
+
+ if (spec->properties & UACPI_OP_PROPERTY_OUT_OF_LINE)
+ return &spec->indirect_decode_ops[ctx->pc];
+
+ return &spec->decode_ops[ctx->pc];
+}
+
+static uacpi_u8 op_decode_byte(struct op_context *ctx)
+{
+ uacpi_u8 byte;
+
+ byte = *op_decode_cursor(ctx);
+ ctx->pc++;
+
+ return byte;
+}
+
+static uacpi_aml_op op_decode_aml_op(struct op_context *op_ctx)
+{
+ uacpi_aml_op op = 0;
+
+ op |= op_decode_byte(op_ctx);
+ op |= op_decode_byte(op_ctx) << 8;
+
+ return op;
+}
+
+// MSVC doesn't support __VA_OPT__ so we do this weirdness
+#define EXEC_OP_DO_LVL(lvl, reason, ...) \
+ uacpi_##lvl("Op 0x%04X ('%s'): "reason"\n", \
+ op_ctx->op->code, op_ctx->op->name __VA_ARGS__)
+
+#define EXEC_OP_DO_ERR(reason, ...) EXEC_OP_DO_LVL(error, reason, __VA_ARGS__)
+#define EXEC_OP_DO_WARN(reason, ...) EXEC_OP_DO_LVL(warn, reason, __VA_ARGS__)
+
+#define EXEC_OP_ERR_2(reason, arg0, arg1) EXEC_OP_DO_ERR(reason, ,arg0, arg1)
+#define EXEC_OP_ERR_1(reason, arg0) EXEC_OP_DO_ERR(reason, ,arg0)
+#define EXEC_OP_ERR(reason) EXEC_OP_DO_ERR(reason)
+
+#define EXEC_OP_WARN(reason) EXEC_OP_DO_WARN(reason)
+
+#define SPEC_SIMPLE_NAME "SimpleName := NameString | ArgObj | LocalObj"
+#define SPEC_SUPER_NAME \
+ "SuperName := SimpleName | DebugObj | ReferenceTypeOpcode"
+#define SPEC_TERM_ARG \
+ "TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj"
+#define SPEC_OPERAND "Operand := TermArg => Integer"
+#define SPEC_STRING "String := TermArg => String"
+#define SPEC_TARGET "Target := SuperName | NullName"
+
+#define SPEC_COMPUTATIONAL_DATA \
+ "ComputationalData := ByteConst | WordConst | DWordConst | QWordConst " \
+ "| String | ConstObj | RevisionOp | DefBuffer"
+
+static uacpi_bool op_wants_supername(enum uacpi_parse_op op)
+{
+ switch (op) {
+ case UACPI_PARSE_OP_SIMPLE_NAME:
+ case UACPI_PARSE_OP_SUPERNAME:
+ case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_TARGET:
+ return UACPI_TRUE;
+ default:
+ return UACPI_FALSE;
+ }
+}
+
+static uacpi_bool op_wants_term_arg_or_operand(enum uacpi_parse_op op)
+{
+ switch (op) {
+ case UACPI_PARSE_OP_TERM_ARG:
+ case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
+ case UACPI_PARSE_OP_OPERAND:
+ case UACPI_PARSE_OP_STRING:
+ case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
+ return UACPI_TRUE;
+ default:
+ return UACPI_FALSE;
+ }
+}
+
+static uacpi_bool op_allows_unresolved(enum uacpi_parse_op op)
+{
+ switch (op) {
+ case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL:
+ return UACPI_TRUE;
+ default:
+ return UACPI_FALSE;
+ }
+}
+
+static uacpi_bool op_allows_unresolved_if_load(enum uacpi_parse_op op)
+{
+ switch (op) {
+ case UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD:
+ case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD:
+ return UACPI_TRUE;
+ default:
+ return UACPI_FALSE;
+ }
+}
+
+static uacpi_status op_typecheck(const struct op_context *op_ctx,
+ const struct op_context *cur_op_ctx)
+{
+ const uacpi_char *expected_type_str;
+ uacpi_u8 ok_mask = 0;
+ uacpi_u8 props = cur_op_ctx->op->properties;
+
+ switch (*op_decode_cursor(op_ctx)) {
+ // SimpleName := NameString | ArgObj | LocalObj
+ case UACPI_PARSE_OP_SIMPLE_NAME:
+ expected_type_str = SPEC_SIMPLE_NAME;
+ ok_mask |= UACPI_OP_PROPERTY_SIMPLE_NAME;
+ break;
+
+ // Target := SuperName | NullName
+ case UACPI_PARSE_OP_TARGET:
+ expected_type_str = SPEC_TARGET;
+ ok_mask |= UACPI_OP_PROPERTY_TARGET | UACPI_OP_PROPERTY_SUPERNAME;
+ break;
+
+ // SuperName := SimpleName | DebugObj | ReferenceTypeOpcode
+ case UACPI_PARSE_OP_SUPERNAME:
+ case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
+ expected_type_str = SPEC_SUPER_NAME;
+ ok_mask |= UACPI_OP_PROPERTY_SUPERNAME;
+ break;
+
+ // TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj
+ case UACPI_PARSE_OP_TERM_ARG:
+ case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_OPERAND:
+ case UACPI_PARSE_OP_STRING:
+ case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
+ expected_type_str = SPEC_TERM_ARG;
+ ok_mask |= UACPI_OP_PROPERTY_TERM_ARG;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (!(props & ok_mask)) {
+ EXEC_OP_ERR_2("invalid argument: '%s', expected a %s",
+ cur_op_ctx->op->name, expected_type_str);
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status typecheck_obj(
+ const struct op_context *op_ctx,
+ const uacpi_object *obj,
+ enum uacpi_object_type expected_type,
+ const uacpi_char *spec_desc
+)
+{
+ if (uacpi_likely(obj->type == expected_type))
+ return UACPI_STATUS_OK;
+
+ EXEC_OP_ERR_2("invalid argument type: %s, expected a %s",
+ uacpi_object_type_to_string(obj->type), spec_desc);
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+}
+
+static uacpi_status typecheck_operand(
+ const struct op_context *op_ctx,
+ const uacpi_object *obj
+)
+{
+ return typecheck_obj(op_ctx, obj, UACPI_OBJECT_INTEGER, SPEC_OPERAND);
+}
+
+static uacpi_status typecheck_string(
+ const struct op_context *op_ctx,
+ const uacpi_object *obj
+)
+{
+ return typecheck_obj(op_ctx, obj, UACPI_OBJECT_STRING, SPEC_STRING);
+}
+
+static uacpi_status typecheck_computational_data(
+ const struct op_context *op_ctx,
+ const uacpi_object *obj
+)
+{
+ switch (obj->type) {
+ case UACPI_OBJECT_STRING:
+ case UACPI_OBJECT_BUFFER:
+ case UACPI_OBJECT_INTEGER:
+ return UACPI_STATUS_OK;
+ default:
+ EXEC_OP_ERR_2(
+ "invalid argument type: %s, expected a %s",
+ uacpi_object_type_to_string(obj->type),
+ SPEC_COMPUTATIONAL_DATA
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+}
+
+static void emit_op_skip_warn(const struct op_context *op_ctx)
+{
+ EXEC_OP_WARN("skipping due to previous errors");
+}
+
+static void trace_named_object_lookup_or_creation_failure(
+ struct call_frame *frame, uacpi_size offset, enum uacpi_parse_op op,
+ uacpi_status ret, enum uacpi_log_level level
+)
+{
+ static const uacpi_char *oom_prefix = "<...>";
+ static const uacpi_char *empty_string = "";
+ static const uacpi_char *unknown_path = "<unknown-path>";
+ static const uacpi_char *invalid_path = "<invalid-path>";
+
+ uacpi_status conv_ret;
+ const uacpi_char *action;
+ const uacpi_char *requested_path_to_print;
+ const uacpi_char *middle_part = UACPI_NULL;
+ const uacpi_char *prefix_path = UACPI_NULL;
+ uacpi_char *requested_path = UACPI_NULL;
+ uacpi_size length;
+ uacpi_bool is_create;
+
+ is_create = op == UACPI_PARSE_OP_CREATE_NAMESTRING ||
+ op == UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD;
+
+ if (is_create)
+ action = "create";
+ else
+ action = "lookup";
+
+ conv_ret = name_string_to_path(
+ frame, offset, &requested_path, &length
+ );
+ if (uacpi_unlikely_error(conv_ret)) {
+ if (conv_ret == UACPI_STATUS_OUT_OF_MEMORY)
+ requested_path_to_print = unknown_path;
+ else
+ requested_path_to_print = invalid_path;
+ } else {
+ requested_path_to_print = requested_path;
+ }
+
+ if (requested_path && requested_path[0] != '\\') {
+ prefix_path = uacpi_namespace_node_generate_absolute_path(
+ frame->cur_scope
+ );
+ if (uacpi_unlikely(prefix_path == UACPI_NULL))
+ prefix_path = oom_prefix;
+
+ if (prefix_path[1] != '\0')
+ middle_part = ".";
+ } else {
+ prefix_path = empty_string;
+ }
+
+ if (middle_part == UACPI_NULL)
+ middle_part = empty_string;
+
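+    // A length of 5 means a bare 4-character NameSeg (plus NUL); lookups
+    // of those also search parent scopes, so word the message accordingly.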
+ if (length == 5 && !is_create) {
+ uacpi_log_lvl(
+ level,
+ "unable to %s named object '%s' within (or above) "
+ "scope '%s': %s\n", action, requested_path_to_print,
+ prefix_path, uacpi_status_to_string(ret)
+ );
+ } else {
+ uacpi_log_lvl(
+ level,
+ "unable to %s named object '%s%s%s': %s\n",
+ action, prefix_path, middle_part,
+ requested_path_to_print, uacpi_status_to_string(ret)
+ );
+ }
+
+ uacpi_free(requested_path, length);
+ if (prefix_path != oom_prefix && prefix_path != empty_string)
+ uacpi_free_dynamic_string(prefix_path);
+}
+
+static uacpi_status uninstalled_op_handler(struct execution_context *ctx)
+{
+ struct op_context *op_ctx = ctx->cur_op_ctx;
+
+ EXEC_OP_ERR("no dedicated handler installed");
+ return UACPI_STATUS_UNIMPLEMENTED;
+}
+
+enum op_handler {
+ OP_HANDLER_UNINSTALLED = 0,
+ OP_HANDLER_LOCAL,
+ OP_HANDLER_ARG,
+ OP_HANDLER_STRING,
+ OP_HANDLER_BINARY_MATH,
+ OP_HANDLER_CONTROL_FLOW,
+ OP_HANDLER_CODE_BLOCK,
+ OP_HANDLER_RETURN,
+ OP_HANDLER_CREATE_METHOD,
+ OP_HANDLER_COPY_OBJECT_OR_STORE,
+ OP_HANDLER_INC_DEC,
+ OP_HANDLER_REF_OR_DEREF_OF,
+ OP_HANDLER_LOGICAL_NOT,
+ OP_HANDLER_BINARY_LOGIC,
+ OP_HANDLER_NAMED_OBJECT,
+ OP_HANDLER_BUFFER,
+ OP_HANDLER_PACKAGE,
+ OP_HANDLER_CREATE_NAMED,
+ OP_HANDLER_CREATE_BUFFER_FIELD,
+ OP_HANDLER_READ_FIELD,
+ OP_HANDLER_ALIAS,
+ OP_HANDLER_CONCATENATE,
+ OP_HANDLER_CONCATENATE_RES,
+ OP_HANDLER_SIZEOF,
+ OP_HANDLER_UNARY_MATH,
+ OP_HANDLER_INDEX,
+ OP_HANDLER_OBJECT_TYPE,
+ OP_HANDLER_CREATE_OP_REGION,
+ OP_HANDLER_CREATE_DATA_REGION,
+ OP_HANDLER_CREATE_FIELD,
+ OP_HANDLER_TO,
+ OP_HANDLER_TO_STRING,
+ OP_HANDLER_TIMER,
+ OP_HANDLER_MID,
+ OP_HANDLER_MATCH,
+ OP_HANDLER_CREATE_MUTEX_OR_EVENT,
+ OP_HANDLER_BCD,
+ OP_HANDLER_UNLOAD,
+ OP_HANDLER_LOAD_TABLE,
+ OP_HANDLER_LOAD,
+ OP_HANDLER_STALL_OR_SLEEP,
+ OP_HANDLER_EVENT_CTL,
+ OP_HANDLER_MUTEX_CTL,
+ OP_HANDLER_NOTIFY,
+ OP_HANDLER_FIRMWARE_REQUEST,
+};
+
+static uacpi_status (*op_handlers[])(struct execution_context *ctx) = {
+ /*
+ * All OPs that don't have a handler dispatch to here if
+ * UACPI_PARSE_OP_INVOKE_HANDLER is reached.
+ */
+ [OP_HANDLER_UNINSTALLED] = uninstalled_op_handler,
+ [OP_HANDLER_LOCAL] = handle_local,
+ [OP_HANDLER_ARG] = handle_arg,
+ [OP_HANDLER_NAMED_OBJECT] = handle_named_object,
+ [OP_HANDLER_STRING] = handle_string,
+ [OP_HANDLER_BINARY_MATH] = handle_binary_math,
+ [OP_HANDLER_CONTROL_FLOW] = handle_control_flow,
+ [OP_HANDLER_CODE_BLOCK] = handle_code_block,
+ [OP_HANDLER_RETURN] = handle_return,
+ [OP_HANDLER_CREATE_METHOD] = handle_create_method,
+ [OP_HANDLER_CREATE_MUTEX_OR_EVENT] = handle_create_mutex_or_event,
+ [OP_HANDLER_COPY_OBJECT_OR_STORE] = handle_copy_object_or_store,
+ [OP_HANDLER_INC_DEC] = handle_inc_dec,
+ [OP_HANDLER_REF_OR_DEREF_OF] = handle_ref_or_deref_of,
+ [OP_HANDLER_LOGICAL_NOT] = handle_logical_not,
+ [OP_HANDLER_BINARY_LOGIC] = handle_binary_logic,
+ [OP_HANDLER_BUFFER] = handle_buffer,
+ [OP_HANDLER_PACKAGE] = handle_package,
+ [OP_HANDLER_CREATE_NAMED] = handle_create_named,
+ [OP_HANDLER_CREATE_BUFFER_FIELD] = handle_create_buffer_field,
+ [OP_HANDLER_READ_FIELD] = handle_field_read,
+ [OP_HANDLER_TO] = handle_to,
+ [OP_HANDLER_ALIAS] = handle_create_alias,
+ [OP_HANDLER_CONCATENATE] = handle_concatenate,
+ [OP_HANDLER_CONCATENATE_RES] = handle_concatenate_res,
+ [OP_HANDLER_SIZEOF] = handle_sizeof,
+ [OP_HANDLER_UNARY_MATH] = handle_unary_math,
+ [OP_HANDLER_INDEX] = handle_index,
+ [OP_HANDLER_OBJECT_TYPE] = handle_object_type,
+ [OP_HANDLER_CREATE_OP_REGION] = handle_create_op_region,
+ [OP_HANDLER_CREATE_DATA_REGION] = handle_create_data_region,
+ [OP_HANDLER_CREATE_FIELD] = handle_create_field,
+ [OP_HANDLER_TIMER] = handle_timer,
+ [OP_HANDLER_TO_STRING] = handle_to_string,
+ [OP_HANDLER_MID] = handle_mid,
+ [OP_HANDLER_MATCH] = handle_match,
+ [OP_HANDLER_BCD] = handle_bcd,
+ [OP_HANDLER_UNLOAD] = handle_unload,
+ [OP_HANDLER_LOAD_TABLE] = handle_load_table,
+ [OP_HANDLER_LOAD] = handle_load,
+ [OP_HANDLER_STALL_OR_SLEEP] = handle_stall_or_sleep,
+ [OP_HANDLER_EVENT_CTL] = handle_event_ctl,
+ [OP_HANDLER_MUTEX_CTL] = handle_mutex_ctl,
+ [OP_HANDLER_NOTIFY] = handle_notify,
+ [OP_HANDLER_FIRMWARE_REQUEST] = handle_firmware_request,
+};
+
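+/*
+ * Handler dispatch for single-byte AML opcodes; extended (0x5B-prefixed)
+ * opcodes go through handler_idx_of_ext_op below, indexed by the byte
+ * following the prefix.
+ */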
+static uacpi_u8 handler_idx_of_op[0x100] = {
+ [UACPI_AML_OP_Local0Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local1Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local2Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local3Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local4Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local5Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local6Op] = OP_HANDLER_LOCAL,
+ [UACPI_AML_OP_Local7Op] = OP_HANDLER_LOCAL,
+
+ [UACPI_AML_OP_Arg0Op] = OP_HANDLER_ARG,
+ [UACPI_AML_OP_Arg1Op] = OP_HANDLER_ARG,
+ [UACPI_AML_OP_Arg2Op] = OP_HANDLER_ARG,
+ [UACPI_AML_OP_Arg3Op] = OP_HANDLER_ARG,
+ [UACPI_AML_OP_Arg4Op] = OP_HANDLER_ARG,
+ [UACPI_AML_OP_Arg5Op] = OP_HANDLER_ARG,
+ [UACPI_AML_OP_Arg6Op] = OP_HANDLER_ARG,
+
+ [UACPI_AML_OP_StringPrefix] = OP_HANDLER_STRING,
+
+ [UACPI_AML_OP_AddOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_SubtractOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_MultiplyOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_DivideOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_ShiftLeftOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_ShiftRightOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_AndOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_NandOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_OrOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_NorOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_XorOp] = OP_HANDLER_BINARY_MATH,
+ [UACPI_AML_OP_ModOp] = OP_HANDLER_BINARY_MATH,
+
+ [UACPI_AML_OP_IfOp] = OP_HANDLER_CODE_BLOCK,
+ [UACPI_AML_OP_ElseOp] = OP_HANDLER_CODE_BLOCK,
+ [UACPI_AML_OP_WhileOp] = OP_HANDLER_CODE_BLOCK,
+ [UACPI_AML_OP_ScopeOp] = OP_HANDLER_CODE_BLOCK,
+
+ [UACPI_AML_OP_ContinueOp] = OP_HANDLER_CONTROL_FLOW,
+ [UACPI_AML_OP_BreakOp] = OP_HANDLER_CONTROL_FLOW,
+
+ [UACPI_AML_OP_ReturnOp] = OP_HANDLER_RETURN,
+
+ [UACPI_AML_OP_MethodOp] = OP_HANDLER_CREATE_METHOD,
+
+ [UACPI_AML_OP_StoreOp] = OP_HANDLER_COPY_OBJECT_OR_STORE,
+ [UACPI_AML_OP_CopyObjectOp] = OP_HANDLER_COPY_OBJECT_OR_STORE,
+
+ [UACPI_AML_OP_IncrementOp] = OP_HANDLER_INC_DEC,
+ [UACPI_AML_OP_DecrementOp] = OP_HANDLER_INC_DEC,
+
+ [UACPI_AML_OP_RefOfOp] = OP_HANDLER_REF_OR_DEREF_OF,
+ [UACPI_AML_OP_DerefOfOp] = OP_HANDLER_REF_OR_DEREF_OF,
+
+ [UACPI_AML_OP_LnotOp] = OP_HANDLER_LOGICAL_NOT,
+
+ [UACPI_AML_OP_LEqualOp] = OP_HANDLER_BINARY_LOGIC,
+ [UACPI_AML_OP_LandOp] = OP_HANDLER_BINARY_LOGIC,
+ [UACPI_AML_OP_LorOp] = OP_HANDLER_BINARY_LOGIC,
+ [UACPI_AML_OP_LGreaterOp] = OP_HANDLER_BINARY_LOGIC,
+ [UACPI_AML_OP_LLessOp] = OP_HANDLER_BINARY_LOGIC,
+
+ [UACPI_AML_OP_InternalOpNamedObject] = OP_HANDLER_NAMED_OBJECT,
+
+ [UACPI_AML_OP_BufferOp] = OP_HANDLER_BUFFER,
+
+ [UACPI_AML_OP_PackageOp] = OP_HANDLER_PACKAGE,
+ [UACPI_AML_OP_VarPackageOp] = OP_HANDLER_PACKAGE,
+
+ [UACPI_AML_OP_NameOp] = OP_HANDLER_CREATE_NAMED,
+
+ [UACPI_AML_OP_CreateBitFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
+ [UACPI_AML_OP_CreateByteFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
+ [UACPI_AML_OP_CreateWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
+ [UACPI_AML_OP_CreateDWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
+ [UACPI_AML_OP_CreateQWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
+
+ [UACPI_AML_OP_InternalOpReadFieldAsBuffer] = OP_HANDLER_READ_FIELD,
+ [UACPI_AML_OP_InternalOpReadFieldAsInteger] = OP_HANDLER_READ_FIELD,
+
+ [UACPI_AML_OP_ToIntegerOp] = OP_HANDLER_TO,
+ [UACPI_AML_OP_ToBufferOp] = OP_HANDLER_TO,
+ [UACPI_AML_OP_ToDecimalStringOp] = OP_HANDLER_TO,
+ [UACPI_AML_OP_ToHexStringOp] = OP_HANDLER_TO,
+ [UACPI_AML_OP_ToStringOp] = OP_HANDLER_TO_STRING,
+
+ [UACPI_AML_OP_AliasOp] = OP_HANDLER_ALIAS,
+
+ [UACPI_AML_OP_ConcatOp] = OP_HANDLER_CONCATENATE,
+ [UACPI_AML_OP_ConcatResOp] = OP_HANDLER_CONCATENATE_RES,
+
+ [UACPI_AML_OP_SizeOfOp] = OP_HANDLER_SIZEOF,
+
+ [UACPI_AML_OP_NotOp] = OP_HANDLER_UNARY_MATH,
+ [UACPI_AML_OP_FindSetLeftBitOp] = OP_HANDLER_UNARY_MATH,
+ [UACPI_AML_OP_FindSetRightBitOp] = OP_HANDLER_UNARY_MATH,
+
+ [UACPI_AML_OP_IndexOp] = OP_HANDLER_INDEX,
+
+ [UACPI_AML_OP_ObjectTypeOp] = OP_HANDLER_OBJECT_TYPE,
+
+ [UACPI_AML_OP_MidOp] = OP_HANDLER_MID,
+
+ [UACPI_AML_OP_MatchOp] = OP_HANDLER_MATCH,
+
+ [UACPI_AML_OP_NotifyOp] = OP_HANDLER_NOTIFY,
+
+ [UACPI_AML_OP_BreakPointOp] = OP_HANDLER_FIRMWARE_REQUEST,
+};
+
+#define EXT_OP_IDX(op) ((op) & 0xFF)
+
+static uacpi_u8 handler_idx_of_ext_op[0x100] = {
+ [EXT_OP_IDX(UACPI_AML_OP_CreateFieldOp)] = OP_HANDLER_CREATE_BUFFER_FIELD,
+ [EXT_OP_IDX(UACPI_AML_OP_CondRefOfOp)] = OP_HANDLER_REF_OR_DEREF_OF,
+ [EXT_OP_IDX(UACPI_AML_OP_OpRegionOp)] = OP_HANDLER_CREATE_OP_REGION,
+ [EXT_OP_IDX(UACPI_AML_OP_DeviceOp)] = OP_HANDLER_CODE_BLOCK,
+ [EXT_OP_IDX(UACPI_AML_OP_ProcessorOp)] = OP_HANDLER_CODE_BLOCK,
+ [EXT_OP_IDX(UACPI_AML_OP_PowerResOp)] = OP_HANDLER_CODE_BLOCK,
+ [EXT_OP_IDX(UACPI_AML_OP_ThermalZoneOp)] = OP_HANDLER_CODE_BLOCK,
+ [EXT_OP_IDX(UACPI_AML_OP_TimerOp)] = OP_HANDLER_TIMER,
+ [EXT_OP_IDX(UACPI_AML_OP_MutexOp)] = OP_HANDLER_CREATE_MUTEX_OR_EVENT,
+ [EXT_OP_IDX(UACPI_AML_OP_EventOp)] = OP_HANDLER_CREATE_MUTEX_OR_EVENT,
+
+ [EXT_OP_IDX(UACPI_AML_OP_FieldOp)] = OP_HANDLER_CREATE_FIELD,
+ [EXT_OP_IDX(UACPI_AML_OP_IndexFieldOp)] = OP_HANDLER_CREATE_FIELD,
+ [EXT_OP_IDX(UACPI_AML_OP_BankFieldOp)] = OP_HANDLER_CREATE_FIELD,
+
+ [EXT_OP_IDX(UACPI_AML_OP_FromBCDOp)] = OP_HANDLER_BCD,
+ [EXT_OP_IDX(UACPI_AML_OP_ToBCDOp)] = OP_HANDLER_BCD,
+
+ [EXT_OP_IDX(UACPI_AML_OP_DataRegionOp)] = OP_HANDLER_CREATE_DATA_REGION,
+
+ [EXT_OP_IDX(UACPI_AML_OP_LoadTableOp)] = OP_HANDLER_LOAD_TABLE,
+ [EXT_OP_IDX(UACPI_AML_OP_LoadOp)] = OP_HANDLER_LOAD,
+ [EXT_OP_IDX(UACPI_AML_OP_UnloadOp)] = OP_HANDLER_UNLOAD,
+
+ [EXT_OP_IDX(UACPI_AML_OP_StallOp)] = OP_HANDLER_STALL_OR_SLEEP,
+ [EXT_OP_IDX(UACPI_AML_OP_SleepOp)] = OP_HANDLER_STALL_OR_SLEEP,
+
+ [EXT_OP_IDX(UACPI_AML_OP_SignalOp)] = OP_HANDLER_EVENT_CTL,
+ [EXT_OP_IDX(UACPI_AML_OP_ResetOp)] = OP_HANDLER_EVENT_CTL,
+ [EXT_OP_IDX(UACPI_AML_OP_WaitOp)] = OP_HANDLER_EVENT_CTL,
+
+ [EXT_OP_IDX(UACPI_AML_OP_AcquireOp)] = OP_HANDLER_MUTEX_CTL,
+ [EXT_OP_IDX(UACPI_AML_OP_ReleaseOp)] = OP_HANDLER_MUTEX_CTL,
+
+ [EXT_OP_IDX(UACPI_AML_OP_FatalOp)] = OP_HANDLER_FIRMWARE_REQUEST,
+};
+
+enum method_call_type {
+ METHOD_CALL_NATIVE,
+ METHOD_CALL_AML,
+ METHOD_CALL_TABLE_LOAD,
+};
+
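+/*
+ * Native calls take their arguments from the caller-provided object array,
+ * AML-to-AML calls copy them out of the MethodCall op context, and table
+ * loads take no arguments at all.
+ */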
+static uacpi_status prepare_method_call(
+ struct execution_context *ctx, uacpi_namespace_node *node,
+ uacpi_control_method *method, enum method_call_type type,
+ const uacpi_object_array *args
+)
+{
+ uacpi_status ret;
+ struct call_frame *frame;
+
+ if (uacpi_unlikely(call_frame_array_size(&ctx->call_stack) >=
+ g_uacpi_rt_ctx.max_call_stack_depth))
+ return UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT;
+
+ ret = push_new_frame(ctx, &frame);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = enter_method(ctx, frame, method);
+ if (uacpi_unlikely_error(ret))
+ goto method_dispatch_error;
+
+ if (type == METHOD_CALL_NATIVE) {
+ uacpi_u8 arg_count;
+
+ arg_count = args ? args->count : 0;
+ if (uacpi_unlikely(arg_count != method->args)) {
+ uacpi_error(
+ "invalid number of arguments %zu to call %.4s, expected %d\n",
+ args ? args->count : 0, node->name.text, method->args
+ );
+
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto method_dispatch_error;
+ }
+
+ if (args != UACPI_NULL) {
+ uacpi_u8 i;
+
+ for (i = 0; i < method->args; ++i) {
+ frame->args[i] = args->objects[i];
+ uacpi_object_ref(args->objects[i]);
+ }
+ }
+ } else if (type == METHOD_CALL_AML) {
+ ret = frame_push_args(frame, ctx->cur_op_ctx);
+ if (uacpi_unlikely_error(ret))
+ goto method_dispatch_error;
+ }
+
+ ret = frame_setup_base_scope(frame, node, method);
+ if (uacpi_unlikely_error(ret))
+ goto method_dispatch_error;
+
+ ctx->cur_frame = frame;
+ ctx->cur_op_ctx = UACPI_NULL;
+ ctx->prev_op_ctx = UACPI_NULL;
+ ctx->cur_block = code_block_array_last(&ctx->cur_frame->code_blocks);
+
+ if (method->native_call) {
+ uacpi_object *retval;
+
+ ret = method_get_ret_object(ctx, &retval);
+ if (uacpi_unlikely_error(ret))
+ goto method_dispatch_error;
+
+ return method->handler(ctx, retval);
+ }
+
+ return UACPI_STATUS_OK;
+
+method_dispatch_error:
+ call_frame_clear(frame);
+ call_frame_array_pop(&ctx->call_stack);
+ return ret;
+}
+
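+/*
+ * If this op recorded a tracked PkgLength, fast-forward the frame to the
+ * end of that package once the op completes.
+ */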
+static void apply_tracked_pkg(
+ struct call_frame *frame, struct op_context *op_ctx
+)
+{
+ struct item *item;
+
+ if (op_ctx->tracked_pkg_idx == 0)
+ return;
+
+ item = item_array_at(&op_ctx->items, op_ctx->tracked_pkg_idx - 1);
+ frame->code_offset = item->pkg.end;
+}
+
+static uacpi_status exec_op(struct execution_context *ctx)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ struct call_frame *frame = ctx->cur_frame;
+ struct op_context *op_ctx;
+ struct item *item = UACPI_NULL;
+ enum uacpi_parse_op prev_op = 0, op;
+
+ /*
+ * Allocate a new op context if previous is preempted (looking for a
+ * dynamic argument), or doesn't exist at all.
+ */
+ if (!ctx_has_non_preempted_op(ctx)) {
+ ret = push_op(ctx);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ } else {
+ trace_op(ctx->cur_op_ctx->op, OP_TRACE_ACTION_RESUME);
+ }
+
+ if (ctx->prev_op_ctx)
+ prev_op = *op_decode_cursor(ctx->prev_op_ctx);
+
+ for (;;) {
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ op_ctx = ctx->cur_op_ctx;
+ frame = ctx->cur_frame;
+
+ if (op_ctx->pc == 0 && ctx->prev_op_ctx) {
+ /*
+ * Type check the current arg type against what is expected by the
+ * preempted op. This check is able to catch most type violations
+ * with the only exception being Operand as we only know whether
+ * that evaluates to an integer after the fact.
+ */
+ ret = op_typecheck(ctx->prev_op_ctx, ctx->cur_op_ctx);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ op = op_decode_byte(op_ctx);
+ trace_pop(op);
+
+ if (parse_op_generates_item[op] != ITEM_NONE) {
+ item = item_array_alloc(&op_ctx->items);
+ if (uacpi_unlikely(item == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ item->type = parse_op_generates_item[op];
+ if (item->type == ITEM_OBJECT) {
+ enum uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
+
+ if (op == UACPI_PARSE_OP_OBJECT_ALLOC_TYPED)
+ type = op_decode_byte(op_ctx);
+
+ item->obj = uacpi_create_object(type);
+ if (uacpi_unlikely(item->obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ } else {
+ uacpi_memzero(&item->immediate, sizeof(item->immediate));
+ }
+ } else if (item == UACPI_NULL) {
+ item = item_array_last(&op_ctx->items);
+ }
+
+ switch (op) {
+ case UACPI_PARSE_OP_END:
+ case UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL: {
+ trace_op(ctx->cur_op_ctx->op, OP_TRACE_ACTION_END);
+
+ if (op == UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL) {
+ uacpi_u8 idx;
+
+ idx = op_decode_byte(op_ctx);
+ if (item_array_at(&op_ctx->items, idx)->handle != UACPI_NULL)
+ break;
+
+ emit_op_skip_warn(op_ctx);
+ }
+
+ apply_tracked_pkg(frame, op_ctx);
+
+ pop_op(ctx);
+ if (ctx->cur_op_ctx) {
+ ctx->cur_op_ctx->preempted = UACPI_FALSE;
+ ctx->cur_op_ctx->pc++;
+ }
+
+ return UACPI_STATUS_OK;
+ }
+
+ case UACPI_PARSE_OP_EMIT_SKIP_WARN:
+ emit_op_skip_warn(op_ctx);
+ break;
+
+ case UACPI_PARSE_OP_SIMPLE_NAME:
+ case UACPI_PARSE_OP_SUPERNAME:
+ case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_TERM_ARG:
+ case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_OPERAND:
+ case UACPI_PARSE_OP_STRING:
+ case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
+ case UACPI_PARSE_OP_TARGET:
+ /*
+ * Preempt this op parsing for now as we wait for the dynamic arg
+ * to be parsed.
+ */
+ op_ctx->preempted = UACPI_TRUE;
+ op_ctx->pc--;
+ return UACPI_STATUS_OK;
+
+ case UACPI_PARSE_OP_TRACKED_PKGLEN:
+ op_ctx->tracked_pkg_idx = item_array_size(&op_ctx->items);
+ UACPI_FALLTHROUGH;
+ case UACPI_PARSE_OP_PKGLEN:
+ ret = parse_package_length(frame, &item->pkg);
+ break;
+
+ case UACPI_PARSE_OP_LOAD_INLINE_IMM:
+ case UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT: {
+ void *dst;
+ uacpi_u8 src_width;
+
+ if (op == UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT) {
+ item->obj->type = UACPI_OBJECT_INTEGER;
+ dst = &item->obj->integer;
+ src_width = 8;
+ } else {
+ dst = &item->immediate;
+ src_width = op_decode_byte(op_ctx);
+ }
+
+ uacpi_memcpy_zerout(
+ dst, op_decode_cursor(op_ctx),
+ sizeof(uacpi_u64), src_width
+ );
+ op_ctx->pc += src_width;
+ break;
+ }
+
+ case UACPI_PARSE_OP_LOAD_ZERO_IMM:
+ break;
+
+ case UACPI_PARSE_OP_LOAD_IMM:
+ case UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT: {
+ uacpi_u8 width;
+ void *dst;
+
+ width = op_decode_byte(op_ctx);
+ if (uacpi_unlikely(call_frame_code_bytes_left(frame) < width))
+ return UACPI_STATUS_AML_BAD_ENCODING;
+
+ if (op == UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT) {
+ item->obj->type = UACPI_OBJECT_INTEGER;
+ item->obj->integer = 0;
+ dst = &item->obj->integer;
+ } else {
+ dst = item->immediate_bytes;
+ }
+
+ uacpi_memcpy(dst, call_frame_cursor(frame), width);
+ frame->code_offset += width;
+ break;
+ }
+
+ case UACPI_PARSE_OP_LOAD_FALSE_OBJECT:
+ case UACPI_PARSE_OP_LOAD_TRUE_OBJECT: {
+ uacpi_object *obj = item->obj;
+ obj->type = UACPI_OBJECT_INTEGER;
+ obj->integer = op == UACPI_PARSE_OP_LOAD_FALSE_OBJECT ? 0 : ones();
+ break;
+ }
+
+ case UACPI_PARSE_OP_RECORD_AML_PC:
+ item->immediate = frame->code_offset;
+ break;
+
+ case UACPI_PARSE_OP_TRUNCATE_NUMBER:
+ truncate_number_if_needed(item->obj);
+ break;
+
+ case UACPI_PARSE_OP_TYPECHECK: {
+ enum uacpi_object_type expected_type;
+
+ expected_type = op_decode_byte(op_ctx);
+
+ if (uacpi_unlikely(item->obj->type != expected_type)) {
+ EXEC_OP_ERR_2("bad object type: expected %s, got %s!",
+ uacpi_object_type_to_string(expected_type),
+ uacpi_object_type_to_string(item->obj->type));
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ break;
+ }
+
+ case UACPI_PARSE_OP_BAD_OPCODE:
+ case UACPI_PARSE_OP_UNREACHABLE:
+ EXEC_OP_ERR("invalid/unexpected opcode");
+ ret = UACPI_STATUS_AML_INVALID_OPCODE;
+ break;
+
+ case UACPI_PARSE_OP_AML_PC_DECREMENT:
+ frame->code_offset--;
+ break;
+
+ case UACPI_PARSE_OP_IMM_DECREMENT:
+ item_array_at(&op_ctx->items, op_decode_byte(op_ctx))->immediate--;
+ break;
+
+ case UACPI_PARSE_OP_ITEM_POP:
+ pop_item(op_ctx);
+ item = item_array_last(&op_ctx->items);
+ break;
+
+ case UACPI_PARSE_OP_IF_HAS_DATA: {
+ uacpi_size pkg_idx = op_ctx->tracked_pkg_idx - 1;
+ struct package_length *pkg;
+ uacpi_u8 bytes_skip;
+
+ bytes_skip = op_decode_byte(op_ctx);
+ pkg = &item_array_at(&op_ctx->items, pkg_idx)->pkg;
+
+ if (frame->code_offset >= pkg->end)
+ op_ctx->pc += bytes_skip;
+
+ break;
+ }
+
+ case UACPI_PARSE_OP_IF_NOT_NULL:
+ case UACPI_PARSE_OP_IF_NULL:
+ case UACPI_PARSE_OP_IF_LAST_NULL:
+ case UACPI_PARSE_OP_IF_LAST_NOT_NULL: {
+ uacpi_u8 idx, bytes_skip;
+ uacpi_bool is_null, skip_if_null;
+
+ if (op == UACPI_PARSE_OP_IF_LAST_NULL ||
+ op == UACPI_PARSE_OP_IF_LAST_NOT_NULL) {
+ is_null = item->handle == UACPI_NULL;
+ } else {
+ idx = op_decode_byte(op_ctx);
+ is_null = item_array_at(&op_ctx->items, idx)->handle == UACPI_NULL;
+ }
+
+ bytes_skip = op_decode_byte(op_ctx);
+ skip_if_null = op == UACPI_PARSE_OP_IF_NOT_NULL ||
+ op == UACPI_PARSE_OP_IF_LAST_NOT_NULL;
+
+ if (is_null == skip_if_null)
+ op_ctx->pc += bytes_skip;
+
+ break;
+ }
+
+ case UACPI_PARSE_OP_IF_LAST_EQUALS: {
+ uacpi_u8 value, bytes_skip;
+
+ value = op_decode_byte(op_ctx);
+ bytes_skip = op_decode_byte(op_ctx);
+
+ if (item->immediate != value)
+ op_ctx->pc += bytes_skip;
+
+ break;
+ }
+
+ case UACPI_PARSE_OP_IF_LAST_FALSE:
+ case UACPI_PARSE_OP_IF_LAST_TRUE: {
+ uacpi_u8 bytes_skip;
+ uacpi_bool is_false, skip_if_false;
+
+ bytes_skip = op_decode_byte(op_ctx);
+ is_false = item->obj->integer == 0;
+ skip_if_false = op == UACPI_PARSE_OP_IF_LAST_TRUE;
+
+ if (is_false == skip_if_false)
+ op_ctx->pc += bytes_skip;
+
+ break;
+ }
+
+ case UACPI_PARSE_OP_JMP: {
+ op_ctx->pc = op_decode_byte(op_ctx);
+ break;
+ }
+
+ case UACPI_PARSE_OP_CREATE_NAMESTRING:
+ case UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD:
+ case UACPI_PARSE_OP_EXISTING_NAMESTRING:
+ case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL:
+ case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD: {
+ uacpi_size offset = frame->code_offset;
+ enum resolve_behavior behavior;
+
+ if (op == UACPI_PARSE_OP_CREATE_NAMESTRING ||
+ op == UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD)
+ behavior = RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS;
+ else
+ behavior = RESOLVE_FAIL_IF_DOESNT_EXIST;
+
+ ret = resolve_name_string(frame, behavior, &item->node);
+
+ if (ret == UACPI_STATUS_NOT_FOUND) {
+ uacpi_bool is_ok;
+
+ if (prev_op) {
+ is_ok = op_allows_unresolved(prev_op);
+ is_ok &= op_allows_unresolved(op);
+ } else {
+ // This is the only standalone op where we allow unresolved
+ is_ok = op_ctx->op->code == UACPI_AML_OP_ExternalOp;
+ }
+
+ if (is_ok)
+ ret = UACPI_STATUS_OK;
+ }
+
+ if (uacpi_unlikely_error(ret)) {
+ enum uacpi_log_level lvl = UACPI_LOG_ERROR;
+ uacpi_status trace_ret = ret;
+ uacpi_bool abort_whileif = UACPI_FALSE;
+
+ if (frame->method->named_objects_persist &&
+ (ret == UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS ||
+ ret == UACPI_STATUS_NOT_FOUND)) {
+ struct op_context *first_ctx;
+
+ first_ctx = op_context_array_at(&frame->pending_ops, 0);
+ abort_whileif = first_ctx->op->code == UACPI_AML_OP_WhileOp ||
+ first_ctx->op->code == UACPI_AML_OP_IfOp;
+
+ if (op_allows_unresolved_if_load(op) || abort_whileif) {
+ lvl = UACPI_LOG_WARN;
+ ret = UACPI_STATUS_OK;
+ }
+ }
+
+ trace_named_object_lookup_or_creation_failure(
+ frame, offset, op, trace_ret, lvl
+ );
+
+ if (abort_whileif) {
+ while (op_context_array_size(&frame->pending_ops) != 1)
+ pop_op(ctx);
+
+ op_ctx = op_context_array_at(&frame->pending_ops, 0);
+ op_ctx->pc++;
+ op_ctx->preempted = UACPI_FALSE;
+ break;
+ }
+
+ if (ret == UACPI_STATUS_NOT_FOUND)
+ ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE;
+ }
+
+ if (behavior == RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS &&
+ !frame->method->named_objects_persist)
+ item->node->flags |= UACPI_NAMESPACE_NODE_FLAG_TEMPORARY;
+
+ break;
+ }
+
+ case UACPI_PARSE_OP_INVOKE_HANDLER: {
+ uacpi_aml_op code = op_ctx->op->code;
+ uacpi_u8 idx;
+
+ if (code <= 0xFF)
+ idx = handler_idx_of_op[code];
+ else
+ idx = handler_idx_of_ext_op[EXT_OP_IDX(code)];
+
+ ret = op_handlers[idx](ctx);
+ break;
+ }
+
+ case UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE:
+ item = item_array_at(&op_ctx->items, op_decode_byte(op_ctx));
+ ret = do_install_node_item(frame, item);
+ break;
+
+ case UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV:
+ case UACPI_PARSE_OP_OBJECT_COPY_TO_PREV: {
+ uacpi_object *src;
+ struct item *dst;
+
+ if (!ctx->prev_op_ctx)
+ break;
+
+ switch (prev_op) {
+ case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
+ case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
+ case UACPI_PARSE_OP_OPERAND:
+ case UACPI_PARSE_OP_STRING:
+ src = uacpi_unwrap_internal_reference(item->obj);
+
+ if (prev_op == UACPI_PARSE_OP_OPERAND)
+ ret = typecheck_operand(ctx->prev_op_ctx, src);
+ else if (prev_op == UACPI_PARSE_OP_STRING)
+ ret = typecheck_string(ctx->prev_op_ctx, src);
+ else if (prev_op == UACPI_PARSE_OP_COMPUTATIONAL_DATA)
+ ret = typecheck_computational_data(ctx->prev_op_ctx, src);
+
+ break;
+ case UACPI_PARSE_OP_SUPERNAME:
+ case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
+ src = item->obj;
+ break;
+
+ case UACPI_PARSE_OP_SIMPLE_NAME:
+ case UACPI_PARSE_OP_TERM_ARG:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
+ case UACPI_PARSE_OP_TARGET:
+ src = item->obj;
+ break;
+
+ default:
+ EXEC_OP_ERR_1("don't know how to copy/transfer object to %d",
+ prev_op);
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ break;
+ }
+
+ if (uacpi_likely_success(ret)) {
+ dst = item_array_last(&ctx->prev_op_ctx->items);
+ dst->type = ITEM_OBJECT;
+
+ if (op == UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV) {
+ dst->obj = src;
+ uacpi_object_ref(dst->obj);
+ } else {
+ dst->obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(dst->obj == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ break;
+ }
+
+ ret = uacpi_object_assign(dst->obj, src,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
+ }
+ }
+ break;
+ }
+
+ case UACPI_PARSE_OP_STORE_TO_TARGET:
+ case UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT: {
+ uacpi_object *dst, *src;
+
+ dst = item_array_at(&op_ctx->items, op_decode_byte(op_ctx))->obj;
+
+ if (op == UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT) {
+ src = item_array_at(&op_ctx->items,
+ op_decode_byte(op_ctx))->obj;
+ } else {
+ src = item->obj;
+ }
+
+ ret = store_to_target(dst, src, UACPI_NULL);
+ break;
+ }
+
+ // Nothing to do here, object is allocated automatically
+ case UACPI_PARSE_OP_OBJECT_ALLOC:
+ case UACPI_PARSE_OP_OBJECT_ALLOC_TYPED:
+ case UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC:
+ break;
+
+ case UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY:
+ case UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY: {
+ uacpi_object *temp = item->obj;
+ enum uacpi_assign_behavior behavior;
+
+ item_array_pop(&op_ctx->items);
+ item = item_array_last(&op_ctx->items);
+
+ if (op == UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY)
+ behavior = UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY;
+ else
+ behavior = UACPI_ASSIGN_BEHAVIOR_DEEP_COPY;
+
+ ret = uacpi_object_assign(temp, item->obj, behavior);
+ if (uacpi_unlikely_error(ret))
+ break;
+
+ uacpi_object_unref(item->obj);
+ item->obj = temp;
+ break;
+ }
+
+ case UACPI_PARSE_OP_DISPATCH_METHOD_CALL: {
+ struct uacpi_namespace_node *node;
+ struct uacpi_control_method *method;
+
+ node = item_array_at(&op_ctx->items, 0)->node;
+ method = uacpi_namespace_node_get_object(node)->method;
+
+ ret = prepare_method_call(
+ ctx, node, method, METHOD_CALL_AML, UACPI_NULL
+ );
+ return ret;
+ }
+
+ case UACPI_PARSE_OP_DISPATCH_TABLE_LOAD: {
+ struct uacpi_namespace_node *node;
+ struct uacpi_control_method *method;
+
+ node = item_array_at(&op_ctx->items, 0)->node;
+ method = item_array_at(&op_ctx->items, 1)->obj->method;
+
+ ret = prepare_method_call(
+ ctx, node, method, METHOD_CALL_TABLE_LOAD, UACPI_NULL
+ );
+ return ret;
+ }
+
+ case UACPI_PARSE_OP_CONVERT_NAMESTRING: {
+ uacpi_aml_op new_op = UACPI_AML_OP_InternalOpNamedObject;
+ uacpi_object *obj;
+
+ if (item->node == UACPI_NULL) {
+ if (!op_allows_unresolved(prev_op))
+ ret = UACPI_STATUS_NOT_FOUND;
+ break;
+ }
+
+ obj = uacpi_namespace_node_get_object(item->node);
+
+ switch (obj->type) {
+ case UACPI_OBJECT_METHOD: {
+ uacpi_bool should_invoke;
+
+ switch (prev_op) {
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
+ case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
+ should_invoke = UACPI_FALSE;
+ break;
+ default:
+ should_invoke = !op_wants_supername(prev_op);
+ }
+
+ if (!should_invoke)
+ break;
+
+ new_op = UACPI_AML_OP_InternalOpMethodCall0Args;
+ new_op += obj->method->args;
+ break;
+ }
+
+ case UACPI_OBJECT_BUFFER_FIELD:
+ case UACPI_OBJECT_FIELD_UNIT: {
+ uacpi_object_type type;
+
+ if (!op_wants_term_arg_or_operand(prev_op))
+ break;
+
+ ret = field_get_read_type(obj, &type);
+ if (uacpi_unlikely_error(ret)) {
+ const uacpi_char *field_path;
+
+ field_path = uacpi_namespace_node_generate_absolute_path(
+ item->node
+ );
+
+ uacpi_error(
+ "unable to perform a read from field %s: "
+ "parent opregion gone\n", field_path
+ );
+ uacpi_free_absolute_path(field_path);
+ }
+
+ switch (type) {
+ case UACPI_OBJECT_BUFFER:
+ new_op = UACPI_AML_OP_InternalOpReadFieldAsBuffer;
+ break;
+ case UACPI_OBJECT_INTEGER:
+ new_op = UACPI_AML_OP_InternalOpReadFieldAsInteger;
+ break;
+ default:
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ continue;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ op_ctx->pc = 0;
+ op_ctx->op = uacpi_get_op_spec(new_op);
+ break;
+ }
+
+ case UACPI_PARSE_OP_SWITCH_TO_NEXT_IF_EQUALS: {
+ uacpi_aml_op op, target_op;
+ uacpi_u32 cur_offset;
+ uacpi_u8 op_length;
+
+ cur_offset = frame->code_offset;
+ apply_tracked_pkg(frame, op_ctx);
+ op_length = peek_next_op(frame, &op);
+
+ target_op = op_decode_aml_op(op_ctx);
+ if (op_length == 0 || op != target_op) {
+ // Revert tracked package
+ frame->code_offset = cur_offset;
+ break;
+ }
+
+ frame->code_offset += op_length;
+ op_ctx->switched_from = op_ctx->op->code;
+ op_ctx->op = uacpi_get_op_spec(target_op);
+ op_ctx->pc = 0;
+ break;
+ }
+
+ case UACPI_PARSE_OP_IF_SWITCHED_FROM: {
+ uacpi_aml_op target_op;
+ uacpi_u8 skip_bytes;
+
+ target_op = op_decode_aml_op(op_ctx);
+ skip_bytes = op_decode_byte(op_ctx);
+
+ if (op_ctx->switched_from != target_op)
+ op_ctx->pc += skip_bytes;
+ break;
+ }
+
+ default:
+ EXEC_OP_ERR_1("unhandled parser op '%d'", op);
+ ret = UACPI_STATUS_UNIMPLEMENTED;
+ break;
+ }
+ }
+}
+
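+/*
+ * Tear down the current call frame after a method returns or is aborted:
+ * for serialized methods the per-method mutex is released and the previous
+ * sync level restored, then the frame is popped off the call stack and
+ * execution switches back to the caller's frame.
+ */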
+static void ctx_reload_post_ret(struct execution_context *ctx)
+{
+ uacpi_control_method *method = ctx->cur_frame->method;
+
+ if (method->is_serialized) {
+ held_mutexes_array_remove_and_release(
+ &ctx->held_mutexes, method->mutex, FORCE_RELEASE_YES
+ );
+ ctx->sync_level = ctx->cur_frame->prev_sync_level;
+ }
+
+ call_frame_clear(ctx->cur_frame);
+ call_frame_array_pop(&ctx->call_stack);
+
+ ctx->cur_frame = call_frame_array_last(&ctx->call_stack);
+ refresh_ctx_pointers(ctx);
+}
+
+static void trace_method_abort(struct code_block *block, uacpi_size depth)
+{
+ static const uacpi_char *unknown_path = "<unknown>";
+ uacpi_char oom_absolute_path[9] = "<?>.";
+
+ const uacpi_char *absolute_path;
+
+ if (block != UACPI_NULL && block->type == CODE_BLOCK_SCOPE) {
+ absolute_path = uacpi_namespace_node_generate_absolute_path(block->node);
+ if (uacpi_unlikely(absolute_path == UACPI_NULL)) {
+ uacpi_memcpy(oom_absolute_path + 4, block->node->name.text, 4);
+ absolute_path = oom_absolute_path;
+ }
+ } else {
+ absolute_path = unknown_path;
+ }
+
+ uacpi_error(" #%zu in %s()\n", depth, absolute_path);
+
+ if (absolute_path != oom_absolute_path && absolute_path != unknown_path)
+ uacpi_free_dynamic_string(absolute_path);
+}
+
+static void stack_unwind(struct execution_context *ctx)
+{
+ uacpi_size depth;
+ uacpi_bool should_stop;
+
+ /*
+ * A non-empty call stack here means that execution was aborted at some
+ * point, probably due to a bytecode error.
+ */
+ depth = call_frame_array_size(&ctx->call_stack);
+
+ if (depth != 0) {
+ uacpi_size idx = 0;
+ uacpi_bool table_level_code;
+
+ do {
+ table_level_code = ctx->cur_frame->method->named_objects_persist;
+
+ if (table_level_code && idx != 0)
+ /*
+ * This isn't the first frame that we are aborting.
+ * If this is table-level code, we have just unwound a call
+ * chain that had triggered an abort. Stop here; there is no need
+ * to abort the table load because of it.
+ */
+ break;
+
+ while (op_context_array_size(&ctx->cur_frame->pending_ops) != 0)
+ pop_op(ctx);
+
+ trace_method_abort(
+ code_block_array_at(&ctx->cur_frame->code_blocks, 0), idx
+ );
+
+ should_stop = idx++ == 0 && table_level_code;
+ ctx_reload_post_ret(ctx);
+ } while (--depth && !should_stop);
+ }
+}
+
+static void execution_context_release(struct execution_context *ctx)
+{
+ if (ctx->ret)
+ uacpi_object_unref(ctx->ret);
+
+ while (held_mutexes_array_size(&ctx->held_mutexes) != 0) {
+ held_mutexes_array_remove_and_release(
+ &ctx->held_mutexes,
+ *held_mutexes_array_last(&ctx->held_mutexes),
+ FORCE_RELEASE_YES
+ );
+ }
+
+ call_frame_array_clear(&ctx->call_stack);
+ held_mutexes_array_clear(&ctx->held_mutexes);
+ uacpi_free(ctx, sizeof(*ctx));
+}
+
+uacpi_status uacpi_execute_control_method(
+ uacpi_namespace_node *scope, uacpi_control_method *method,
+ const uacpi_object_array *args, uacpi_object **out_obj
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ struct execution_context *ctx;
+
+ ctx = uacpi_kernel_alloc_zeroed(sizeof(*ctx));
+ if (uacpi_unlikely(ctx == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ if (out_obj != UACPI_NULL) {
+ ctx->ret = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(ctx->ret == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+ }
+
+ ret = prepare_method_call(ctx, scope, method, METHOD_CALL_NATIVE, args);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ for (;;) {
+ if (!ctx_has_non_preempted_op(ctx)) {
+ if (ctx->cur_frame == UACPI_NULL)
+ break;
+
+ if (maybe_end_block(ctx))
+ continue;
+
+ if (!call_frame_has_code(ctx->cur_frame)) {
+ ctx_reload_post_ret(ctx);
+ continue;
+ }
+
+ ret = get_op(ctx);
+ if (uacpi_unlikely_error(ret))
+ goto handle_method_abort;
+
+ trace_op(ctx->cur_op, OP_TRACE_ACTION_BEGIN);
+ }
+
+ ret = exec_op(ctx);
+ if (uacpi_unlikely_error(ret))
+ goto handle_method_abort;
+
+ continue;
+
+ handle_method_abort:
+ uacpi_error("aborting %s due to previous error: %s\n",
+ ctx->cur_frame->method->named_objects_persist ?
+ "table load" : "method invocation",
+ uacpi_status_to_string(ret));
+ stack_unwind(ctx);
+
+ /*
+ * Having a frame here implies that we just aborted a dynamic table
+ * load. Signal to the caller that it failed by setting the return
+ * value to false.
+ */
+ if (ctx->cur_frame) {
+ struct item *it;
+
+ it = item_array_last(&ctx->cur_op_ctx->items);
+ if (it != UACPI_NULL && it->obj != UACPI_NULL)
+ it->obj->integer = 0;
+ }
+ }
+
+out:
+ if (ctx->ret != UACPI_NULL) {
+ uacpi_object *ret_obj = UACPI_NULL;
+
+ if (ctx->ret->type != UACPI_OBJECT_UNINITIALIZED) {
+ ret_obj = ctx->ret;
+ uacpi_object_ref(ret_obj);
+ }
+
+ *out_obj = ret_obj;
+ }
+
+ execution_context_release(ctx);
+ return ret;
+}
+
+uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval)
+{
+ struct execution_context *ctx = handle;
+ uacpi_bool is_supported;
+ uacpi_status ret;
+ uacpi_object *arg;
+
+ arg = uacpi_unwrap_internal_reference(ctx->cur_frame->args[0]);
+ if (arg->type != UACPI_OBJECT_STRING) {
+ uacpi_error("_OSI: invalid argument type %s, expected a String\n",
+ uacpi_object_type_to_string(arg->type));
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ if (retval == UACPI_NULL)
+ return UACPI_STATUS_OK;
+
+ retval->type = UACPI_OBJECT_INTEGER;
+
+ ret = uacpi_handle_osi(arg->buffer->text, &is_supported);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ retval->integer = is_supported ? ones() : 0;
+
+ uacpi_trace("_OSI(%s) => reporting as %ssupported\n",
+ arg->buffer->text, is_supported ? "" : "un");
+ return UACPI_STATUS_OK;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/io.c b/sys/dev/acpi/uacpi/io.c
new file mode 100644
index 0000000..7d10005
--- /dev/null
+++ b/sys/dev/acpi/uacpi/io.c
@@ -0,0 +1,1116 @@
+#include <uacpi/internal/io.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/opregion.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/internal/namespace.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length)
+{
+ return UACPI_ALIGN_UP(bit_length, 8, uacpi_size) / 8;
+}
+
+static void cut_misaligned_tail(
+ uacpi_u8 *data, uacpi_size offset, uacpi_u32 bit_length
+)
+{
+ uacpi_u8 remainder = bit_length & 7;
+
+ if (remainder == 0)
+ return;
+
+ data[offset] &= ((1ull << remainder) - 1);
+}
+
+struct bit_span
+{
+ union {
+ uacpi_u8 *data;
+ const uacpi_u8 *const_data;
+ };
+ uacpi_u64 index;
+ uacpi_u64 length;
+};
+
+static uacpi_size bit_span_offset(struct bit_span *span, uacpi_size bits)
+{
+ uacpi_size delta = UACPI_MIN(span->length, bits);
+
+ span->index += delta;
+ span->length -= delta;
+
+ return delta;
+}
+
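+/*
+ * Copy 'src->length' bits starting at bit 'src->index' of the source span
+ * into the destination span starting at bit 'dst->index'. If the source is
+ * shorter than the destination, the remaining destination bits are zeroed;
+ * bits outside of the destination span are left untouched.
+ */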
+static void bit_copy(struct bit_span *dst, struct bit_span *src)
+{
+ uacpi_u8 src_shift, dst_shift, bits = 0;
+ uacpi_u16 dst_mask;
+ uacpi_u8 *dst_ptr, *src_ptr;
+ uacpi_u64 dst_count, src_count;
+
+ dst_ptr = dst->data + (dst->index / 8);
+ src_ptr = src->data + (src->index / 8);
+
+ dst_count = dst->length;
+ dst_shift = dst->index & 7;
+
+ src_count = src->length;
+ src_shift = src->index & 7;
+
+ while (dst_count)
+ {
+ bits = 0;
+
+ if (src_count) {
+ bits = *src_ptr >> src_shift;
+
+ if (src_shift && src_count > (uacpi_u32)(8 - src_shift))
+ bits |= *(src_ptr + 1) << (8 - src_shift);
+
+ if (src_count < 8) {
+ bits &= (1 << src_count) - 1;
+ src_count = 0;
+ } else {
+ src_count -= 8;
+ src_ptr++;
+ }
+ }
+
+ dst_mask = (dst_count < 8 ? (1 << dst_count) - 1 : 0xFF) << dst_shift;
+ *dst_ptr = (*dst_ptr & ~dst_mask) | ((bits << dst_shift) & dst_mask);
+
+ if (dst_shift && dst_count > (uacpi_u32)(8 - dst_shift)) {
+ dst_mask >>= 8;
+ *(dst_ptr + 1) &= ~dst_mask;
+ *(dst_ptr + 1) |= (bits >> (8 - dst_shift)) & dst_mask;
+ }
+
+ dst_count = dst_count > 8 ? dst_count - 8 : 0;
+ ++dst_ptr;
+ }
+}
+
+static void do_misaligned_buffer_read(
+ const uacpi_buffer_field *field, uacpi_u8 *dst
+)
+{
+ struct bit_span src_span = { 0 };
+ struct bit_span dst_span = { 0 };
+
+ src_span.index = field->bit_index;
+ src_span.length = field->bit_length;
+ src_span.const_data = field->backing->data;
+
+ dst_span.data = dst;
+ dst_span.length = uacpi_round_up_bits_to_bytes(field->bit_length) * 8;
+ bit_copy(&dst_span, &src_span);
+}
+
+void uacpi_read_buffer_field(
+ const uacpi_buffer_field *field, void *dst
+)
+{
+ if (!(field->bit_index & 7)) {
+ uacpi_u8 *src = field->backing->data;
+ uacpi_size count;
+
+ count = uacpi_round_up_bits_to_bytes(field->bit_length);
+ uacpi_memcpy(dst, src + (field->bit_index / 8), count);
+ cut_misaligned_tail(dst, count - 1, field->bit_length);
+ return;
+ }
+
+ do_misaligned_buffer_read(field, dst);
+}
+
+static void do_write_misaligned_buffer_field(
+ uacpi_buffer_field *field,
+ const void *src, uacpi_size size
+)
+{
+ struct bit_span src_span = { 0 };
+ struct bit_span dst_span = { 0 };
+
+ src_span.length = size * 8;
+ src_span.const_data = src;
+
+ dst_span.index = field->bit_index;
+ dst_span.length = field->bit_length;
+ dst_span.data = field->backing->data;
+
+ bit_copy(&dst_span, &src_span);
+}
+
+void uacpi_write_buffer_field(
+ uacpi_buffer_field *field,
+ const void *src, uacpi_size size
+)
+{
+ if (!(field->bit_index & 7)) {
+ uacpi_u8 *dst, last_byte, tail_shift;
+ uacpi_size count;
+
+ dst = field->backing->data;
+ dst += field->bit_index / 8;
+ count = uacpi_round_up_bits_to_bytes(field->bit_length);
+
+ last_byte = dst[count - 1];
+ tail_shift = field->bit_length & 7;
+
+ uacpi_memcpy_zerout(dst, src, count, size);
+ if (tail_shift) {
+ uacpi_u8 last_shift = 8 - tail_shift;
+ dst[count - 1] = dst[count - 1] << last_shift;
+ dst[count - 1] >>= last_shift;
+ dst[count - 1] |= (last_byte >> tail_shift) << tail_shift;
+ }
+
+ return;
+ }
+
+ do_write_misaligned_buffer_field(field, src, size);
+}
+
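+/*
+ * Perform one access-width read or write of a field unit, taking the global
+ * lock first if the field's lock rule requires it. Bank fields get their
+ * bank selection register written beforehand, and index fields are accessed
+ * indirectly by writing 'offset' to the index field and then transferring
+ * the data through the data field instead of dispatching to an opregion.
+ */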
+static uacpi_status access_field_unit(
+ uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op,
+ union uacpi_opregion_io_data data
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (field->lock_rule) {
+ ret = uacpi_acquire_aml_mutex(
+ g_uacpi_rt_ctx.global_lock_mutex, 0xFFFF
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ switch (field->kind) {
+ case UACPI_FIELD_UNIT_KIND_BANK:
+ ret = uacpi_write_field_unit(
+ field->bank_selection, &field->bank_value, sizeof(field->bank_value),
+ UACPI_NULL
+ );
+ break;
+ case UACPI_FIELD_UNIT_KIND_NORMAL:
+ break;
+ case UACPI_FIELD_UNIT_KIND_INDEX:
+ ret = uacpi_write_field_unit(
+ field->index, &offset, sizeof(offset),
+ UACPI_NULL
+ );
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ switch (op) {
+ case UACPI_REGION_OP_READ:
+ ret = uacpi_read_field_unit(
+ field->data, data.integer, field->access_width_bytes,
+ UACPI_NULL
+ );
+ break;
+ case UACPI_REGION_OP_WRITE:
+ ret = uacpi_write_field_unit(
+ field->data, data.integer, field->access_width_bytes,
+ UACPI_NULL
+ );
+ break;
+ default:
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ break;
+ }
+
+ goto out;
+
+ default:
+ uacpi_error("invalid field unit kind %d\n", field->kind);
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ ret = uacpi_dispatch_opregion_io(field, offset, op, data);
+
+out:
+ if (field->lock_rule)
+ uacpi_release_aml_mutex(g_uacpi_rt_ctx.global_lock_mutex);
+ return ret;
+}
+
+#define SERIAL_HEADER_SIZE 2
+#define IPMI_DATA_SIZE 64
+
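+/*
+ * Compute the size of the transfer buffer used for write-then-read accesses
+ * (IPMI, PRM, FFixedHW, SMBus, GenericSerialBus). For the serial bus spaces
+ * the size depends on the access attribute encoded in the field.
+ */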
+static uacpi_status wtr_buffer_size(
+ uacpi_field_unit *field, uacpi_address_space space,
+ uacpi_size *out_size
+)
+{
+ switch (space) {
+ case UACPI_ADDRESS_SPACE_IPMI:
+ *out_size = SERIAL_HEADER_SIZE + IPMI_DATA_SIZE;
+ break;
+ case UACPI_ADDRESS_SPACE_PRM:
+ *out_size = 26;
+ break;
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ *out_size = 256;
+ break;
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ case UACPI_ADDRESS_SPACE_SMBUS: {
+ uacpi_size size_for_protocol = SERIAL_HEADER_SIZE;
+
+ switch (field->attributes) {
+ case UACPI_ACCESS_ATTRIBUTE_QUICK:
+ break; // + 0
+ case UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE:
+ case UACPI_ACCESS_ATTRIBUTE_BYTE:
+ size_for_protocol += 1;
+ break;
+
+ case UACPI_ACCESS_ATTRIBUTE_WORD:
+ case UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL:
+ size_for_protocol += 2;
+ break;
+
+ case UACPI_ACCESS_ATTRIBUTE_BYTES:
+ size_for_protocol += field->access_length;
+ break;
+
+ case UACPI_ACCESS_ATTRIBUTE_BLOCK:
+ case UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL:
+ case UACPI_ACCESS_ATTRIBUTE_RAW_BYTES:
+ case UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES:
+ size_for_protocol += 255;
+ break;
+
+ default:
+ uacpi_error(
+ "unsupported field@%p access attribute %d\n",
+ field, field->attributes
+ );
+ return UACPI_STATUS_UNIMPLEMENTED;
+ }
+
+ *out_size = size_for_protocol;
+ break;
+ }
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
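+/*
+ * Intercept accesses to fields whose parent region cannot be accessed as a
+ * plain memory/IO region: GeneralPurposeIO is forwarded as a single integer
+ * access, while the write-then-read spaces go through a separately allocated
+ * transfer buffer. '*did_handle' is set whenever the access was fully
+ * handled (or failed) here and the generic path must be skipped.
+ */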
+static uacpi_status handle_special_field(
+ uacpi_field_unit *field, uacpi_data_view buf,
+ uacpi_region_op op, uacpi_data_view *wtr_response,
+ uacpi_bool *did_handle
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ uacpi_object *obj;
+ uacpi_operation_region *region;
+ uacpi_u64 in_out;
+ uacpi_data_view wtr_buffer;
+ union uacpi_opregion_io_data data;
+
+ *did_handle = UACPI_FALSE;
+
+ if (field->kind == UACPI_FIELD_UNIT_KIND_INDEX)
+ return ret;
+
+ obj = uacpi_namespace_node_get_object_typed(
+ field->region, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ uacpi_trace_region_error(
+ field->region, "attempted access to deleted", ret
+ );
+ goto out_handled;
+ }
+ region = obj->op_region;
+
+ switch (region->space) {
+ case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO:
+ if (op == UACPI_REGION_OP_WRITE) {
+ uacpi_memcpy_zerout(
+ &in_out, buf.const_data, sizeof(in_out), buf.length
+ );
+ }
+
+ data.integer = &in_out;
+ ret = access_field_unit(field, 0, op, data);
+ if (uacpi_unlikely_error(ret))
+ goto out_handled;
+
+ if (op == UACPI_REGION_OP_READ)
+ uacpi_memcpy_zerout(buf.data, &in_out, buf.length, sizeof(in_out));
+ goto out_handled;
+ case UACPI_ADDRESS_SPACE_IPMI:
+ case UACPI_ADDRESS_SPACE_PRM:
+ if (uacpi_unlikely(op == UACPI_REGION_OP_READ)) {
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ uacpi_trace_region_error(
+ field->region, "attempted to read from a write-only", ret
+ );
+ goto out_handled;
+ }
+ UACPI_FALLTHROUGH;
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ case UACPI_ADDRESS_SPACE_SMBUS:
+ goto do_wtr;
+ default:
+ return ret;
+ }
+
+do_wtr:
+ ret = wtr_buffer_size(field, region->space, &wtr_buffer.length);
+ if (uacpi_unlikely_error(ret))
+ goto out_handled;
+
+ wtr_buffer.data = uacpi_kernel_alloc(wtr_buffer.length);
+ if (uacpi_unlikely(wtr_buffer.data == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out_handled;
+ }
+
+ uacpi_memcpy_zerout(
+ wtr_buffer.data, buf.const_data, wtr_buffer.length, buf.length
+ );
+ data.buffer = wtr_buffer;
+ ret = access_field_unit(
+ field, field->byte_offset,
+ op, data
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_free(wtr_buffer.data, wtr_buffer.length);
+ goto out_handled;
+ }
+
+ if (wtr_response != UACPI_NULL)
+ *wtr_response = wtr_buffer;
+
+out_handled:
+ *did_handle = UACPI_TRUE;
+ return ret;
+}
+
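+/*
+ * Slow path for field reads that are not aligned to the access width: the
+ * field is read one access-width chunk at a time and the relevant bits of
+ * each chunk are bit-copied into the destination buffer.
+ */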
+static uacpi_status do_read_misaligned_field_unit(
+ uacpi_field_unit *field, uacpi_u8 *dst, uacpi_size size
+)
+{
+ uacpi_status ret;
+ uacpi_size reads_to_do;
+ uacpi_u64 out;
+ uacpi_u32 byte_offset = field->byte_offset;
+ uacpi_u32 bits_left = field->bit_length;
+ uacpi_u8 width_access_bits = field->access_width_bytes * 8;
+
+ struct bit_span src_span = { 0 };
+ struct bit_span dst_span = { 0 };
+
+ src_span.data = (uacpi_u8*)&out;
+ src_span.index = field->bit_offset_within_first_byte;
+
+ dst_span.data = dst;
+ dst_span.index = 0;
+ dst_span.length = size * 8;
+
+ reads_to_do = UACPI_ALIGN_UP(
+ field->bit_offset_within_first_byte + field->bit_length,
+ width_access_bits,
+ uacpi_u32
+ );
+ reads_to_do /= width_access_bits;
+
+ while (reads_to_do-- > 0) {
+ union uacpi_opregion_io_data data;
+
+ src_span.length = UACPI_MIN(
+ bits_left, width_access_bits - src_span.index
+ );
+
+ data.integer = &out;
+ ret = access_field_unit(
+ field, byte_offset, UACPI_REGION_OP_READ,
+ data
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ bit_copy(&dst_span, &src_span);
+ bits_left -= src_span.length;
+ src_span.index = 0;
+
+ bit_span_offset(&dst_span, src_span.length);
+ byte_offset += field->access_width_bytes;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_read_field_unit(
+ uacpi_field_unit *field, void *dst, uacpi_size size,
+ uacpi_data_view *wtr_response
+)
+{
+ uacpi_status ret;
+ uacpi_u32 field_byte_length;
+ uacpi_bool did_handle;
+ uacpi_data_view data_view = { 0 };
+
+ data_view.data = dst;
+ data_view.length = size;
+
+ ret = handle_special_field(
+ field, data_view, UACPI_REGION_OP_READ,
+ wtr_response, &did_handle
+ );
+ if (did_handle)
+ return ret;
+
+ field_byte_length = uacpi_round_up_bits_to_bytes(field->bit_length);
+
+ /*
+ * Very simple fast case:
+ * - Bit offset within first byte is 0
+ * AND
+ * - Field size is <= access width
+ */
+ if (field->bit_offset_within_first_byte == 0 &&
+ field_byte_length <= field->access_width_bytes)
+ {
+ uacpi_u64 out;
+ union uacpi_opregion_io_data data;
+
+ data.integer = &out;
+ ret = access_field_unit(
+ field, field->byte_offset, UACPI_REGION_OP_READ,
+ data
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ uacpi_memcpy_zerout(dst, &out, size, field_byte_length);
+ if (size >= field_byte_length)
+ cut_misaligned_tail(dst, field_byte_length - 1, field->bit_length);
+
+ return UACPI_STATUS_OK;
+ }
+
+ // Slow case
+ return do_read_misaligned_field_unit(field, dst, size);
+}
+
+static uacpi_status write_generic_field_unit(
+ uacpi_field_unit *field, const void *src, uacpi_size size
+)
+{
+ uacpi_status ret;
+ uacpi_u32 bits_left, byte_offset = field->byte_offset;
+ uacpi_u8 width_access_bits = field->access_width_bytes * 8;
+ uacpi_u64 in;
+ struct bit_span src_span = { 0 };
+ struct bit_span dst_span = { 0 };
+
+ src_span.const_data = src;
+ src_span.index = 0;
+ src_span.length = size * 8;
+
+ dst_span.data = (uacpi_u8 *)&in;
+ dst_span.index = field->bit_offset_within_first_byte;
+
+ bits_left = field->bit_length;
+
+ while (bits_left) {
+ union uacpi_opregion_io_data data;
+
+ in = 0;
+ dst_span.length = UACPI_MIN(
+ width_access_bits - dst_span.index, bits_left
+ );
+
+ if (dst_span.index != 0 || dst_span.length < width_access_bits) {
+ switch (field->update_rule) {
+ case UACPI_UPDATE_RULE_PRESERVE:
+ data.integer = &in;
+ ret = access_field_unit(
+ field, byte_offset, UACPI_REGION_OP_READ,
+ data
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ break;
+ case UACPI_UPDATE_RULE_WRITE_AS_ONES:
+ in = ~in;
+ break;
+ case UACPI_UPDATE_RULE_WRITE_AS_ZEROES:
+ break;
+ default:
+ uacpi_error("invalid field@%p update rule %d\n",
+ field, field->update_rule);
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+ }
+
+ bit_copy(&dst_span, &src_span);
+ bit_span_offset(&src_span, dst_span.length);
+
+ data.integer = &in;
+
+ ret = access_field_unit(
+ field, byte_offset, UACPI_REGION_OP_WRITE,
+ data
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ bits_left -= dst_span.length;
+ dst_span.index = 0;
+ byte_offset += field->access_width_bytes;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_write_field_unit(
+ uacpi_field_unit *field, const void *src, uacpi_size size,
+ uacpi_data_view *wtr_response
+)
+{
+ uacpi_status ret;
+ uacpi_bool did_handle;
+ uacpi_data_view data_view = { 0 };
+
+ data_view.const_data = src;
+ data_view.length = size;
+
+ ret = handle_special_field(
+ field, data_view, UACPI_REGION_OP_WRITE,
+ wtr_response, &did_handle
+ );
+ if (did_handle)
+ return ret;
+
+ return write_generic_field_unit(field, src, size);
+}
+
+uacpi_status uacpi_field_unit_get_read_type(
+ struct uacpi_field_unit *field, uacpi_object_type *out_type
+)
+{
+ uacpi_object *obj;
+
+ if (field->kind == UACPI_FIELD_UNIT_KIND_INDEX)
+ goto out_basic_field;
+
+ obj = uacpi_namespace_node_get_object_typed(
+ field->region, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (uacpi_is_buffer_access_address_space(obj->op_region->space)) {
+ *out_type = UACPI_OBJECT_BUFFER;
+ return UACPI_STATUS_OK;
+ }
+
+out_basic_field:
+ if (field->bit_length > (g_uacpi_rt_ctx.is_rev1 ? 32u : 64u))
+ *out_type = UACPI_OBJECT_BUFFER;
+ else
+ *out_type = UACPI_OBJECT_INTEGER;
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_field_unit_get_bit_length(
+ struct uacpi_field_unit *field, uacpi_size *out_length
+)
+{
+ uacpi_object *obj;
+
+ if (field->kind == UACPI_FIELD_UNIT_KIND_INDEX)
+ goto out_basic_field;
+
+ obj = uacpi_namespace_node_get_object_typed(
+ field->region, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (uacpi_is_buffer_access_address_space(obj->op_region->space)) {
+ /*
+ * The bit length is protocol specific; the data will be returned
+ * via the write-then-read response buffer.
+ */
+ *out_length = 0;
+ return UACPI_STATUS_OK;
+ }
+
+out_basic_field:
+ *out_length = field->bit_length;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_u8 gas_get_access_bit_width(const struct acpi_gas *gas)
+{
+ /*
+ * Same algorithm as ACPICA.
+ *
+ * The reason we do this is that a non-zero GAS bit offset apparently means
+ * this is an APEI register, as opposed to a FADT one, and it needs special
+ * handling. In the case of a FADT register we want to ignore the specified
+ * access size.
+ */
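+ /*
+ * As an illustration: register_bit_offset=0, register_bit_width=32 and
+ * access_size=0 selects a 32-bit access width via the first branch; when
+ * the register width does not qualify, an explicit access_size of 2
+ * selects a 16-bit access width, and otherwise the width is derived from
+ * the register's total bit span and the alignment of its address.
+ */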
+ uacpi_u8 access_bit_width;
+
+ if (gas->register_bit_offset == 0 &&
+ UACPI_IS_POWER_OF_TWO(gas->register_bit_width, uacpi_u8) &&
+ UACPI_IS_ALIGNED(gas->register_bit_width, 8, uacpi_u8)) {
+ access_bit_width = gas->register_bit_width;
+ } else if (gas->access_size) {
+ access_bit_width = gas->access_size * 8;
+ } else {
+ uacpi_u8 msb;
+
+ msb = uacpi_bit_scan_backward(
+ (gas->register_bit_offset + gas->register_bit_width) - 1
+ );
+ access_bit_width = 1 << msb;
+
+ if (access_bit_width <= 8) {
+ access_bit_width = 8;
+ } else {
+ /*
+ * Keep backing off to the previous power of two until the address
+ * specified in the GAS is aligned to it.
+ */
+ while (!UACPI_IS_ALIGNED(
+ gas->address, access_bit_width / 8, uacpi_u64
+ ))
+ access_bit_width /= 2;
+ }
+ }
+
+ return UACPI_MIN(
+ access_bit_width,
+ gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO ? 32 : 64
+ );
+}
+
+static uacpi_status gas_validate(
+ const struct acpi_gas *gas, uacpi_u8 *access_bit_width,
+ uacpi_u8 *bit_width
+)
+{
+ uacpi_size total_width, aligned_width;
+
+ if (uacpi_unlikely(gas == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (!gas->address)
+ return UACPI_STATUS_NOT_FOUND;
+
+ if (gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_IO &&
+ gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY) {
+ uacpi_warn("unsupported GAS address space '%s' (%d)\n",
+ uacpi_address_space_to_string(gas->address_space_id),
+ gas->address_space_id);
+ return UACPI_STATUS_UNIMPLEMENTED;
+ }
+
+ if (gas->access_size > 4) {
+ uacpi_warn("unsupported GAS access size %d\n",
+ gas->access_size);
+ return UACPI_STATUS_UNIMPLEMENTED;
+ }
+
+ *access_bit_width = gas_get_access_bit_width(gas);
+
+ total_width = gas->register_bit_offset + gas->register_bit_width;
+ aligned_width = UACPI_ALIGN_UP(total_width, *access_bit_width, uacpi_size);
+
+ if (uacpi_unlikely(aligned_width > 64)) {
+ uacpi_warn(
+ "GAS register total width is too large: %zu\n", total_width
+ );
+ return UACPI_STATUS_UNIMPLEMENTED;
+ }
+
+ *bit_width = total_width;
+ return UACPI_STATUS_OK;
+}
+
+/*
+ * Apparently both reading and writing a GAS work differently from operation
+ * regions in that bit offsets are not respected when writing the data.
+ *
+ * Let's follow ACPICA's approach here so that we don't accidentally
+ * break any quirky hardware.
+ */
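+/*
+ * For instance, a mapped GAS with a zero bit offset, total_bit_width=16 and
+ * access_bit_width=8 is read as two consecutive 8-bit accesses, with the
+ * second byte shifted left by 8 and OR'd into the result. Accesses that fall
+ * entirely within the bit offset are skipped rather than performed.
+ */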
+uacpi_status uacpi_gas_read_mapped(
+ const uacpi_mapped_gas *gas, uacpi_u64 *out_value
+)
+{
+ uacpi_status ret;
+ uacpi_u8 access_byte_width;
+ uacpi_u8 bit_offset, bits_left, index = 0;
+ uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF;
+ uacpi_size offset = 0;
+
+ bit_offset = gas->bit_offset;
+ bits_left = gas->total_bit_width;
+
+ access_byte_width = gas->access_bit_width / 8;
+
+ if (access_byte_width < 8)
+ mask = ~(mask << gas->access_bit_width);
+
+ *out_value = 0;
+
+ while (bits_left) {
+ if (bit_offset >= gas->access_bit_width) {
+ data = 0;
+ bit_offset -= gas->access_bit_width;
+ } else {
+ ret = gas->read(gas->mapping, offset, access_byte_width, &data);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ *out_value |= (data & mask) << (index * gas->access_bit_width);
+ bits_left -= UACPI_MIN(bits_left, gas->access_bit_width);
+ ++index;
+ offset += access_byte_width;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_gas_write_mapped(
+ const uacpi_mapped_gas *gas, uacpi_u64 in_value
+)
+{
+ uacpi_status ret;
+ uacpi_u8 access_byte_width;
+ uacpi_u8 bit_offset, bits_left, index = 0;
+ uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF;
+ uacpi_size offset = 0;
+
+ bit_offset = gas->bit_offset;
+ bits_left = gas->total_bit_width;
+ access_byte_width = gas->access_bit_width / 8;
+
+ if (access_byte_width < 8)
+ mask = ~(mask << gas->access_bit_width);
+
+ while (bits_left) {
+ data = (in_value >> (index * gas->access_bit_width)) & mask;
+
+ if (bit_offset >= gas->access_bit_width) {
+ bit_offset -= gas->access_bit_width;
+ } else {
+ ret = gas->write(gas->mapping, offset, access_byte_width, data);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ bits_left -= UACPI_MIN(bits_left, gas->access_bit_width);
+ ++index;
+ offset += access_byte_width;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static void unmap_gas_io(uacpi_handle io_handle, uacpi_size size)
+{
+ UACPI_UNUSED(size);
+ uacpi_kernel_io_unmap(io_handle);
+}
+
+uacpi_status uacpi_map_gas_noalloc(
+ const struct acpi_gas *gas, uacpi_mapped_gas *out_mapped
+)
+{
+ uacpi_status ret;
+ uacpi_u8 access_bit_width, total_width;
+
+ ret = gas_validate(gas, &access_bit_width, &total_width);
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
+ if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_MEMORY) {
+ out_mapped->mapping = uacpi_kernel_map(gas->address, total_width / 8);
+ if (uacpi_unlikely(out_mapped->mapping == UACPI_NULL))
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ out_mapped->read = uacpi_system_memory_read;
+ out_mapped->write = uacpi_system_memory_write;
+ out_mapped->unmap = uacpi_kernel_unmap;
+ } else { // IO, validated by gas_validate above
+ ret = uacpi_kernel_io_map(gas->address, total_width / 8, &out_mapped->mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ out_mapped->read = uacpi_system_io_read;
+ out_mapped->write = uacpi_system_io_write;
+ out_mapped->unmap = unmap_gas_io;
+ }
+
+ out_mapped->access_bit_width = access_bit_width;
+ out_mapped->total_bit_width = total_width;
+ out_mapped->bit_offset = gas->register_bit_offset;
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_map_gas(
+ const struct acpi_gas *gas, uacpi_mapped_gas **out_mapped
+)
+{
+ uacpi_status ret;
+ uacpi_mapped_gas *mapping;
+
+ mapping = uacpi_kernel_alloc(sizeof(*mapping));
+ if (uacpi_unlikely(mapping == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ ret = uacpi_map_gas_noalloc(gas, mapping);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_free(mapping, sizeof(*mapping));
+ return ret;
+ }
+
+ *out_mapped = mapping;
+ return ret;
+}
+
+void uacpi_unmap_gas_nofree(uacpi_mapped_gas *gas)
+{
+ gas->unmap(gas->mapping, gas->access_bit_width / 8);
+}
+
+void uacpi_unmap_gas(uacpi_mapped_gas *gas)
+{
+ uacpi_unmap_gas_nofree(gas);
+ uacpi_free(gas, sizeof(*gas));
+}
+
+uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *out_value)
+{
+ uacpi_status ret;
+ uacpi_mapped_gas mapping;
+
+ ret = uacpi_map_gas_noalloc(gas, &mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_gas_read_mapped(&mapping, out_value);
+ uacpi_unmap_gas_nofree(&mapping);
+
+ return ret;
+}
+
+uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 in_value)
+{
+ uacpi_status ret;
+ uacpi_mapped_gas mapping;
+
+ ret = uacpi_map_gas_noalloc(gas, &mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_gas_write_mapped(&mapping, in_value);
+ uacpi_unmap_gas_nofree(&mapping);
+
+ return ret;
+}
+
+uacpi_status uacpi_system_memory_read(
+ void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
+)
+{
+ ptr = UACPI_PTR_ADD(ptr, offset);
+
+ switch (width) {
+ case 1:
+ *out = *(volatile uacpi_u8*)ptr;
+ break;
+ case 2:
+ *out = *(volatile uacpi_u16*)ptr;
+ break;
+ case 4:
+ *out = *(volatile uacpi_u32*)ptr;
+ break;
+ case 8:
+ *out = *(volatile uacpi_u64*)ptr;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_system_memory_write(
+ void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
+)
+{
+ ptr = UACPI_PTR_ADD(ptr, offset);
+
+ switch (width) {
+ case 1:
+ *(volatile uacpi_u8*)ptr = in;
+ break;
+ case 2:
+ *(volatile uacpi_u16*)ptr = in;
+ break;
+ case 4:
+ *(volatile uacpi_u32*)ptr = in;
+ break;
+ case 8:
+ *(volatile uacpi_u64*)ptr = in;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+union integer_data {
+ uacpi_u8 byte;
+ uacpi_u16 word;
+ uacpi_u32 dword;
+ uacpi_u64 qword;
+};
+
+uacpi_status uacpi_system_io_read(
+ uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
+)
+{
+ uacpi_status ret;
+ union integer_data data = {
+ .qword = 0,
+ };
+
+ switch (width) {
+ case 1:
+ ret = uacpi_kernel_io_read8(handle, offset, &data.byte);
+ break;
+ case 2:
+ ret = uacpi_kernel_io_read16(handle, offset, &data.word);
+ break;
+ case 4:
+ ret = uacpi_kernel_io_read32(handle, offset, &data.dword);
+ break;
+ default:
+ uacpi_error(
+ "invalid SystemIO read %p@%zu width=%d\n",
+ handle, offset, width
+ );
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (uacpi_likely_success(ret))
+ *out = data.qword;
+ return ret;
+}
+
+uacpi_status uacpi_system_io_write(
+ uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
+)
+{
+ uacpi_status ret;
+
+ switch (width) {
+ case 1:
+ ret = uacpi_kernel_io_write8(handle, offset, in);
+ break;
+ case 2:
+ ret = uacpi_kernel_io_write16(handle, offset, in);
+ break;
+ case 4:
+ ret = uacpi_kernel_io_write32(handle, offset, in);
+ break;
+ default:
+ uacpi_error(
+ "invalid SystemIO write %p@%zu width=%d\n",
+ handle, offset, width
+ );
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return ret;
+}
+
+uacpi_status uacpi_pci_read(
+ uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out
+)
+{
+ uacpi_status ret;
+ union integer_data data = {
+ .qword = 0,
+ };
+
+ switch (width) {
+ case 1:
+ ret = uacpi_kernel_pci_read8(handle, offset, &data.byte);
+ break;
+ case 2:
+ ret = uacpi_kernel_pci_read16(handle, offset, &data.word);
+ break;
+ case 4:
+ ret = uacpi_kernel_pci_read32(handle, offset, &data.dword);
+ break;
+ default:
+ uacpi_error(
+ "invalid PCI_Config read %p@%zu width=%d\n",
+ handle, offset, width
+ );
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (uacpi_likely_success(ret))
+ *out = data.qword;
+ return ret;
+}
+
+uacpi_status uacpi_pci_write(
+ uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in
+)
+{
+ uacpi_status ret;
+
+ switch (width) {
+ case 1:
+ ret = uacpi_kernel_pci_write8(handle, offset, in);
+ break;
+ case 2:
+ ret = uacpi_kernel_pci_write16(handle, offset, in);
+ break;
+ case 4:
+ ret = uacpi_kernel_pci_write32(handle, offset, in);
+ break;
+ default:
+ uacpi_error(
+ "invalid PCI_Config write %p@%zu width=%d\n",
+ handle, offset, width
+ );
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return ret;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/mutex.c b/sys/dev/acpi/uacpi/mutex.c
new file mode 100644
index 0000000..44cbac3
--- /dev/null
+++ b/sys/dev/acpi/uacpi/mutex.c
@@ -0,0 +1,396 @@
+#include <uacpi/platform/atomic.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/registers.h>
+#include <uacpi/internal/context.h>
+#include <uacpi/kernel_api.h>
+#include <uacpi/internal/namespace.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+#ifndef UACPI_REDUCED_HARDWARE
+
+#define GLOBAL_LOCK_PENDING (1 << 0)
+
+#define GLOBAL_LOCK_OWNED_BIT 1
+#define GLOBAL_LOCK_OWNED (1 << GLOBAL_LOCK_OWNED_BIT)
+
+#define GLOBAL_LOCK_MASK 3u
+
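+/*
+ * Try to take the FACS global lock: returns UACPI_TRUE if the lock was free
+ * and is now owned by us, or UACPI_FALSE if it is currently owned elsewhere,
+ * in which case the pending bit has been set and the caller must wait for
+ * the firmware's release notification.
+ */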
+static uacpi_bool try_acquire_global_lock_from_firmware(uacpi_u32 *lock)
+{
+ uacpi_u32 value, new_value;
+ uacpi_bool was_owned;
+
+ value = *(volatile uacpi_u32*)lock;
+ do {
+ was_owned = (value & GLOBAL_LOCK_OWNED) >> GLOBAL_LOCK_OWNED_BIT;
+
+ // Clear both owned & pending bits.
+ new_value = value & ~GLOBAL_LOCK_MASK;
+
+ // Set owned unconditionally
+ new_value |= GLOBAL_LOCK_OWNED;
+
+ // Set pending iff the lock was owned at the time of reading
+ if (was_owned)
+ new_value |= GLOBAL_LOCK_PENDING;
+ } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));
+
+ return !was_owned;
+}
+
+static uacpi_bool do_release_global_lock_to_firmware(uacpi_u32 *lock)
+{
+ uacpi_u32 value, new_value;
+
+ value = *(volatile uacpi_u32*)lock;
+ do {
+ new_value = value & ~GLOBAL_LOCK_MASK;
+ } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));
+
+ return value & GLOBAL_LOCK_PENDING;
+}
+
+static uacpi_status uacpi_acquire_global_lock_from_firmware(void)
+{
+ uacpi_cpu_flags flags;
+ uacpi_u16 spins = 0;
+ uacpi_bool success;
+
+ if (!g_uacpi_rt_ctx.has_global_lock)
+ return UACPI_STATUS_OK;
+
+ flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
+ for (;;) {
+ spins++;
+ uacpi_trace(
+ "trying to acquire the global lock from firmware... (attempt %u)\n",
+ spins
+ );
+
+ success = try_acquire_global_lock_from_firmware(
+ &g_uacpi_rt_ctx.facs->global_lock
+ );
+ if (success)
+ break;
+
+ if (uacpi_unlikely(spins == 0xFFFF))
+ break;
+
+ g_uacpi_rt_ctx.global_lock_pending = UACPI_TRUE;
+ uacpi_trace(
+ "global lock is owned by firmware, waiting for a release "
+ "notification...\n"
+ );
+ uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
+
+ uacpi_kernel_wait_for_event(g_uacpi_rt_ctx.global_lock_event, 0xFFFF);
+ flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
+ }
+
+ g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
+ uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
+
+ if (uacpi_unlikely(!success)) {
+ uacpi_error("unable to acquire global lock after %u attempts\n", spins);
+ return UACPI_STATUS_HARDWARE_TIMEOUT;
+ }
+
+ uacpi_trace("global lock successfully acquired after %u attempt%s\n",
+ spins, spins > 1 ? "s" : "");
+ return UACPI_STATUS_OK;
+}
+
+static void uacpi_release_global_lock_to_firmware(void)
+{
+ if (!g_uacpi_rt_ctx.has_global_lock)
+ return;
+
+ uacpi_trace("releasing the global lock to firmware...\n");
+ if (do_release_global_lock_to_firmware(&g_uacpi_rt_ctx.facs->global_lock)) {
+ uacpi_trace("notifying firmware of the global lock release since the "
+ "pending bit was set\n");
+ uacpi_write_register_field(UACPI_REGISTER_FIELD_GBL_RLS, 1);
+ }
+}
+#endif
+
+UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
+ uacpi_status uacpi_acquire_global_lock_from_firmware(void)
+)
+UACPI_STUB_IF_REDUCED_HARDWARE(
+ void uacpi_release_global_lock_to_firmware(void)
+)
+
+uacpi_status uacpi_acquire_native_mutex_with_timeout(
+ uacpi_handle mtx, uacpi_u16 timeout
+)
+{
+ uacpi_status ret;
+
+ if (uacpi_unlikely(mtx == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_kernel_acquire_mutex(mtx, timeout);
+ if (uacpi_likely_success(ret))
+ return ret;
+
+ if (uacpi_unlikely(ret != UACPI_STATUS_TIMEOUT || timeout == 0xFFFF)) {
+ uacpi_error(
+ "unexpected status %08X (%s) while acquiring %p (timeout=%04X)\n",
+ ret, uacpi_status_to_string(ret), mtx, timeout
+ );
+ }
+
+ return ret;
+}
+
+uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(out_seq == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_acquire_native_mutex_with_timeout(
+ g_uacpi_rt_ctx.global_lock_mutex->handle, timeout
+ );
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
+ ret = uacpi_acquire_global_lock_from_firmware();
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);
+ return ret;
+ }
+
+ if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_seq_num == 0xFFFFFFFF))
+ g_uacpi_rt_ctx.global_lock_seq_num = 0;
+
+ *out_seq = g_uacpi_rt_ctx.global_lock_seq_num++;
+ g_uacpi_rt_ctx.global_lock_acquired = UACPI_TRUE;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_release_global_lock(uacpi_u32 seq)
+{
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(!g_uacpi_rt_ctx.global_lock_acquired ||
+ seq != g_uacpi_rt_ctx.global_lock_seq_num))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE;
+ uacpi_release_global_lock_to_firmware();
+ uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex)
+{
+ uacpi_thread_id id;
+
+ id = UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner);
+ return id == uacpi_kernel_get_thread_id();
+}
+
+uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout)
+{
+ uacpi_thread_id this_id;
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ this_id = uacpi_kernel_get_thread_id();
+ if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) {
+ if (uacpi_unlikely(mutex->depth == 0xFFFF)) {
+ uacpi_warn(
+ "failing an attempt to acquire mutex @%p, too many recursive "
+ "acquires\n", mutex
+ );
+ return UACPI_STATUS_DENIED;
+ }
+
+ mutex->depth++;
+ return ret;
+ }
+
+ uacpi_namespace_write_unlock();
+ ret = uacpi_acquire_native_mutex_with_timeout(mutex->handle, timeout);
+ if (ret != UACPI_STATUS_OK)
+ goto out;
+
+ if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) {
+ ret = uacpi_acquire_global_lock_from_firmware();
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_release_native_mutex(mutex->handle);
+ goto out;
+ }
+ }
+
+ UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id);
+ mutex->depth = 1;
+
+out:
+ uacpi_namespace_write_lock();
+ return ret;
+}
+
+uacpi_status uacpi_release_aml_mutex(uacpi_mutex *mutex)
+{
+ if (mutex->depth-- > 1)
+ return UACPI_STATUS_OK;
+
+ if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle)
+ uacpi_release_global_lock_to_firmware();
+
+ UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE);
+ uacpi_release_native_mutex(mutex->handle);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock)
+{
+ lock->mutex = uacpi_kernel_create_mutex();
+ if (uacpi_unlikely(lock->mutex == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ lock->owner = UACPI_THREAD_ID_NONE;
+ lock->depth = 0;
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock)
+{
+ if (uacpi_unlikely(lock->depth)) {
+ uacpi_warn(
+ "de-initializing active recursive lock %p with depth=%zu\n",
+ lock, lock->depth
+ );
+ lock->depth = 0;
+ }
+
+ lock->owner = UACPI_THREAD_ID_NONE;
+
+ if (lock->mutex != UACPI_NULL) {
+ uacpi_kernel_free_mutex(lock->mutex);
+ lock->mutex = UACPI_NULL;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock)
+{
+ uacpi_thread_id this_id;
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ this_id = uacpi_kernel_get_thread_id();
+ if (UACPI_ATOMIC_LOAD_THREAD_ID(&lock->owner) == this_id) {
+ lock->depth++;
+ return ret;
+ }
+
+ ret = uacpi_acquire_native_mutex(lock->mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, this_id);
+ lock->depth = 1;
+ return ret;
+}
+
+uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock)
+{
+ if (lock->depth-- > 1)
+ return UACPI_STATUS_OK;
+
+ UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, UACPI_THREAD_ID_NONE);
+ return uacpi_release_native_mutex(lock->mutex);
+}
+
+uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock)
+{
+ lock->read_mutex = uacpi_kernel_create_mutex();
+ if (uacpi_unlikely(lock->read_mutex == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ lock->write_mutex = uacpi_kernel_create_mutex();
+ if (uacpi_unlikely(lock->write_mutex == UACPI_NULL)) {
+ uacpi_kernel_free_mutex(lock->read_mutex);
+ lock->read_mutex = UACPI_NULL;
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ lock->num_readers = 0;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock)
+{
+ if (uacpi_unlikely(lock->num_readers)) {
+ uacpi_warn("de-initializing rw_lock %p with %zu active readers\n",
+ lock, lock->num_readers);
+ lock->num_readers = 0;
+ }
+
+ if (lock->read_mutex != UACPI_NULL) {
+ uacpi_kernel_free_mutex(lock->read_mutex);
+ lock->read_mutex = UACPI_NULL;
+ }
+ if (lock->write_mutex != UACPI_NULL) {
+ uacpi_kernel_free_mutex(lock->write_mutex);
+ lock->write_mutex = UACPI_NULL;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
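+/*
+ * Reader side of the rw lock: the first reader in acquires the write mutex,
+ * blocking writers for as long as at least one reader is active, and the
+ * last reader out releases it. The read mutex only protects the reader
+ * counter itself and is held briefly.
+ */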
+uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock)
+{
+ uacpi_status ret;
+
+ ret = uacpi_acquire_native_mutex(lock->read_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (lock->num_readers++ == 0) {
+ ret = uacpi_acquire_native_mutex(lock->write_mutex);
+ if (uacpi_unlikely_error(ret))
+ lock->num_readers = 0;
+ }
+
+ uacpi_kernel_release_mutex(lock->read_mutex);
+ return ret;
+}
+
+uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock)
+{
+ uacpi_status ret;
+
+ ret = uacpi_acquire_native_mutex(lock->read_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (lock->num_readers-- == 1)
+ uacpi_release_native_mutex(lock->write_mutex);
+
+ uacpi_kernel_release_mutex(lock->read_mutex);
+ return ret;
+}
+
+uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock)
+{
+ return uacpi_acquire_native_mutex(lock->write_mutex);
+}
+
+uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock)
+{
+ return uacpi_release_native_mutex(lock->write_mutex);
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/namespace.c b/sys/dev/acpi/uacpi/namespace.c
new file mode 100644
index 0000000..e847dea
--- /dev/null
+++ b/sys/dev/acpi/uacpi/namespace.c
@@ -0,0 +1,1081 @@
+#include <uacpi/namespace.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/types.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/interpreter.h>
+#include <uacpi/internal/opregion.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/kernel_api.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+#define UACPI_REV_VALUE 2
+#define UACPI_OS_VALUE "Microsoft Windows NT"
+
+#define MAKE_PREDEFINED(c0, c1, c2, c3) \
+ { \
+ .name.text = { c0, c1, c2, c3 }, \
+ .flags = UACPI_NAMESPACE_NODE_PREDEFINED \
+ }
+
+static uacpi_namespace_node
+predefined_namespaces[UACPI_PREDEFINED_NAMESPACE_MAX + 1] = {
+ [UACPI_PREDEFINED_NAMESPACE_ROOT] = MAKE_PREDEFINED('\\', 0, 0, 0),
+ [UACPI_PREDEFINED_NAMESPACE_GPE] = MAKE_PREDEFINED('_', 'G', 'P', 'E'),
+ [UACPI_PREDEFINED_NAMESPACE_PR] = MAKE_PREDEFINED('_', 'P', 'R', '_'),
+ [UACPI_PREDEFINED_NAMESPACE_SB] = MAKE_PREDEFINED('_', 'S', 'B', '_'),
+ [UACPI_PREDEFINED_NAMESPACE_SI] = MAKE_PREDEFINED('_', 'S', 'I', '_'),
+ [UACPI_PREDEFINED_NAMESPACE_TZ] = MAKE_PREDEFINED('_', 'T', 'Z', '_'),
+ [UACPI_PREDEFINED_NAMESPACE_GL] = MAKE_PREDEFINED('_', 'G', 'L', '_'),
+ [UACPI_PREDEFINED_NAMESPACE_OS] = MAKE_PREDEFINED('_', 'O', 'S', '_'),
+ [UACPI_PREDEFINED_NAMESPACE_OSI] = MAKE_PREDEFINED('_', 'O', 'S', 'I'),
+ [UACPI_PREDEFINED_NAMESPACE_REV] = MAKE_PREDEFINED('_', 'R', 'E', 'V'),
+};
+
+static struct uacpi_rw_lock namespace_lock;
+
+uacpi_status uacpi_namespace_read_lock(void)
+{
+ return uacpi_rw_lock_read(&namespace_lock);
+}
+
+uacpi_status uacpi_namespace_read_unlock(void)
+{
+ return uacpi_rw_unlock_read(&namespace_lock);
+}
+
+uacpi_status uacpi_namespace_write_lock(void)
+{
+ return uacpi_rw_lock_write(&namespace_lock);
+}
+
+uacpi_status uacpi_namespace_write_unlock(void)
+{
+ return uacpi_rw_unlock_write(&namespace_lock);
+}
+
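+/*
+ * Create the object attached to a predefined node: the root gets a
+ * placeholder (the real root object lives in the global context), _OS_ a
+ * string, _REV an integer, _GL_ a mutex and _OSI a native method; everything
+ * else starts out uninitialized.
+ */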
+static uacpi_object *make_object_for_predefined(
+ enum uacpi_predefined_namespace ns
+)
+{
+ uacpi_object *obj;
+
+ switch (ns) {
+ case UACPI_PREDEFINED_NAMESPACE_ROOT:
+ /*
+ * The real root object is stored in the global context, whereas the \
+ * node gets a placeholder uninitialized object instead. This is to
+ * protect against CopyObject(JUNK, \), so that all of the opregion and
+ * notify handlers are preserved if AML decides to do that.
+ */
+ g_uacpi_rt_ctx.root_object = uacpi_create_object(UACPI_OBJECT_DEVICE);
+ if (uacpi_unlikely(g_uacpi_rt_ctx.root_object == UACPI_NULL))
+ return UACPI_NULL;
+
+ obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ break;
+
+ case UACPI_PREDEFINED_NAMESPACE_OS:
+ obj = uacpi_create_object(UACPI_OBJECT_STRING);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return obj;
+
+ obj->buffer->text = uacpi_kernel_alloc(sizeof(UACPI_OS_VALUE));
+ if (uacpi_unlikely(obj->buffer->text == UACPI_NULL)) {
+ uacpi_object_unref(obj);
+ return UACPI_NULL;
+ }
+
+ obj->buffer->size = sizeof(UACPI_OS_VALUE);
+ uacpi_memcpy(obj->buffer->text, UACPI_OS_VALUE, obj->buffer->size);
+ break;
+
+ case UACPI_PREDEFINED_NAMESPACE_REV:
+ obj = uacpi_create_object(UACPI_OBJECT_INTEGER);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return obj;
+
+ obj->integer = UACPI_REV_VALUE;
+ break;
+
+ case UACPI_PREDEFINED_NAMESPACE_GL:
+ obj = uacpi_create_object(UACPI_OBJECT_MUTEX);
+ if (uacpi_likely(obj != UACPI_NULL)) {
+ uacpi_shareable_ref(obj->mutex);
+ g_uacpi_rt_ctx.global_lock_mutex = obj->mutex;
+ }
+ break;
+
+ case UACPI_PREDEFINED_NAMESPACE_OSI:
+ obj = uacpi_create_object(UACPI_OBJECT_METHOD);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return obj;
+
+ obj->method->native_call = UACPI_TRUE;
+ obj->method->handler = uacpi_osi;
+ obj->method->args = 1;
+ break;
+
+ default:
+ obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ break;
+ }
+
+ return obj;
+}
+
+static void namespace_node_detach_object(uacpi_namespace_node *node)
+{
+ uacpi_object *object;
+
+ object = uacpi_namespace_node_get_object(node);
+ if (object != UACPI_NULL) {
+ if (object->type == UACPI_OBJECT_OPERATION_REGION)
+ uacpi_opregion_uninstall_handler(node);
+
+ uacpi_object_unref(node->object);
+ node->object = UACPI_NULL;
+ }
+}
+
+static void free_namespace_node(uacpi_handle handle)
+{
+ uacpi_namespace_node *node = handle;
+
+ if (uacpi_likely(!uacpi_namespace_node_is_predefined(node))) {
+ uacpi_free(node, sizeof(*node));
+ return;
+ }
+
+ node->flags = UACPI_NAMESPACE_NODE_PREDEFINED;
+ node->object = UACPI_NULL;
+ node->parent = UACPI_NULL;
+ node->child = UACPI_NULL;
+ node->next = UACPI_NULL;
+}
+
+uacpi_status uacpi_initialize_namespace(void)
+{
+ enum uacpi_predefined_namespace ns;
+ uacpi_object *obj;
+ uacpi_namespace_node *node;
+ uacpi_status ret;
+
+ ret = uacpi_rw_lock_init(&namespace_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ for (ns = 0; ns <= UACPI_PREDEFINED_NAMESPACE_MAX; ns++) {
+ node = &predefined_namespaces[ns];
+ uacpi_shareable_init(node);
+
+ obj = make_object_for_predefined(ns);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ node->object = uacpi_create_internal_reference(
+ UACPI_REFERENCE_KIND_NAMED, obj
+ );
+ if (uacpi_unlikely(node->object == UACPI_NULL)) {
+ uacpi_object_unref(obj);
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ uacpi_object_unref(obj);
+ }
+
+ for (ns = UACPI_PREDEFINED_NAMESPACE_GPE;
+ ns <= UACPI_PREDEFINED_NAMESPACE_MAX; ns++) {
+
+ /*
+ * Skip the installation of \_OSI if it was disabled by the user.
+ * We still create the object, but it's not attached to the namespace.
+ */
+ if (ns == UACPI_PREDEFINED_NAMESPACE_OSI &&
+ uacpi_check_flag(UACPI_FLAG_NO_OSI))
+ continue;
+
+ uacpi_namespace_node_install(
+ uacpi_namespace_root(), &predefined_namespaces[ns]
+ );
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+void uacpi_deinitialize_namespace(void)
+{
+ uacpi_status ret;
+ uacpi_namespace_node *current, *next = UACPI_NULL;
+ uacpi_u32 depth = 1;
+
+ current = uacpi_namespace_root();
+
+ ret = uacpi_namespace_write_lock();
+
+ while (depth) {
+ next = next == UACPI_NULL ? current->child : next->next;
+
+ /*
+ * The previous value of 'next' was the last child of this subtree, so we
+ * can now remove the entire scope of 'current->child'.
+ */
+ if (next == UACPI_NULL) {
+ depth--;
+
+ // Wipe the subtree
+ while (current->child != UACPI_NULL)
+ uacpi_namespace_node_uninstall(current->child);
+
+ // Reset the pointers back as if this iteration never happened
+ next = current;
+ current = current->parent;
+
+ continue;
+ }
+
+ /*
+ * We have more nodes to process, so proceed to the next one: either the
+ * child of the 'next' node, if one exists, or its peer
+ */
+ if (next->child) {
+ depth++;
+ current = next;
+ next = UACPI_NULL;
+ }
+
+ // This node has no children, move on to its peer
+ }
+
+ namespace_node_detach_object(uacpi_namespace_root());
+ free_namespace_node(uacpi_namespace_root());
+
+ if (ret == UACPI_STATUS_OK)
+ uacpi_namespace_write_unlock();
+
+ uacpi_object_unref(g_uacpi_rt_ctx.root_object);
+ g_uacpi_rt_ctx.root_object = UACPI_NULL;
+
+ uacpi_mutex_unref(g_uacpi_rt_ctx.global_lock_mutex);
+ g_uacpi_rt_ctx.global_lock_mutex = UACPI_NULL;
+
+ uacpi_rw_lock_deinit(&namespace_lock);
+}
+
+uacpi_namespace_node *uacpi_namespace_root(void)
+{
+ return &predefined_namespaces[UACPI_PREDEFINED_NAMESPACE_ROOT];
+}
+
+uacpi_namespace_node *uacpi_namespace_get_predefined(
+ enum uacpi_predefined_namespace ns
+)
+{
+ if (uacpi_unlikely(ns > UACPI_PREDEFINED_NAMESPACE_MAX)) {
+ uacpi_warn("requested invalid predefined namespace %d\n", ns);
+ return UACPI_NULL;
+ }
+
+ return &predefined_namespaces[ns];
+}
+
+uacpi_namespace_node *uacpi_namespace_node_alloc(uacpi_object_name name)
+{
+ uacpi_namespace_node *ret;
+
+ ret = uacpi_kernel_alloc_zeroed(sizeof(*ret));
+ if (uacpi_unlikely(ret == UACPI_NULL))
+ return ret;
+
+ uacpi_shareable_init(ret);
+ ret->name = name;
+ return ret;
+}
+
+void uacpi_namespace_node_unref(uacpi_namespace_node *node)
+{
+ uacpi_shareable_unref_and_delete_if_last(node, free_namespace_node);
+}
+
+uacpi_status uacpi_namespace_node_install(
+ uacpi_namespace_node *parent,
+ uacpi_namespace_node *node
+)
+{
+ if (parent == UACPI_NULL)
+ parent = uacpi_namespace_root();
+
+ if (uacpi_unlikely(uacpi_namespace_node_is_dangling(node))) {
+ uacpi_warn("attempting to install a dangling namespace node %.4s\n",
+ node->name.text);
+ return UACPI_STATUS_NAMESPACE_NODE_DANGLING;
+ }
+
+ if (parent->child == UACPI_NULL) {
+ parent->child = node;
+ } else {
+ uacpi_namespace_node *prev = parent->child;
+
+ while (prev->next != UACPI_NULL)
+ prev = prev->next;
+
+ prev->next = node;
+ }
+
+ node->parent = parent;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node)
+{
+ return node->flags & UACPI_NAMESPACE_NODE_FLAG_ALIAS;
+}
+
+uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node)
+{
+ return node->flags & UACPI_NAMESPACE_NODE_FLAG_DANGLING;
+}
+
+uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node)
+{
+ return node->flags & UACPI_NAMESPACE_NODE_FLAG_TEMPORARY;
+}
+
+uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node)
+{
+ return node->flags & UACPI_NAMESPACE_NODE_PREDEFINED;
+}
+
+uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node)
+{
+ uacpi_namespace_node *prev;
+
+ if (uacpi_unlikely(uacpi_namespace_node_is_dangling(node))) {
+ uacpi_warn("attempting to uninstall a dangling namespace node %.4s\n",
+ node->name.text);
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+
+ /*
+ * The way to trigger this is as follows:
+ *
+ * Method (FOO) {
+ * // Temporary device, will be deleted upon returning from FOO
+ * Device (\BAR) {
+ * }
+ *
+ * //
+ * // Load TBL where TBL is:
+ * // Scope (\BAR) {
+ * // Name (TEST, 123)
+ * // }
+ * //
+ * Load(TBL)
+ * }
+ *
+ * In the above example, TEST is a permanent node attached by bad AML to a
+ * temporary node created inside the FOO method at \BAR. The cleanup code
+ * will attempt to remove the \BAR device upon exit from FOO, but that is
+ * no longer possible as there's now a permanent child attached to it.
+ */
+ if (uacpi_unlikely(node->child != UACPI_NULL)) {
+ uacpi_warn(
+ "refusing to uninstall node %.4s with a child (%.4s)\n",
+ node->name.text, node->child->name.text
+ );
+ return UACPI_STATUS_DENIED;
+ }
+
+ /*
+ * Even though namespace_node is reference-counted it still has an 'invalid'
+ * state that is entered after it is uninstalled from the global namespace.
+ *
+ * Reference counting is only needed to combat dangling pointer issues
+ * where bad AML might try to prolong a local object's lifetime by returning
+ * it from a method, or CopyObject-ing it somewhere. In that case the
+ * namespace node object itself is still alive, but no longer has a valid
+ * object associated with it.
+ *
+ * Example:
+ * Method (BAD) {
+ * OperationRegion(REG, SystemMemory, 0xDEADBEEF, 4)
+ * Field (REG, AnyAcc, NoLock) {
+ * FILD, 8,
+ * }
+ *
+ * Return (RefOf(FILD))
+ * }
+ *
+ * // Local0 is now the sole owner of the 'FILD' object that under the
+ * // hood is still referencing the 'REG' operation region object from
+ * // the 'BAD' method.
+ * Local0 = DerefOf(BAD())
+ *
+ * This is done to prevent potentially very deep recursion, where an object
+ * frees a namespace node that frees an attached object that frees yet
+ * another namespace node, as well as potential infinite cycles between a
+ * namespace node and an object.
+ */
+ namespace_node_detach_object(node);
+
+ prev = node->parent ? node->parent->child : UACPI_NULL;
+
+ if (prev == node) {
+ node->parent->child = node->next;
+ } else {
+ while (uacpi_likely(prev != UACPI_NULL) && prev->next != node)
+ prev = prev->next;
+
+ if (uacpi_unlikely(prev == UACPI_NULL)) {
+ uacpi_warn(
+ "trying to uninstall a node %.4s (%p) not linked to any peer\n",
+ node->name.text, node
+ );
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+
+ prev->next = node->next;
+ }
+
+ node->flags |= UACPI_NAMESPACE_NODE_FLAG_DANGLING;
+ uacpi_namespace_node_unref(node);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_namespace_node *uacpi_namespace_node_find_sub_node(
+ uacpi_namespace_node *parent,
+ uacpi_object_name name
+)
+{
+ uacpi_namespace_node *node;
+
+ if (parent == UACPI_NULL)
+ parent = uacpi_namespace_root();
+
+ node = parent->child;
+
+ while (node) {
+ if (node->name.id == name.id)
+ return node;
+
+ node = node->next;
+ }
+
+ return UACPI_NULL;
+}
+
+static uacpi_object_name segment_to_name(
+ const uacpi_char **string, uacpi_size *in_out_size
+)
+{
+ uacpi_object_name out_name;
+ const uacpi_char *cursor = *string;
+ uacpi_size offset, bytes_left = *in_out_size;
+
+ for (offset = 0; offset < 4; offset++) {
+ if (bytes_left < 1 || *cursor == '.') {
+ out_name.text[offset] = '_';
+ continue;
+ }
+
+ out_name.text[offset] = *cursor++;
+ bytes_left--;
+ }
+
+ *string = cursor;
+ *in_out_size = bytes_left;
+ return out_name;
+}
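+
+/*
+ * Worked example for the helper above: resolving "_SB_.PCI0" calls
+ * segment_to_name() twice. The first call copies "_SB_" and stops at the
+ * '.', which the caller consumes, and the second call copies "PCI0". A
+ * short trailing segment such as "FOO" is padded with '_' to "FOO_", the
+ * canonical 4-character form name segments have in the namespace.
+ */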
+
+uacpi_status uacpi_namespace_node_resolve(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ enum uacpi_should_lock should_lock,
+ enum uacpi_may_search_above_parent may_search_above_parent,
+ enum uacpi_permanent_only permanent_only,
+ uacpi_namespace_node **out_node
+)
+{
+ uacpi_namespace_node *cur_node = parent;
+ uacpi_status ret = UACPI_STATUS_OK;
+ const uacpi_char *cursor = path;
+ uacpi_size bytes_left;
+ uacpi_char prev_char = 0;
+ uacpi_bool single_nameseg = UACPI_TRUE;
+
+ if (cur_node == UACPI_NULL)
+ cur_node = uacpi_namespace_root();
+
+ bytes_left = uacpi_strlen(path);
+
+ if (should_lock == UACPI_SHOULD_LOCK_YES) {
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ for (;;) {
+ if (bytes_left == 0)
+ goto out;
+
+ switch (*cursor) {
+ case '\\':
+ single_nameseg = UACPI_FALSE;
+
+ if (prev_char == '^') {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ cur_node = uacpi_namespace_root();
+ break;
+ case '^':
+ single_nameseg = UACPI_FALSE;
+
+ // Tried to go above the root scope
+ if (uacpi_unlikely(cur_node == uacpi_namespace_root())) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ cur_node = cur_node->parent;
+ break;
+ default:
+ break;
+ }
+
+ prev_char = *cursor;
+
+ switch (prev_char) {
+ case '^':
+ case '\\':
+ cursor++;
+ bytes_left--;
+ break;
+ default:
+ break;
+ }
+
+ if (prev_char != '^')
+ break;
+ }
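+
+ /*
+ * At this point every leading prefix has been consumed: for "\_SB_.PCI0"
+ * cur_node is the root, for "^^FOO" it has been moved up two scopes, and
+ * the loop below eats one 4-character name segment per iteration.
+ */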
+
+ while (bytes_left != 0) {
+ uacpi_object_name nameseg;
+
+ if (*cursor == '.') {
+ cursor++;
+ bytes_left--;
+ }
+
+ nameseg = segment_to_name(&cursor, &bytes_left);
+ if (bytes_left != 0 && single_nameseg)
+ single_nameseg = UACPI_FALSE;
+
+ cur_node = uacpi_namespace_node_find_sub_node(cur_node, nameseg);
+ if (cur_node == UACPI_NULL) {
+ if (may_search_above_parent == UACPI_MAY_SEARCH_ABOVE_PARENT_NO ||
+ !single_nameseg)
+ goto out;
+
+ parent = parent->parent;
+
+ while (parent) {
+ cur_node = uacpi_namespace_node_find_sub_node(parent, nameseg);
+ if (cur_node != UACPI_NULL)
+ goto out;
+
+ parent = parent->parent;
+ }
+
+ goto out;
+ }
+ }
+
+out:
+ if (uacpi_unlikely(ret == UACPI_STATUS_INVALID_ARGUMENT)) {
+ uacpi_warn("invalid path '%s'\n", path);
+ goto out_read_unlock;
+ }
+
+ if (cur_node == UACPI_NULL) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ goto out_read_unlock;
+ }
+
+ if (uacpi_namespace_node_is_temporary(cur_node) &&
+ permanent_only == UACPI_PERMANENT_ONLY_YES) {
+ uacpi_warn("denying access to temporary namespace node '%.4s'\n",
+ cur_node->name.text);
+ ret = UACPI_STATUS_DENIED;
+ goto out_read_unlock;
+ }
+
+ if (out_node != UACPI_NULL)
+ *out_node = cur_node;
+
+out_read_unlock:
+ if (should_lock == UACPI_SHOULD_LOCK_YES)
+ uacpi_namespace_read_unlock();
+ return ret;
+}
+
+uacpi_status uacpi_namespace_node_find(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ uacpi_namespace_node **out_node
+)
+{
+ return uacpi_namespace_node_resolve(
+ parent, path, UACPI_SHOULD_LOCK_YES, UACPI_MAY_SEARCH_ABOVE_PARENT_NO,
+ UACPI_PERMANENT_ONLY_YES, out_node
+ );
+}
+
+uacpi_status uacpi_namespace_node_resolve_from_aml_namepath(
+ uacpi_namespace_node *scope,
+ const uacpi_char *path,
+ uacpi_namespace_node **out_node
+)
+{
+ return uacpi_namespace_node_resolve(
+ scope, path, UACPI_SHOULD_LOCK_YES, UACPI_MAY_SEARCH_ABOVE_PARENT_YES,
+ UACPI_PERMANENT_ONLY_YES, out_node
+ );
+}
+
+uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node)
+{
+ if (node == UACPI_NULL || node->object == UACPI_NULL)
+ return UACPI_NULL;
+
+ return uacpi_unwrap_internal_reference(node->object);
+}
+
+uacpi_object *uacpi_namespace_node_get_object_typed(
+ const uacpi_namespace_node *node, uacpi_object_type_bits type_mask
+)
+{
+ uacpi_object *obj;
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return obj;
+
+ if (!uacpi_object_is_one_of(obj, type_mask))
+ return UACPI_NULL;
+
+ return obj;
+}
+
+uacpi_status uacpi_namespace_node_acquire_object_typed(
+ const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
+ uacpi_object **out_obj
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ obj = uacpi_namespace_node_get_object(node);
+
+ if (uacpi_unlikely(obj == UACPI_NULL) ||
+ !uacpi_object_is_one_of(obj, type_mask)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ uacpi_object_ref(obj);
+ *out_obj = obj;
+
+out:
+ uacpi_namespace_read_unlock();
+ return ret;
+}
+
+uacpi_status uacpi_namespace_node_acquire_object(
+ const uacpi_namespace_node *node, uacpi_object **out_obj
+)
+{
+ return uacpi_namespace_node_acquire_object_typed(
+ node, UACPI_OBJECT_ANY_BIT, out_obj
+ );
+}
+
+enum action {
+ ACTION_REACQUIRE,
+ ACTION_PUT,
+};
+
+static uacpi_status object_mutate_refcount(
+ uacpi_object *obj, void (*cb)(uacpi_object*)
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (uacpi_likely(!uacpi_object_is(obj, UACPI_OBJECT_REFERENCE))) {
+ cb(obj);
+ return ret;
+ }
+
+ /*
+ * Reference objects must be (un)referenced under at least a read lock, as
+ * this requires walking down the entire reference chain and dropping each
+ * object ref-count by 1. This might race with the interpreter and
+ * object_replace_child in case an object in the chain is CopyObject'ed
+ * into.
+ */
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ cb(obj);
+
+ uacpi_namespace_read_unlock();
+ return ret;
+}
+
+uacpi_status uacpi_namespace_node_reacquire_object(
+ uacpi_object *obj
+)
+{
+ return object_mutate_refcount(obj, uacpi_object_ref);
+}
+
+uacpi_status uacpi_namespace_node_release_object(uacpi_object *obj)
+{
+ return object_mutate_refcount(obj, uacpi_object_unref);
+}
+
+uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node)
+{
+ return node->name;
+}
+
+uacpi_status uacpi_namespace_node_type_unlocked(
+ const uacpi_namespace_node *node, uacpi_object_type *out_type
+)
+{
+ uacpi_object *obj;
+
+ if (uacpi_unlikely(node == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_NOT_FOUND;
+
+ *out_type = obj->type;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_namespace_node_type(
+ const uacpi_namespace_node *node, uacpi_object_type *out_type
+)
+{
+ uacpi_status ret;
+
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_namespace_node_type_unlocked(node, out_type);
+
+ uacpi_namespace_read_unlock();
+ return ret;
+}
+
+uacpi_status uacpi_namespace_node_is_one_of_unlocked(
+ const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, uacpi_bool *out
+)
+{
+ uacpi_object *obj;
+
+ if (uacpi_unlikely(node == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_NOT_FOUND;
+
+ *out = uacpi_object_is_one_of(obj, type_mask);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_namespace_node_is_one_of(
+ const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
+ uacpi_bool *out
+)
+{
+ uacpi_status ret;
+
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_namespace_node_is_one_of_unlocked(node, type_mask, out);
+
+ uacpi_namespace_read_unlock();
+ return ret;
+}
+
+uacpi_status uacpi_namespace_node_is(
+ const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out
+)
+{
+ return uacpi_namespace_node_is_one_of(
+ node, 1u << type, out
+ );
+}
+
+uacpi_status uacpi_namespace_do_for_each_child(
+ uacpi_namespace_node *node, uacpi_iteration_callback descending_callback,
+ uacpi_iteration_callback ascending_callback,
+ uacpi_object_type_bits type_mask, uacpi_u32 max_depth,
+ enum uacpi_should_lock should_lock,
+ enum uacpi_permanent_only permanent_only, void *user
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ uacpi_iteration_decision decision;
+ uacpi_iteration_callback cb;
+ uacpi_bool walking_up = UACPI_FALSE, matches = UACPI_FALSE;
+ uacpi_u32 depth = 1;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(descending_callback == UACPI_NULL &&
+ ascending_callback == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (uacpi_unlikely(node == UACPI_NULL || max_depth == 0))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (should_lock == UACPI_SHOULD_LOCK_YES) {
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ if (node->child == UACPI_NULL)
+ goto out;
+
+ node = node->child;
+
+ while (depth) {
+ uacpi_namespace_node_is_one_of_unlocked(node, type_mask, &matches);
+ if (!matches) {
+ decision = UACPI_ITERATION_DECISION_CONTINUE;
+ goto do_next;
+ }
+
+ if (permanent_only == UACPI_PERMANENT_ONLY_YES &&
+ uacpi_namespace_node_is_temporary(node)) {
+ decision = UACPI_ITERATION_DECISION_NEXT_PEER;
+ goto do_next;
+ }
+
+ cb = walking_up ? ascending_callback : descending_callback;
+ if (cb != UACPI_NULL) {
+ if (should_lock == UACPI_SHOULD_LOCK_YES) {
+ ret = uacpi_namespace_read_unlock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ decision = cb(user, node, depth);
+ if (decision == UACPI_ITERATION_DECISION_BREAK)
+ return ret;
+
+ if (should_lock == UACPI_SHOULD_LOCK_YES) {
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+ } else {
+ decision = UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ do_next:
+ if (walking_up) {
+ if (node->next) {
+ node = node->next;
+ walking_up = UACPI_FALSE;
+ continue;
+ }
+
+ depth--;
+ node = node->parent;
+ continue;
+ }
+
+ switch (decision) {
+ case UACPI_ITERATION_DECISION_CONTINUE:
+ if ((depth != max_depth) && (node->child != UACPI_NULL)) {
+ node = node->child;
+ depth++;
+ continue;
+ }
+ UACPI_FALLTHROUGH;
+ case UACPI_ITERATION_DECISION_NEXT_PEER:
+ walking_up = UACPI_TRUE;
+ continue;
+ default:
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+ }
+
+out:
+ if (should_lock == UACPI_SHOULD_LOCK_YES)
+ uacpi_namespace_read_unlock();
+ return ret;
+}
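+
+/*
+ * Note on the walk above: descending_callback fires when a node is first
+ * reached (pre-order) and ascending_callback fires on the way back up, once
+ * the node's subtree has been handled (post-order), so the pair can be used
+ * to mirror scope entry and exit while traversing the namespace.
+ */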
+
+uacpi_status uacpi_namespace_for_each_child_simple(
+ uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user
+)
+{
+ return uacpi_namespace_do_for_each_child(
+ parent, callback, UACPI_NULL, UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY,
+ UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, user
+ );
+}
+
+uacpi_status uacpi_namespace_for_each_child(
+ uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback,
+ uacpi_iteration_callback ascending_callback,
+ uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user
+)
+{
+ return uacpi_namespace_do_for_each_child(
+ parent, descending_callback, ascending_callback, type_mask, max_depth,
+ UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, user
+ );
+}
+
+uacpi_status uacpi_namespace_node_next_typed(
+ uacpi_namespace_node *parent, uacpi_namespace_node **iter,
+ uacpi_object_type_bits type_mask
+)
+{
+ uacpi_status ret;
+ uacpi_bool is_one_of;
+ uacpi_namespace_node *node;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(parent == UACPI_NULL && *iter == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ node = *iter;
+ if (node == UACPI_NULL)
+ node = parent->child;
+ else
+ node = node->next;
+
+ for (; node != UACPI_NULL; node = node->next) {
+ if (uacpi_namespace_node_is_temporary(node))
+ continue;
+
+ ret = uacpi_namespace_node_is_one_of_unlocked(
+ node, type_mask, &is_one_of
+ );
+ if (uacpi_unlikely_error(ret))
+ break;
+ if (is_one_of)
+ break;
+ }
+
+ uacpi_namespace_read_unlock();
+ if (node == UACPI_NULL)
+ return UACPI_STATUS_NOT_FOUND;
+
+ if (uacpi_likely_success(ret))
+ *iter = node;
+ return ret;
+}
+
+uacpi_status uacpi_namespace_node_next(
+ uacpi_namespace_node *parent, uacpi_namespace_node **iter
+)
+{
+ return uacpi_namespace_node_next_typed(
+ parent, iter, UACPI_OBJECT_ANY_BIT
+ );
+}
+
+uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node)
+{
+ uacpi_size depth = 0;
+
+ while (node->parent) {
+ depth++;
+ node = node->parent;
+ }
+
+ return depth;
+}
+
+uacpi_namespace_node *uacpi_namespace_node_parent(
+ uacpi_namespace_node *node
+)
+{
+ return node->parent;
+}
+
+const uacpi_char *uacpi_namespace_node_generate_absolute_path(
+ const uacpi_namespace_node *node
+)
+{
+ uacpi_size depth, offset;
+ uacpi_size bytes_needed;
+ uacpi_char *path;
+
+ depth = uacpi_namespace_node_depth(node) + 1;
+
+ // \ only needs 1 byte, the rest is 4 bytes
+ bytes_needed = 1 + (depth - 1) * sizeof(uacpi_object_name);
+
+ // \ and the first NAME don't need a '.', every other segment does
+ bytes_needed += depth > 2 ? depth - 2 : 0;
+
+ // Null terminator
+ bytes_needed += 1;
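+
+ /*
+ * Worked example: for a node at \_SB_.PCI0 the depth is 3, so this
+ * allocates 1 + 2 * 4 + 1 + 1 = 11 bytes, exactly enough for the string
+ * "\_SB_.PCI0" plus its null terminator.
+ */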
+
+ path = uacpi_kernel_alloc(bytes_needed);
+ if (uacpi_unlikely(path == UACPI_NULL))
+ return path;
+
+ path[0] = '\\';
+
+ offset = bytes_needed - 1;
+ path[offset] = '\0';
+
+ while (node != uacpi_namespace_root()) {
+ offset -= sizeof(uacpi_object_name);
+ uacpi_memcpy(&path[offset], node->name.text, sizeof(uacpi_object_name));
+
+ node = node->parent;
+ if (node != uacpi_namespace_root())
+ path[--offset] = '.';
+ }
+
+ return path;
+}
+
+void uacpi_free_absolute_path(const uacpi_char *path)
+{
+ uacpi_free_dynamic_string(path);
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/notify.c b/sys/dev/acpi/uacpi/notify.c
new file mode 100644
index 0000000..b413df9
--- /dev/null
+++ b/sys/dev/acpi/uacpi/notify.c
@@ -0,0 +1,255 @@
+#include <uacpi/internal/notify.h>
+#include <uacpi/internal/shareable.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/kernel_api.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+static uacpi_handle notify_mutex;
+
+uacpi_status uacpi_initialize_notify(void)
+{
+ notify_mutex = uacpi_kernel_create_mutex();
+ if (uacpi_unlikely(notify_mutex == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
+void uacpi_deinitialize_notify(void)
+{
+ if (notify_mutex != UACPI_NULL)
+ uacpi_kernel_free_mutex(notify_mutex);
+
+ notify_mutex = UACPI_NULL;
+}
+
+struct notification_ctx {
+ uacpi_namespace_node *node;
+ uacpi_u64 value;
+ uacpi_object *node_object;
+};
+
+static void free_notification_ctx(struct notification_ctx *ctx)
+{
+ uacpi_namespace_node_release_object(ctx->node_object);
+ uacpi_namespace_node_unref(ctx->node);
+ uacpi_free(ctx, sizeof(*ctx));
+}
+
+static void do_notify(uacpi_handle opaque)
+{
+ struct notification_ctx *ctx = opaque;
+ uacpi_device_notify_handler *handler;
+ uacpi_bool did_notify_root = UACPI_FALSE;
+
+ handler = ctx->node_object->handlers->notify_head;
+
+ for (;;) {
+ if (handler == UACPI_NULL) {
+ if (did_notify_root) {
+ free_notification_ctx(ctx);
+ return;
+ }
+
+ handler = g_uacpi_rt_ctx.root_object->handlers->notify_head;
+ did_notify_root = UACPI_TRUE;
+ continue;
+ }
+
+ handler->callback(handler->user_context, ctx->node, ctx->value);
+ handler = handler->next;
+ }
+}
+
+uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value)
+{
+ uacpi_status ret;
+ struct notification_ctx *ctx;
+ uacpi_object *node_object;
+
+ node_object = uacpi_namespace_node_get_object_typed(
+ node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
+ UACPI_OBJECT_PROCESSOR_BIT
+ );
+ if (uacpi_unlikely(node_object == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_acquire_native_mutex(notify_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (node_object->handlers->notify_head == UACPI_NULL &&
+ g_uacpi_rt_ctx.root_object->handlers->notify_head == UACPI_NULL) {
+ ret = UACPI_STATUS_NO_HANDLER;
+ goto out;
+ }
+
+ ctx = uacpi_kernel_alloc(sizeof(*ctx));
+ if (uacpi_unlikely(ctx == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ ctx->node = node;
+ // In case this node goes out of scope
+ uacpi_shareable_ref(node);
+
+ ctx->value = value;
+ ctx->node_object = uacpi_namespace_node_get_object(node);
+ uacpi_object_ref(ctx->node_object);
+
+ ret = uacpi_kernel_schedule_work(UACPI_WORK_NOTIFICATION, do_notify, ctx);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_warn("unable to schedule notification work: %s\n",
+ uacpi_status_to_string(ret));
+ free_notification_ctx(ctx);
+ }
+
+out:
+ uacpi_release_native_mutex(notify_mutex);
+ return ret;
+}
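+
+/*
+ * Illustrative example: an AML statement such as
+ * Notify (\_SB.PCI0.LPCB.EC0, 0x80)
+ * is expected to end up in uacpi_notify_all() with value == 0x80. The actual
+ * handler invocations are deferred to a UACPI_WORK_NOTIFICATION work item
+ * (do_notify above), which runs every handler registered on the node and
+ * then the ones registered on the root.
+ */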
+
+static uacpi_device_notify_handler *handler_container(
+ uacpi_handlers *handlers, uacpi_notify_handler target_handler
+)
+{
+ uacpi_device_notify_handler *handler = handlers->notify_head;
+
+ while (handler) {
+ if (handler->callback == target_handler)
+ return handler;
+
+ handler = handler->next;
+ }
+
+ return UACPI_NULL;
+}
+
+uacpi_status uacpi_install_notify_handler(
+ uacpi_namespace_node *node, uacpi_notify_handler handler,
+ uacpi_handle handler_context
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_handlers *handlers;
+ uacpi_device_notify_handler *new_handler;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (node == uacpi_namespace_root()) {
+ obj = g_uacpi_rt_ctx.root_object;
+ } else {
+ ret = uacpi_namespace_node_acquire_object_typed(
+ node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
+ UACPI_OBJECT_PROCESSOR_BIT, &obj
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ ret = uacpi_acquire_native_mutex(notify_mutex);
+ if (uacpi_unlikely_error(ret))
+ goto out_no_mutex;
+
+ uacpi_kernel_wait_for_work_completion();
+
+ handlers = obj->handlers;
+
+ if (handler_container(handlers, handler) != UACPI_NULL) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ new_handler = uacpi_kernel_alloc_zeroed(sizeof(*new_handler));
+ if (uacpi_unlikely(new_handler == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ new_handler->callback = handler;
+ new_handler->user_context = handler_context;
+ new_handler->next = handlers->notify_head;
+
+ handlers->notify_head = new_handler;
+
+out:
+ uacpi_release_native_mutex(notify_mutex);
+out_no_mutex:
+ if (node != uacpi_namespace_root())
+ uacpi_object_unref(obj);
+
+ return ret;
+}
+
+uacpi_status uacpi_uninstall_notify_handler(
+ uacpi_namespace_node *node, uacpi_notify_handler handler
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_handlers *handlers;
+ uacpi_device_notify_handler *prev_handler, *containing = UACPI_NULL;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (node == uacpi_namespace_root()) {
+ obj = g_uacpi_rt_ctx.root_object;
+ } else {
+ ret = uacpi_namespace_node_acquire_object_typed(
+ node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
+ UACPI_OBJECT_PROCESSOR_BIT, &obj
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ ret = uacpi_acquire_native_mutex(notify_mutex);
+ if (uacpi_unlikely_error(ret))
+ goto out_no_mutex;
+
+ uacpi_kernel_wait_for_work_completion();
+
+ handlers = obj->handlers;
+
+ containing = handler_container(handlers, handler);
+ if (containing == UACPI_NULL) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ goto out;
+ }
+
+ prev_handler = handlers->notify_head;
+
+ // Are we the last linked handler?
+ if (prev_handler == containing) {
+ handlers->notify_head = containing->next;
+ goto out;
+ }
+
+ // Nope, we're somewhere in the middle. Do a search.
+ while (prev_handler) {
+ if (prev_handler->next == containing) {
+ prev_handler->next = containing->next;
+ goto out;
+ }
+
+ prev_handler = prev_handler->next;
+ }
+
+out:
+ uacpi_release_native_mutex(notify_mutex);
+out_no_mutex:
+ if (node != uacpi_namespace_root())
+ uacpi_object_unref(obj);
+
+ if (uacpi_likely_success(ret))
+ uacpi_free(containing, sizeof(*containing));
+
+ return ret;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/opcodes.c b/sys/dev/acpi/uacpi/opcodes.c
new file mode 100644
index 0000000..3665631
--- /dev/null
+++ b/sys/dev/acpi/uacpi/opcodes.c
@@ -0,0 +1,272 @@
+#include <uacpi/internal/opcodes.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+#define UACPI_OP(opname, opcode, props, ...) \
+ { #opname, { .decode_ops = __VA_ARGS__ }, .properties = props, .code = opcode },
+
+#define UACPI_OUT_OF_LINE_OP(opname, opcode, out_of_line_buf, props) \
+ { \
+ .name = #opname, \
+ { .indirect_decode_ops = out_of_line_buf }, \
+ .properties = props, \
+ .code = opcode, \
+ },
+
+static const struct uacpi_op_spec opcode_table[0x100] = {
+ UACPI_ENUMERATE_OPCODES
+};
+
+static const struct uacpi_op_spec ext_opcode_table[] = {
+ UACPI_ENUMERATE_EXT_OPCODES
+};
+
+#define _(op) (op & 0x00FF)
+
+static const uacpi_u8 ext_op_to_idx[0x100] = {
+ [_(UACPI_AML_OP_MutexOp)] = 1, [_(UACPI_AML_OP_EventOp)] = 2,
+ [_(UACPI_AML_OP_CondRefOfOp)] = 3, [_(UACPI_AML_OP_CreateFieldOp)] = 4,
+ [_(UACPI_AML_OP_LoadTableOp)] = 5, [_(UACPI_AML_OP_LoadOp)] = 6,
+ [_(UACPI_AML_OP_StallOp)] = 7, [_(UACPI_AML_OP_SleepOp)] = 8,
+ [_(UACPI_AML_OP_AcquireOp)] = 9, [_(UACPI_AML_OP_SignalOp)] = 10,
+ [_(UACPI_AML_OP_WaitOp)] = 11, [_(UACPI_AML_OP_ResetOp)] = 12,
+ [_(UACPI_AML_OP_ReleaseOp)] = 13, [_(UACPI_AML_OP_FromBCDOp)] = 14,
+ [_(UACPI_AML_OP_ToBCDOp)] = 15, [_(UACPI_AML_OP_UnloadOp)] = 16,
+ [_(UACPI_AML_OP_RevisionOp)] = 17, [_(UACPI_AML_OP_DebugOp)] = 18,
+ [_(UACPI_AML_OP_FatalOp)] = 19, [_(UACPI_AML_OP_TimerOp)] = 20,
+ [_(UACPI_AML_OP_OpRegionOp)] = 21, [_(UACPI_AML_OP_FieldOp)] = 22,
+ [_(UACPI_AML_OP_DeviceOp)] = 23, [_(UACPI_AML_OP_ProcessorOp)] = 24,
+ [_(UACPI_AML_OP_PowerResOp)] = 25, [_(UACPI_AML_OP_ThermalZoneOp)] = 26,
+ [_(UACPI_AML_OP_IndexFieldOp)] = 27, [_(UACPI_AML_OP_BankFieldOp)] = 28,
+ [_(UACPI_AML_OP_DataRegionOp)] = 29,
+};
+
+const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op op)
+{
+ if (op > 0xFF)
+ return &ext_opcode_table[ext_op_to_idx[_(op)]];
+
+ return &opcode_table[op];
+}
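+
+/*
+ * Illustrative example: MutexOp is encoded as the byte pair 0x5B 0x01 in
+ * AML, so 'op' arrives here as 0x5B01. _(op) masks that down to 0x01,
+ * ext_op_to_idx[0x01] is 1, and the spec comes from ext_opcode_table[1].
+ * Extended opcodes not listed above fall through to index 0.
+ */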
+
+#define PARSE_FIELD_ELEMENTS(parse_loop_pc) \
+ /* Parse every field element found inside */ \
+ UACPI_PARSE_OP_IF_HAS_DATA, 44, \
+ /* Look at the first byte */ \
+ UACPI_PARSE_OP_LOAD_IMM, 1, \
+ \
+ /* ReservedField := 0x00 PkgLength */ \
+ UACPI_PARSE_OP_IF_LAST_EQUALS, 0x00, 3, \
+ UACPI_PARSE_OP_PKGLEN, \
+ UACPI_PARSE_OP_JMP, parse_loop_pc, \
+ \
+ /* AccessField := 0x01 AccessType AccessAttrib */ \
+ UACPI_PARSE_OP_IF_LAST_EQUALS, 0x01, 6, \
+ UACPI_PARSE_OP_LOAD_IMM, 1, \
+ UACPI_PARSE_OP_LOAD_IMM, 1, \
+ UACPI_PARSE_OP_JMP, parse_loop_pc, \
+ \
+ /* ConnectField := <0x02 NameString> | <0x02 BufferData> */ \
+ UACPI_PARSE_OP_IF_LAST_EQUALS, 0x02, 5, \
+ UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \
+ UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \
+ UACPI_PARSE_OP_JMP, parse_loop_pc, \
+ \
+ /* ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib \
+ * AccessLength */ \
+ UACPI_PARSE_OP_IF_LAST_EQUALS, 0x03, 8, \
+ UACPI_PARSE_OP_LOAD_IMM, 1, \
+ UACPI_PARSE_OP_LOAD_IMM, 1, \
+ UACPI_PARSE_OP_LOAD_IMM, 1, \
+ UACPI_PARSE_OP_JMP, parse_loop_pc, \
+ \
+ /* NamedField := NameSeg PkgLength */ \
+ \
+ /* \
+ * Discard the immediate, as it's the first byte of the \
+ * nameseg. We don't need it. \
+ */ \
+ UACPI_PARSE_OP_ITEM_POP, \
+ UACPI_PARSE_OP_AML_PC_DECREMENT, \
+ UACPI_PARSE_OP_CREATE_NAMESTRING, \
+ UACPI_PARSE_OP_PKGLEN, \
+ UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_FIELD_UNIT, \
+ UACPI_PARSE_OP_JMP, parse_loop_pc, \
+ \
+ UACPI_PARSE_OP_INVOKE_HANDLER, \
+ UACPI_PARSE_OP_END
+
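+/*
+ * Illustrative mapping from ASL to the field element kinds parsed above:
+ *
+ * Field (REG, AnyAcc, NoLock, Preserve) {
+ * Offset (2), // ReservedField (0x00 PkgLength)
+ * FLD0, 8, // NamedField (NameSeg PkgLength)
+ * AccessAs (ByteAcc, 0), // AccessField (0x01 AccessType AccessAttrib)
+ * FLD1, 16, // NamedField
+ * }
+ */
+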
+uacpi_u8 uacpi_field_op_decode_ops[] = {
+ UACPI_PARSE_OP_TRACKED_PKGLEN,
+ UACPI_PARSE_OP_EXISTING_NAMESTRING,
+ UACPI_PARSE_OP_LOAD_IMM, 1,
+ PARSE_FIELD_ELEMENTS(4),
+};
+
+uacpi_u8 uacpi_bank_field_op_decode_ops[] = {
+ UACPI_PARSE_OP_TRACKED_PKGLEN,
+ UACPI_PARSE_OP_EXISTING_NAMESTRING,
+ UACPI_PARSE_OP_EXISTING_NAMESTRING,
+ UACPI_PARSE_OP_OPERAND,
+ UACPI_PARSE_OP_LOAD_IMM, 1,
+ PARSE_FIELD_ELEMENTS(6),
+};
+
+uacpi_u8 uacpi_index_field_op_decode_ops[] = {
+ UACPI_PARSE_OP_TRACKED_PKGLEN,
+ UACPI_PARSE_OP_EXISTING_NAMESTRING,
+ UACPI_PARSE_OP_EXISTING_NAMESTRING,
+ UACPI_PARSE_OP_LOAD_IMM, 1,
+ PARSE_FIELD_ELEMENTS(5),
+};
+
+uacpi_u8 uacpi_load_op_decode_ops[] = {
+ // Storage for the scope pointer, this is left as 0 in case of errors
+ UACPI_PARSE_OP_LOAD_ZERO_IMM,
+ UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
+ UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
+ UACPI_PARSE_OP_TARGET,
+
+ /*
+ * Invoke the handler here to initialize the table. If this fails, it's
+ * expected to keep the item 0 as NULL, which is checked below to return
+ * false to the caller of Load.
+ */
+ UACPI_PARSE_OP_INVOKE_HANDLER,
+ UACPI_PARSE_OP_IF_NULL, 0, 3,
+ UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
+ UACPI_PARSE_OP_JMP, 15,
+
+ UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
+ UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,
+
+ /*
+ * Invoke the handler a second time to initialize any AML GPE handlers that
+ * might've been loaded from this table.
+ */
+ UACPI_PARSE_OP_INVOKE_HANDLER,
+ UACPI_PARSE_OP_STORE_TO_TARGET, 3,
+ UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
+ UACPI_PARSE_OP_END,
+};
+
+uacpi_u8 uacpi_load_table_op_decode_ops[] = {
+ // Storage for the scope pointer, this is left as 0 in case of errors
+ UACPI_PARSE_OP_LOAD_ZERO_IMM,
+ UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD,
+ // Index of the table we are going to load, used to unref it later
+ UACPI_PARSE_OP_LOAD_ZERO_IMM,
+ // Storage for the target pointer, this is left as 0 if none was requested
+ UACPI_PARSE_OP_LOAD_ZERO_IMM,
+
+ UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, 5,
+ UACPI_PARSE_OP_IF_NOT_NULL, 4, 5,
+ UACPI_PARSE_OP_STRING,
+ UACPI_PARSE_OP_IMM_DECREMENT, 4,
+ UACPI_PARSE_OP_JMP, 8,
+ UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,
+
+ /*
+ * Invoke the handler here to initialize the table. If this fails, it's
+ * expected to keep the item 0 as NULL, which is checked below to return
+ * false to the caller of Load.
+ */
+ UACPI_PARSE_OP_INVOKE_HANDLER,
+ UACPI_PARSE_OP_IF_NULL, 0, 3,
+ UACPI_PARSE_OP_LOAD_FALSE_OBJECT,
+ UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
+ UACPI_PARSE_OP_END,
+
+ UACPI_PARSE_OP_LOAD_TRUE_OBJECT,
+ UACPI_PARSE_OP_DISPATCH_TABLE_LOAD,
+
+ /*
+ * Invoke the handler a second time to block the store to target in case
+ * the load above failed, as well as do any AML GPE handler initialization.
+ */
+ UACPI_PARSE_OP_INVOKE_HANDLER,
+
+ // If we were given a target to store to, do the store
+ UACPI_PARSE_OP_IF_NOT_NULL, 3, 3,
+ UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 3, 10,
+
+ UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,
+ UACPI_PARSE_OP_END,
+};
+
+#define POP(x) UACPI_PARSE_OP_##x
+
+static
+const uacpi_char *const pop_names[UACPI_PARSE_OP_MAX + 1] = {
+ [POP(END)] = "<END-OF-OP>",
+ [POP(SKIP_WITH_WARN_IF_NULL)] = "SKIP_WITH_WARN_IF_NULL",
+ [POP(EMIT_SKIP_WARN)] = "EMIT_SKIP_WARN",
+ [POP(SIMPLE_NAME)] = "SIMPLE_NAME",
+ [POP(SUPERNAME)] = "SUPERNAME",
+ [POP(SUPERNAME_OR_UNRESOLVED)] = "SUPERNAME_OR_UNRESOLVED",
+ [POP(TERM_ARG)] = "TERM_ARG",
+ [POP(TERM_ARG_UNWRAP_INTERNAL)] = "TERM_ARG_UNWRAP_INTERNAL",
+ [POP(TERM_ARG_OR_NAMED_OBJECT)] = "TERM_ARG_OR_NAMED_OBJECT",
+ [POP(TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED)] = "TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED",
+ [POP(OPERAND)] = "OPERAND",
+ [POP(STRING)] = "STRING",
+ [POP(COMPUTATIONAL_DATA)] = "COMPUTATIONAL_DATA",
+ [POP(TARGET)] = "TARGET",
+ [POP(PKGLEN)] = "PKGLEN",
+ [POP(TRACKED_PKGLEN)] = "TRACKED_PKGLEN",
+ [POP(CREATE_NAMESTRING)] = "CREATE_NAMESTRING",
+ [POP(CREATE_NAMESTRING_OR_NULL_IF_LOAD)] = "CREATE_NAMESTRING_OR_NULL_IF_LOAD",
+ [POP(EXISTING_NAMESTRING)] = "EXISTING_NAMESTRING",
+ [POP(EXISTING_NAMESTRING_OR_NULL)] = "EXISTING_NAMESTRING_OR_NULL",
+ [POP(EXISTING_NAMESTRING_OR_NULL_IF_LOAD)] = "EXISTING_NAMESTRING_OR_NULL_IF_LOAD",
+ [POP(INVOKE_HANDLER)] = "INVOKE_HANDLER",
+ [POP(OBJECT_ALLOC)] = "OBJECT_ALLOC",
+ [POP(EMPTY_OBJECT_ALLOC)] = "EMPTY_OBJECT_ALLOC",
+ [POP(OBJECT_CONVERT_TO_SHALLOW_COPY)] = "OBJECT_CONVERT_TO_SHALLOW_COPY",
+ [POP(OBJECT_CONVERT_TO_DEEP_COPY)] = "OBJECT_CONVERT_TO_DEEP_COPY",
+ [POP(OBJECT_ALLOC_TYPED)] = "OBJECT_ALLOC_TYPED",
+ [POP(RECORD_AML_PC)] = "RECORD_AML_PC",
+ [POP(LOAD_INLINE_IMM_AS_OBJECT)] = "LOAD_INLINE_IMM_AS_OBJECT",
+ [POP(LOAD_INLINE_IMM)] = "LOAD_INLINE_IMM",
+ [POP(LOAD_ZERO_IMM)] = "LOAD_ZERO_IMM",
+ [POP(LOAD_IMM)] = "LOAD_IMM",
+ [POP(LOAD_IMM_AS_OBJECT)] = "LOAD_IMM_AS_OBJECT",
+ [POP(LOAD_FALSE_OBJECT)] = "LOAD_FALSE_OBJECT",
+ [POP(LOAD_TRUE_OBJECT)] = "LOAD_TRUE_OBJECT",
+ [POP(TRUNCATE_NUMBER)] = "TRUNCATE_NUMBER",
+ [POP(TYPECHECK)] = "TYPECHECK",
+ [POP(INSTALL_NAMESPACE_NODE)] = "INSTALL_NAMESPACE_NODE",
+ [POP(OBJECT_TRANSFER_TO_PREV)] = "OBJECT_TRANSFER_TO_PREV",
+ [POP(OBJECT_COPY_TO_PREV)] = "OBJECT_COPY_TO_PREV",
+ [POP(STORE_TO_TARGET)] = "STORE_TO_TARGET",
+ [POP(STORE_TO_TARGET_INDIRECT)] = "STORE_TO_TARGET_INDIRECT",
+ [POP(UNREACHABLE)] = "UNREACHABLE",
+ [POP(BAD_OPCODE)] = "BAD_OPCODE",
+ [POP(AML_PC_DECREMENT)] = "AML_PC_DECREMENT",
+ [POP(IMM_DECREMENT)] = "IMM_DECREMENT",
+ [POP(ITEM_POP)] = "ITEM_POP",
+ [POP(DISPATCH_METHOD_CALL)] = "DISPATCH_METHOD_CALL",
+ [POP(DISPATCH_TABLE_LOAD)] = "DISPATCH_TABLE_LOAD",
+ [POP(CONVERT_NAMESTRING)] = "CONVERT_NAMESTRING",
+ [POP(IF_HAS_DATA)] = "IF_HAS_DATA",
+ [POP(IF_NULL)] = "IF_NULL",
+ [POP(IF_LAST_NULL)] = "IF_LAST_NULL",
+ [POP(IF_NOT_NULL)] = "IF_NOT_NULL",
+ [POP(IF_LAST_NOT_NULL)] = "IF_LAST_NOT_NULL",
+ [POP(IF_LAST_EQUALS)] = "IF_LAST_EQUALS",
+ [POP(IF_LAST_FALSE)] = "IF_LAST_FALSE",
+ [POP(IF_LAST_TRUE)] = "IF_LAST_TRUE",
+ [POP(SWITCH_TO_NEXT_IF_EQUALS)] = "SWITCH_TO_NEXT_IF_EQUALS",
+ [POP(IF_SWITCHED_FROM)] = "IF_SWITCHED_FROM",
+ [POP(JMP)] = "JMP",
+};
+
+const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op)
+{
+ if (uacpi_unlikely(op > UACPI_PARSE_OP_MAX))
+ return "<INVALID-OP>";
+
+ return pop_names[op];
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/opregion.c b/sys/dev/acpi/uacpi/opregion.c
new file mode 100644
index 0000000..ec0bc37
--- /dev/null
+++ b/sys/dev/acpi/uacpi/opregion.c
@@ -0,0 +1,1056 @@
+#include <uacpi/kernel_api.h>
+
+#include <uacpi/internal/opregion.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/internal/interpreter.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+struct uacpi_recursive_lock g_opregion_lock;
+
+uacpi_status uacpi_initialize_opregion(void)
+{
+ return uacpi_recursive_lock_init(&g_opregion_lock);
+}
+
+void uacpi_deinitialize_opregion(void)
+{
+ uacpi_recursive_lock_deinit(&g_opregion_lock);
+}
+
+void uacpi_trace_region_error(
+ uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret
+)
+{
+ const uacpi_char *path, *space_string = "<unknown>";
+ uacpi_object *obj;
+
+ path = uacpi_namespace_node_generate_absolute_path(node);
+
+ obj = uacpi_namespace_node_get_object_typed(
+ node, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_likely(obj != UACPI_NULL))
+ space_string = uacpi_address_space_to_string(obj->op_region->space);
+
+ uacpi_error(
+ "%s (%s) operation region %s: %s\n",
+ message, space_string, path, uacpi_status_to_string(ret)
+ );
+ uacpi_free_dynamic_string(path);
+}
+
+static void trace_region_io(
+ uacpi_field_unit *field, uacpi_address_space space, uacpi_u64 offset,
+ uacpi_region_op op, union uacpi_opregion_io_data data
+)
+{
+ const uacpi_char *path;
+ const uacpi_char *type_str;
+
+ if (!uacpi_should_log(UACPI_LOG_TRACE))
+ return;
+
+ switch (op) {
+ case UACPI_REGION_OP_READ:
+ type_str = "read from";
+ break;
+ case UACPI_REGION_OP_WRITE:
+ type_str = "write to";
+ break;
+ default:
+ type_str = "<INVALID-OP>";
+ }
+
+ path = uacpi_namespace_node_generate_absolute_path(field->region);
+
+ switch (space) {
+ case UACPI_ADDRESS_SPACE_IPMI:
+ case UACPI_ADDRESS_SPACE_PRM:
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ uacpi_trace(
+ "write-then-read from [%s] %s[0x%016"UACPI_PRIX64"] = "
+ "<buffer of %zu bytes>\n", path,
+ uacpi_address_space_to_string(space),
+ UACPI_FMT64(offset), data.buffer.length
+ );
+ break;
+ case UACPI_ADDRESS_SPACE_SMBUS:
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ uacpi_trace(
+ "%s [%s] %s[0x%016"UACPI_PRIX64"] = "
+ "<buffer of %zu bytes>\n", type_str, path,
+ uacpi_address_space_to_string(space),
+ UACPI_FMT64(offset), data.buffer.length
+ );
+ break;
+ case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO:
+ uacpi_trace(
+ "%s [%s] %s pins[%u..%u] = 0x%"UACPI_PRIX64"\n",
+ type_str, path, uacpi_address_space_to_string(space),
+ field->pin_offset, (field->pin_offset + field->bit_length) - 1,
+ UACPI_FMT64(*data.integer)
+ );
+ break;
+ default:
+ uacpi_trace(
+ "%s [%s] (%d bytes) %s[0x%016"UACPI_PRIX64"] = 0x%"UACPI_PRIX64"\n",
+ type_str, path, field->access_width_bytes,
+ uacpi_address_space_to_string(space),
+ UACPI_FMT64(offset), UACPI_FMT64(*data.integer)
+ );
+ break;
+ }
+
+ uacpi_free_dynamic_string(path);
+}
+
+static uacpi_bool space_needs_reg(enum uacpi_address_space space)
+{
+ if (space == UACPI_ADDRESS_SPACE_SYSTEM_MEMORY ||
+ space == UACPI_ADDRESS_SPACE_SYSTEM_IO ||
+ space == UACPI_ADDRESS_SPACE_TABLE_DATA)
+ return UACPI_FALSE;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_status region_run_reg(
+ uacpi_namespace_node *node, uacpi_u8 connection_code
+)
+{
+ uacpi_status ret;
+ uacpi_namespace_node *reg_node;
+ uacpi_object_array method_args;
+ uacpi_object *reg_obj, *args[2];
+
+ ret = uacpi_namespace_node_resolve(
+ node->parent, "_REG", UACPI_SHOULD_LOCK_NO,
+ UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_NO, &reg_node
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ reg_obj = uacpi_namespace_node_get_object_typed(
+ reg_node, UACPI_OBJECT_METHOD_BIT
+ );
+ if (uacpi_unlikely(reg_obj == UACPI_NULL))
+ return UACPI_STATUS_OK;
+
+ args[0] = uacpi_create_object(UACPI_OBJECT_INTEGER);
+ if (uacpi_unlikely(args[0] == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ args[1] = uacpi_create_object(UACPI_OBJECT_INTEGER);
+ if (uacpi_unlikely(args[1] == UACPI_NULL)) {
+ uacpi_object_unref(args[0]);
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ args[0]->integer = uacpi_namespace_node_get_object(node)->op_region->space;
+ args[1]->integer = connection_code;
+ method_args.objects = args;
+ method_args.count = 2;
+
+ ret = uacpi_execute_control_method(
+ reg_node, reg_obj->method, &method_args, UACPI_NULL
+ );
+ if (uacpi_unlikely_error(ret))
+ uacpi_trace_region_error(node, "error during _REG execution for", ret);
+
+ uacpi_object_unref(args[0]);
+ uacpi_object_unref(args[1]);
+ return ret;
+}
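+
+/*
+ * Illustrative example of the method invoked above; firmware typically ships
+ * something like (ECAV being a made-up vendor flag):
+ *
+ * Method (_REG, 2) {
+ * If (Arg0 == 3) { // EmbeddedControl address space
+ * Store (Arg1, ECAV) // 1 = handler connected, 0 = disconnected
+ * }
+ * }
+ *
+ * Arg0 receives the address space ID and Arg1 the connection code passed in
+ * by region_run_reg().
+ */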
+
+uacpi_address_space_handlers *uacpi_node_get_address_space_handlers(
+ uacpi_namespace_node *node
+)
+{
+ uacpi_object *object;
+
+ if (node == uacpi_namespace_root())
+ return g_uacpi_rt_ctx.root_object->address_space_handlers;
+
+ object = uacpi_namespace_node_get_object(node);
+ if (uacpi_unlikely(object == UACPI_NULL))
+ return UACPI_NULL;
+
+ switch (object->type) {
+ case UACPI_OBJECT_DEVICE:
+ case UACPI_OBJECT_PROCESSOR:
+ case UACPI_OBJECT_THERMAL_ZONE:
+ return object->address_space_handlers;
+ default:
+ return UACPI_NULL;
+ }
+}
+
+static uacpi_address_space_handler *find_handler(
+ uacpi_address_space_handlers *handlers,
+ enum uacpi_address_space space
+)
+{
+ uacpi_address_space_handler *handler = handlers->head;
+
+ while (handler) {
+ if (handler->space == space)
+ return handler;
+
+ handler = handler->next;
+ }
+
+ return UACPI_NULL;
+}
+
+static uacpi_operation_region *find_previous_region_link(
+ uacpi_operation_region *region
+)
+{
+ uacpi_address_space_handler *handler = region->handler;
+ uacpi_operation_region *parent = handler->regions;
+
+ if (parent == region)
+ // This is the last attached region; it has no previous link
+ return region;
+
+ while (parent->next != region) {
+ parent = parent->next;
+
+ if (uacpi_unlikely(parent == UACPI_NULL))
+ return UACPI_NULL;
+ }
+
+ return parent;
+}
+
+uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node)
+{
+ uacpi_object *obj;
+ uacpi_operation_region *region;
+ uacpi_address_space_handler *handler;
+ uacpi_status ret;
+ uacpi_region_attach_data attach_data = { 0 };
+
+ if (uacpi_namespace_node_is_dangling(node))
+ return UACPI_STATUS_NAMESPACE_NODE_DANGLING;
+
+ obj = uacpi_namespace_node_get_object_typed(
+ node, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ region = obj->op_region;
+
+ if (region->handler == UACPI_NULL)
+ return UACPI_STATUS_NO_HANDLER;
+ if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED)
+ return UACPI_STATUS_OK;
+
+ handler = region->handler;
+ attach_data.region_node = node;
+
+ switch (region->space) {
+ case UACPI_ADDRESS_SPACE_PCC:
+ if (region->length) {
+ region->internal_buffer = uacpi_kernel_alloc_zeroed(region->length);
+ if (uacpi_unlikely(region->internal_buffer == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ attach_data.pcc_info.buffer.bytes = region->internal_buffer;
+ attach_data.pcc_info.buffer.length = region->length;
+ attach_data.pcc_info.subspace_id = region->offset;
+ break;
+ case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO:
+ attach_data.gpio_info.num_pins = region->length;
+ break;
+ default:
+ attach_data.generic_info.base = region->offset;
+ attach_data.generic_info.length = region->length;
+ break;
+ }
+
+ attach_data.handler_context = handler->user_context;
+
+ uacpi_object_ref(obj);
+ uacpi_namespace_write_unlock();
+ ret = handler->callback(UACPI_REGION_OP_ATTACH, &attach_data);
+ uacpi_namespace_write_lock();
+
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_object_unref(obj);
+ return ret;
+ }
+
+ region->state_flags |= UACPI_OP_REGION_STATE_ATTACHED;
+ region->user_context = attach_data.out_region_context;
+ uacpi_object_unref(obj);
+ return ret;
+}
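+
+/*
+ * Illustrative example: for an AML declaration such as
+ * OperationRegion (GNVS, SystemMemory, 0x7FF00000, 0x100)
+ * the attach above hands the handler generic_info.base = 0x7FF00000 and
+ * generic_info.length = 0x100, while PCC and GeneralPurposeIO regions get
+ * their dedicated attach_data variants instead.
+ */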
+
+static void region_install_handler(
+ uacpi_namespace_node *node, uacpi_address_space_handler *handler
+)
+{
+ uacpi_operation_region *region;
+
+ region = uacpi_namespace_node_get_object(node)->op_region;
+ region->handler = handler;
+ uacpi_shareable_ref(handler);
+
+ region->next = handler->regions;
+ handler->regions = region;
+}
+
+enum unreg {
+ UNREG_NO = 0,
+ UNREG_YES,
+};
+
+static void region_uninstall_handler(
+ uacpi_namespace_node *node, enum unreg unreg
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_address_space_handler *handler;
+ uacpi_operation_region *region, *link;
+
+ obj = uacpi_namespace_node_get_object_typed(
+ node, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return;
+
+ region = obj->op_region;
+
+ handler = region->handler;
+ if (handler == UACPI_NULL)
+ return;
+
+ link = find_previous_region_link(region);
+ if (uacpi_unlikely(link == UACPI_NULL)) {
+ uacpi_error("operation region @%p not in the handler@%p list(?)\n",
+ region, handler);
+ goto out;
+ } else if (link == region) {
+ link = link->next;
+ handler->regions = link;
+ } else {
+ link->next = region->next;
+ }
+
+out:
+ if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED) {
+ uacpi_region_detach_data detach_data = { 0 };
+
+ detach_data.region_node = node;
+ detach_data.region_context = region->user_context;
+ detach_data.handler_context = handler->user_context;
+
+ uacpi_shareable_ref(node);
+ uacpi_namespace_write_unlock();
+
+ ret = handler->callback(UACPI_REGION_OP_DETACH, &detach_data);
+
+ uacpi_namespace_write_lock();
+ uacpi_namespace_node_unref(node);
+
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_trace_region_error(
+ node, "error during handler detach for", ret
+ );
+ }
+ }
+
+ if ((region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED) &&
+ unreg == UNREG_YES) {
+ region_run_reg(node, ACPI_REG_DISCONNECT);
+ region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED;
+ }
+
+ uacpi_address_space_handler_unref(region->handler);
+ region->handler = UACPI_NULL;
+ region->state_flags &= ~UACPI_OP_REGION_STATE_ATTACHED;
+}
+
+static uacpi_status upgrade_to_opregion_lock(void)
+{
+ uacpi_status ret;
+
+ /*
+ * Drop the namespace lock, and reacquire it after the opregion lock
+ * so we keep the ordering with user API.
+ */
+ uacpi_namespace_write_unlock();
+
+ ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
+ uacpi_namespace_write_lock();
+ return ret;
+}
+
+void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node)
+{
+ if (uacpi_unlikely_error(upgrade_to_opregion_lock()))
+ return;
+
+ region_uninstall_handler(node, UNREG_YES);
+
+ uacpi_recursive_lock_release(&g_opregion_lock);
+}
+
+uacpi_bool uacpi_address_space_handler_is_default(
+ uacpi_address_space_handler *handler
+)
+{
+ return handler->flags & UACPI_ADDRESS_SPACE_HANDLER_DEFAULT;
+}
+
+enum opregion_iter_action {
+ OPREGION_ITER_ACTION_UNINSTALL,
+ OPREGION_ITER_ACTION_INSTALL,
+};
+
+struct opregion_iter_ctx {
+ enum opregion_iter_action action;
+ uacpi_address_space_handler *handler;
+};
+
+static uacpi_iteration_decision do_install_or_uninstall_handler(
+ uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth
+)
+{
+ struct opregion_iter_ctx *ctx = opaque;
+ uacpi_address_space_handlers *handlers;
+ uacpi_object *object;
+
+ UACPI_UNUSED(depth);
+
+ object = uacpi_namespace_node_get_object(node);
+ if (object->type == UACPI_OBJECT_OPERATION_REGION) {
+ uacpi_operation_region *region = object->op_region;
+
+ if (region->space != ctx->handler->space)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ if (ctx->action == OPREGION_ITER_ACTION_INSTALL) {
+ if (region->handler)
+ region_uninstall_handler(node, UNREG_NO);
+
+ region_install_handler(node, ctx->handler);
+ } else {
+ if (uacpi_unlikely(region->handler != ctx->handler)) {
+ uacpi_trace_region_error(
+ node, "handler mismatch for",
+ UACPI_STATUS_INTERNAL_ERROR
+ );
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ region_uninstall_handler(node, UNREG_NO);
+ }
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ handlers = uacpi_node_get_address_space_handlers(node);
+ if (handlers == UACPI_NULL)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ // Device already has a handler for this space installed
+ if (find_handler(handlers, ctx->handler->space) != UACPI_NULL)
+ return UACPI_ITERATION_DECISION_NEXT_PEER;
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+struct reg_run_ctx {
+ uacpi_u8 space;
+ uacpi_u8 connection_code;
+ uacpi_size reg_executed;
+ uacpi_size reg_errors;
+};
+
+static uacpi_iteration_decision do_run_reg(
+ void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
+)
+{
+ struct reg_run_ctx *ctx = opaque;
+ uacpi_operation_region *region;
+ uacpi_status ret;
+ uacpi_bool was_regged;
+
+ UACPI_UNUSED(depth);
+
+ region = uacpi_namespace_node_get_object(node)->op_region;
+
+ if (region->space != ctx->space)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ was_regged = region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED;
+ if (was_regged == (ctx->connection_code == ACPI_REG_CONNECT))
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ ret = region_run_reg(node, ctx->connection_code);
+ if (ctx->connection_code == ACPI_REG_DISCONNECT)
+ region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED;
+
+ if (ret == UACPI_STATUS_NOT_FOUND)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ if (ctx->connection_code == ACPI_REG_CONNECT)
+ region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED;
+
+ ctx->reg_executed++;
+
+ if (uacpi_unlikely_error(ret)) {
+ ctx->reg_errors++;
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+static uacpi_status reg_or_unreg_all_opregions(
+ uacpi_namespace_node *device_node, enum uacpi_address_space space,
+ uacpi_u8 connection_code
+)
+{
+ uacpi_address_space_handlers *handlers;
+ uacpi_bool is_connect;
+ enum uacpi_permanent_only perm_only;
+ struct reg_run_ctx ctx = { 0 };
+
+ ctx.space = space;
+ ctx.connection_code = connection_code;
+
+ handlers = uacpi_node_get_address_space_handlers(device_node);
+ if (uacpi_unlikely(handlers == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ is_connect = connection_code == ACPI_REG_CONNECT;
+ if (uacpi_unlikely(is_connect &&
+ find_handler(handlers, space) == UACPI_NULL))
+ return UACPI_STATUS_NO_HANDLER;
+
+ /*
+ * We want to unreg non-permanent opregions as well, however,
+ * registering them is handled separately and should not be
+ * done by us.
+ */
+ perm_only = is_connect ? UACPI_PERMANENT_ONLY_YES : UACPI_PERMANENT_ONLY_NO;
+
+ uacpi_namespace_do_for_each_child(
+ device_node, do_run_reg, UACPI_NULL,
+ UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY,
+ UACPI_SHOULD_LOCK_NO, perm_only, &ctx
+ );
+
+ uacpi_trace(
+ "%sactivated all '%s' opregions controlled by '%.4s', "
+ "%zu _REG() calls (%zu errors)\n",
+ connection_code == ACPI_REG_CONNECT ? "" : "de",
+ uacpi_address_space_to_string(space),
+ device_node->name.text, ctx.reg_executed, ctx.reg_errors
+ );
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_address_space_handlers *extract_handlers(
+ uacpi_namespace_node *node
+)
+{
+ uacpi_object *handlers_obj;
+
+ if (node == uacpi_namespace_root())
+ return g_uacpi_rt_ctx.root_object->address_space_handlers;
+
+ handlers_obj = uacpi_namespace_node_get_object_typed(
+ node,
+ UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT |
+ UACPI_OBJECT_PROCESSOR_BIT
+ );
+ if (uacpi_unlikely(handlers_obj == UACPI_NULL))
+ return UACPI_NULL;
+
+ return handlers_obj->address_space_handlers;
+}
+
+uacpi_status uacpi_reg_all_opregions(
+ uacpi_namespace_node *device_node,
+ enum uacpi_address_space space
+)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ if (!space_needs_reg(space))
+ return UACPI_STATUS_OK;
+
+ ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_namespace_write_lock();
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+ }
+
+ if (uacpi_unlikely(extract_handlers(device_node) == UACPI_NULL)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret = reg_or_unreg_all_opregions(device_node, space, ACPI_REG_CONNECT);
+
+out:
+ uacpi_namespace_write_unlock();
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+}
+
+uacpi_status uacpi_install_address_space_handler_with_flags(
+ uacpi_namespace_node *device_node, enum uacpi_address_space space,
+ uacpi_region_handler handler, uacpi_handle handler_context,
+ uacpi_u16 flags
+)
+{
+ uacpi_status ret;
+ uacpi_address_space_handlers *handlers;
+ uacpi_address_space_handler *this_handler, *new_handler;
+ struct opregion_iter_ctx iter_ctx;
+
+ ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_namespace_write_lock();
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+ }
+
+ handlers = extract_handlers(device_node);
+ if (uacpi_unlikely(handlers == UACPI_NULL)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ this_handler = find_handler(handlers, space);
+ if (this_handler != UACPI_NULL) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ new_handler = uacpi_kernel_alloc(sizeof(*new_handler));
+ if (new_handler == UACPI_NULL) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+ uacpi_shareable_init(new_handler);
+
+ new_handler->next = handlers->head;
+ new_handler->space = space;
+ new_handler->user_context = handler_context;
+ new_handler->callback = handler;
+ new_handler->regions = UACPI_NULL;
+ new_handler->flags = flags;
+ handlers->head = new_handler;
+
+ iter_ctx.handler = new_handler;
+ iter_ctx.action = OPREGION_ITER_ACTION_INSTALL;
+
+ uacpi_namespace_do_for_each_child(
+ device_node, do_install_or_uninstall_handler, UACPI_NULL,
+ UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO,
+ UACPI_PERMANENT_ONLY_YES, &iter_ctx
+ );
+
+ if (!space_needs_reg(space))
+ goto out;
+
+    /*
+     * We are installing an address space handler early, so it is not possible
+     * to execute any _REG methods here yet. Just return and hope that this is
+     * either a global address space handler, or a handler installed by a user
+     * who will run uacpi_reg_all_opregions manually after loading/initializing
+     * the namespace.
+     */
+ if (g_uacpi_rt_ctx.init_level < UACPI_INIT_LEVEL_NAMESPACE_LOADED)
+ goto out;
+
+    // Init level is at least NAMESPACE_LOADED, so we can safely run _REG now
+ ret = reg_or_unreg_all_opregions(
+ device_node, space, ACPI_REG_CONNECT
+ );
+
+out:
+ uacpi_namespace_write_unlock();
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+}
+
+uacpi_status uacpi_install_address_space_handler(
+ uacpi_namespace_node *device_node, enum uacpi_address_space space,
+ uacpi_region_handler handler, uacpi_handle handler_context
+)
+{
+ return uacpi_install_address_space_handler_with_flags(
+ device_node, space, handler, handler_context, 0
+ );
+}
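+
+/*
+ * Illustrative sketch (not part of this port): roughly how a host driver
+ * could hook an operation region space with the API above. The names
+ * ec_node, my_ec_ctx and my_ec_io() are hypothetical, and the space/op
+ * constants are assumed from uacpi/types.h; the callback signature matches
+ * how handler->callback(op, &handler_data) is invoked in this file.
+ *
+ *     static uacpi_status my_ec_io(uacpi_region_op op, uacpi_handle op_data)
+ *     {
+ *         switch (op) {
+ *         case UACPI_REGION_OP_ATTACH:
+ *         case UACPI_REGION_OP_DETACH:
+ *             return UACPI_STATUS_OK;
+ *         default:
+ *             // forward reads/writes to the embedded controller here
+ *             return UACPI_STATUS_OK;
+ *         }
+ *     }
+ *
+ *     // Installed after the namespace is loaded, _REG runs automatically:
+ *     uacpi_install_address_space_handler(
+ *         ec_node, UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER,
+ *         my_ec_io, my_ec_ctx
+ *     );
+ */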
+
+uacpi_status uacpi_uninstall_address_space_handler(
+ uacpi_namespace_node *device_node,
+ enum uacpi_address_space space
+)
+{
+ uacpi_status ret;
+ uacpi_address_space_handlers *handlers;
+ uacpi_address_space_handler *handler = UACPI_NULL, *prev_handler;
+ struct opregion_iter_ctx iter_ctx;
+
+ ret = uacpi_recursive_lock_acquire(&g_opregion_lock);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_namespace_write_lock();
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+ }
+
+ handlers = extract_handlers(device_node);
+ if (uacpi_unlikely(handlers == UACPI_NULL)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ handler = find_handler(handlers, space);
+ if (uacpi_unlikely(handler == UACPI_NULL)) {
+ ret = UACPI_STATUS_NO_HANDLER;
+ goto out;
+ }
+
+ iter_ctx.handler = handler;
+ iter_ctx.action = OPREGION_ITER_ACTION_UNINSTALL;
+
+ uacpi_namespace_do_for_each_child(
+ device_node, do_install_or_uninstall_handler, UACPI_NULL,
+ UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO,
+ UACPI_PERMANENT_ONLY_NO, &iter_ctx
+ );
+
+ prev_handler = handlers->head;
+
+    // Are we the head of the list, i.e. the most recently installed handler?
+ if (prev_handler == handler) {
+ handlers->head = handler->next;
+ goto out_unreg;
+ }
+
+ // Nope, we're somewhere in the middle. Do a search.
+ while (prev_handler) {
+ if (prev_handler->next == handler) {
+ prev_handler->next = handler->next;
+            goto out_unreg;
+ }
+
+ prev_handler = prev_handler->next;
+ }
+
+out_unreg:
+ if (space_needs_reg(space))
+ reg_or_unreg_all_opregions(device_node, space, ACPI_REG_DISCONNECT);
+
+out:
+ if (handler != UACPI_NULL)
+ uacpi_address_space_handler_unref(handler);
+
+ uacpi_namespace_write_unlock();
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+}
+
+uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node)
+{
+ uacpi_status ret;
+ uacpi_namespace_node *parent = node->parent;
+ uacpi_operation_region *region;
+ uacpi_address_space_handlers *handlers;
+ uacpi_address_space_handler *handler;
+
+ ret = upgrade_to_opregion_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ region = uacpi_namespace_node_get_object(node)->op_region;
+ ret = UACPI_STATUS_NOT_FOUND;
+
+ while (parent) {
+ handlers = uacpi_node_get_address_space_handlers(parent);
+ if (handlers != UACPI_NULL) {
+ handler = find_handler(handlers, region->space);
+
+ if (handler != UACPI_NULL) {
+ region_install_handler(node, handler);
+ ret = UACPI_STATUS_OK;
+ break;
+ }
+ }
+
+ parent = parent->parent;
+ }
+
+ if (ret != UACPI_STATUS_OK)
+ goto out;
+ if (!space_needs_reg(region->space))
+ goto out;
+ if (uacpi_get_current_init_level() < UACPI_INIT_LEVEL_NAMESPACE_LOADED)
+ goto out;
+
+ if (region_run_reg(node, ACPI_REG_CONNECT) != UACPI_STATUS_NOT_FOUND)
+ region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED;
+
+out:
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+}
+
+uacpi_bool uacpi_is_buffer_access_address_space(uacpi_address_space space)
+{
+ switch (space) {
+ case UACPI_ADDRESS_SPACE_SMBUS:
+ case UACPI_ADDRESS_SPACE_IPMI:
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ case UACPI_ADDRESS_SPACE_PRM:
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ return UACPI_TRUE;
+ default:
+ return UACPI_FALSE;
+ }
+}
+
+static uacpi_bool space_needs_bounds_checking(uacpi_address_space space)
+{
+ return !uacpi_is_buffer_access_address_space(space) &&
+ space != UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO;
+}
+
+uacpi_status uacpi_dispatch_opregion_io(
+ uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op,
+ union uacpi_opregion_io_data data
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_operation_region *region;
+ uacpi_address_space_handler *handler;
+ uacpi_address_space space;
+ uacpi_u64 abs_offset, offset_end = offset;
+ uacpi_bool is_oob = UACPI_FALSE;
+ uacpi_region_op orig_op = op;
+
+ union {
+ uacpi_region_rw_data rw;
+ uacpi_region_pcc_send_data pcc;
+ uacpi_region_gpio_rw_data gpio;
+ uacpi_region_ipmi_rw_data ipmi;
+ uacpi_region_ffixedhw_rw_data ffixedhw;
+ uacpi_region_prm_rw_data prm;
+ uacpi_region_serial_rw_data serial;
+ } handler_data;
+
+ ret = upgrade_to_opregion_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_opregion_attach(field->region);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_trace_region_error(
+ field->region, "unable to attach", ret
+ );
+ goto out;
+ }
+
+ obj = uacpi_namespace_node_get_object_typed(
+ field->region, UACPI_OBJECT_OPERATION_REGION_BIT
+ );
+ if (uacpi_unlikely(obj == UACPI_NULL)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ region = obj->op_region;
+ space = region->space;
+ handler = region->handler;
+
+ abs_offset = region->offset + offset;
+ offset_end += field->access_width_bytes;
+
+ if (uacpi_likely(space_needs_bounds_checking(region->space)))
+ is_oob = region->length < offset_end || abs_offset < offset;
+ if (uacpi_unlikely(is_oob)) {
+ const uacpi_char *path;
+
+ path = uacpi_namespace_node_generate_absolute_path(field->region);
+ uacpi_error(
+ "out-of-bounds access to opregion %s[0x%"UACPI_PRIX64"->"
+ "0x%"UACPI_PRIX64"] at 0x%"UACPI_PRIX64" (idx=%u, width=%d)\n",
+ path, UACPI_FMT64(region->offset),
+ UACPI_FMT64(region->offset + region->length),
+ UACPI_FMT64(abs_offset), offset, field->access_width_bytes
+ );
+ uacpi_free_dynamic_string(path);
+ ret = UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
+ goto out;
+ }
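+
+    /*
+     * Illustrative numbers for the check above (made up): a region with
+     * length 0x10 and a 4-byte field access at offset 0x0E gives
+     * offset_end = 0x12 > 0x10, so the access is rejected as out-of-bounds
+     * before it ever reaches the handler.
+     */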
+
+ handler_data.rw.region_context = region->user_context;
+ handler_data.rw.handler_context = handler->user_context;
+
+ switch (region->space) {
+ case UACPI_ADDRESS_SPACE_PCC: {
+ uacpi_u8 *cursor;
+
+ cursor = region->internal_buffer + offset;
+
+ /*
+ * Reads from PCC just return the current contents of the internal
+ * buffer.
+ */
+ if (op == UACPI_REGION_OP_READ) {
+ uacpi_memcpy_zerout(
+ data.integer, cursor, sizeof(*data.integer),
+ field->access_width_bytes
+ );
+ goto io_done;
+ }
+
+ uacpi_memcpy(cursor, data.integer, field->access_width_bytes);
+
+ /*
+ * Dispatch a PCC send command if this was a write to the command field
+ *
+ * ACPI 6.5: 14.3. Extended PCC Subspace Shared Memory Region
+ */
+ if (offset >= 12 && offset < 16) {
+ uacpi_memzero(&handler_data.pcc.buffer, sizeof(handler_data.pcc.buffer));
+ handler_data.pcc.buffer.bytes = region->internal_buffer;
+ handler_data.pcc.buffer.length = region->length;
+
+ op = UACPI_REGION_OP_PCC_SEND;
+ break;
+ }
+
+ // No dispatch needed, IO is done
+ goto io_done;
+ }
+ case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO:
+ handler_data.gpio.pin_offset = field->pin_offset;
+ handler_data.gpio.num_pins = field->bit_length;
+ handler_data.gpio.value = *data.integer;
+
+ ret = uacpi_object_get_string_or_buffer(
+ field->connection, &handler_data.gpio.connection
+ );
+ if (uacpi_unlikely_error(ret))
+ goto io_done;
+
+ op = op == UACPI_REGION_OP_READ ?
+ UACPI_REGION_OP_GPIO_READ : UACPI_REGION_OP_GPIO_WRITE;
+ break;
+ case UACPI_ADDRESS_SPACE_IPMI:
+ handler_data.ipmi.in_out_message = data.buffer;
+ handler_data.ipmi.command = abs_offset;
+ op = UACPI_REGION_OP_IPMI_COMMAND;
+ break;
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ handler_data.ffixedhw.in_out_message = data.buffer;
+ handler_data.ffixedhw.command = abs_offset;
+ op = UACPI_REGION_OP_FFIXEDHW_COMMAND;
+ break;
+ case UACPI_ADDRESS_SPACE_PRM:
+ handler_data.prm.in_out_message = data.buffer;
+ op = UACPI_REGION_OP_PRM_COMMAND;
+ break;
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ case UACPI_ADDRESS_SPACE_SMBUS:
+ ret = uacpi_object_get_string_or_buffer(
+ field->connection, &handler_data.serial.connection
+ );
+ if (uacpi_unlikely_error(ret))
+ goto io_done;
+
+ handler_data.serial.command = abs_offset;
+ handler_data.serial.in_out_buffer = data.buffer;
+ handler_data.serial.access_attribute = field->attributes;
+
+ switch (field->attributes) {
+ case UACPI_ACCESS_ATTRIBUTE_BYTES:
+ case UACPI_ACCESS_ATTRIBUTE_RAW_BYTES:
+ case UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES:
+ handler_data.serial.access_length = field->access_length;
+ break;
+ default:
+ handler_data.serial.access_length = 0;
+ }
+
+ op = op == UACPI_REGION_OP_READ ?
+ UACPI_REGION_OP_SERIAL_READ : UACPI_REGION_OP_SERIAL_WRITE;
+ break;
+ default:
+ handler_data.rw.byte_width = field->access_width_bytes;
+ handler_data.rw.offset = abs_offset;
+ handler_data.rw.value = *data.integer;
+ break;
+ }
+
+ uacpi_object_ref(obj);
+ uacpi_namespace_write_unlock();
+
+ ret = handler->callback(op, &handler_data);
+
+ uacpi_namespace_write_lock();
+ uacpi_object_unref(obj);
+
+io_done:
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_trace_region_error(field->region, "unable to perform IO", ret);
+ goto out;
+ }
+
+ if (orig_op == UACPI_REGION_OP_READ) {
+ switch (region->space) {
+ case UACPI_ADDRESS_SPACE_PCC:
+ case UACPI_ADDRESS_SPACE_IPMI:
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ case UACPI_ADDRESS_SPACE_PRM:
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ case UACPI_ADDRESS_SPACE_SMBUS:
+ break;
+ case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO:
+ *data.integer = handler_data.gpio.value;
+ break;
+ default:
+ *data.integer = handler_data.rw.value;
+ break;
+ }
+ }
+
+ trace_region_io(field, space, abs_offset, orig_op, data);
+
+out:
+ uacpi_recursive_lock_release(&g_opregion_lock);
+ return ret;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/osi.c b/sys/dev/acpi/uacpi/osi.c
new file mode 100644
index 0000000..0940261
--- /dev/null
+++ b/sys/dev/acpi/uacpi/osi.c
@@ -0,0 +1,388 @@
+#include <uacpi/platform/atomic.h>
+#include <uacpi/internal/osi.h>
+#include <uacpi/internal/helpers.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/mutex.h>
+#include <uacpi/kernel_api.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+struct registered_interface {
+ const uacpi_char *name;
+ uacpi_u8 weight;
+ uacpi_u8 kind;
+
+ // Only applicable for predefined host interfaces
+ uacpi_u8 host_type;
+
+ // Only applicable for predefined interfaces
+ uacpi_u8 disabled : 1;
+ uacpi_u8 dynamic : 1;
+
+ struct registered_interface *next;
+};
+
+static uacpi_handle interface_mutex;
+static struct registered_interface *registered_interfaces;
+static uacpi_interface_handler interface_handler;
+static uacpi_u32 latest_queried_interface;
+
+#define WINDOWS(string, interface) \
+ { \
+ .name = "Windows "string, \
+ .weight = UACPI_VENDOR_INTERFACE_WINDOWS_##interface, \
+ .kind = UACPI_INTERFACE_KIND_VENDOR, \
+ .host_type = 0, \
+ .disabled = 0, \
+ .dynamic = 0, \
+ .next = UACPI_NULL \
+ }
+
+#define HOST_FEATURE(string, type) \
+ { \
+ .name = string, \
+ .weight = 0, \
+ .kind = UACPI_INTERFACE_KIND_FEATURE, \
+ .host_type = UACPI_HOST_INTERFACE_##type, \
+ .disabled = 1, \
+ .dynamic = 0, \
+ .next = UACPI_NULL, \
+ }
+
+static struct registered_interface predefined_interfaces[] = {
+ // Vendor strings
+ WINDOWS("2000", 2000),
+ WINDOWS("2001", XP),
+ WINDOWS("2001 SP1", XP_SP1),
+ WINDOWS("2001.1", SERVER_2003),
+ WINDOWS("2001 SP2", XP_SP2),
+ WINDOWS("2001.1 SP1", SERVER_2003_SP1),
+ WINDOWS("2006", VISTA),
+ WINDOWS("2006.1", SERVER_2008),
+ WINDOWS("2006 SP1", VISTA_SP1),
+ WINDOWS("2006 SP2", VISTA_SP2),
+ WINDOWS("2009", 7),
+ WINDOWS("2012", 8),
+ WINDOWS("2013", 8_1),
+ WINDOWS("2015", 10),
+ WINDOWS("2016", 10_RS1),
+ WINDOWS("2017", 10_RS2),
+ WINDOWS("2017.2", 10_RS3),
+ WINDOWS("2018", 10_RS4),
+ WINDOWS("2018.2", 10_RS5),
+ WINDOWS("2019", 10_19H1),
+ WINDOWS("2020", 10_20H1),
+ WINDOWS("2021", 11),
+ WINDOWS("2022", 11_22H2),
+
+ // Feature strings
+ HOST_FEATURE("Module Device", MODULE_DEVICE),
+ HOST_FEATURE("Processor Device", PROCESSOR_DEVICE),
+ HOST_FEATURE("3.0 Thermal Model", 3_0_THERMAL_MODEL),
+ HOST_FEATURE("3.0 _SCP Extensions", 3_0_SCP_EXTENSIONS),
+ HOST_FEATURE("Processor Aggregator Device", PROCESSOR_AGGREGATOR_DEVICE),
+
+ // Interpreter features
+ { .name = "Extended Address Space Descriptor" },
+};
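+
+/*
+ * Illustrative example (not upstream documentation): when AML evaluates
+ * something like
+ *
+ *     If (_OSI ("Windows 2015")) { ... }
+ *
+ * the string ends up in uacpi_handle_osi() below, which finds the
+ * WINDOWS("2015", 10) entry in this table, records its weight as the latest
+ * queried vendor interface, and reports it as supported unless it has been
+ * disabled via uacpi_uninstall_interface() or an installed query handler
+ * overrides the answer.
+ */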
+
+uacpi_status uacpi_initialize_interfaces(void)
+{
+ uacpi_size i;
+
+ registered_interfaces = &predefined_interfaces[0];
+
+ interface_mutex = uacpi_kernel_create_mutex();
+ if (uacpi_unlikely(interface_mutex == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ for (i = 0; i < (UACPI_ARRAY_SIZE(predefined_interfaces) - 1); ++i)
+ predefined_interfaces[i].next = &predefined_interfaces[i + 1];
+
+ return UACPI_STATUS_OK;
+}
+
+void uacpi_deinitialize_interfaces(void)
+{
+ struct registered_interface *iface, *next_iface = registered_interfaces;
+
+ while (next_iface) {
+ iface = next_iface;
+ next_iface = iface->next;
+
+ iface->next = UACPI_NULL;
+
+ if (iface->dynamic) {
+ uacpi_free_dynamic_string(iface->name);
+ uacpi_free(iface, sizeof(*iface));
+ continue;
+ }
+
+ // Only features are disabled by default
+ iface->disabled = iface->kind == UACPI_INTERFACE_KIND_FEATURE ?
+ UACPI_TRUE : UACPI_FALSE;
+ }
+
+ if (interface_mutex)
+ uacpi_kernel_free_mutex(interface_mutex);
+
+ interface_mutex = UACPI_NULL;
+ interface_handler = UACPI_NULL;
+ latest_queried_interface = 0;
+ registered_interfaces = UACPI_NULL;
+}
+
+uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void)
+{
+ return uacpi_atomic_load32(&latest_queried_interface);
+}
+
+static struct registered_interface *find_interface_unlocked(
+ const uacpi_char *name
+)
+{
+ struct registered_interface *interface = registered_interfaces;
+
+ while (interface) {
+ if (uacpi_strcmp(interface->name, name) == 0)
+ return interface;
+
+ interface = interface->next;
+ }
+
+ return UACPI_NULL;
+}
+
+static struct registered_interface *find_host_interface_unlocked(
+ uacpi_host_interface type
+)
+{
+ struct registered_interface *interface = registered_interfaces;
+
+ while (interface) {
+ if (interface->host_type == type)
+ return interface;
+
+ interface = interface->next;
+ }
+
+ return UACPI_NULL;
+}
+
+uacpi_status uacpi_install_interface(
+ const uacpi_char *name, uacpi_interface_kind kind
+)
+{
+ struct registered_interface *interface;
+ uacpi_status ret;
+ uacpi_char *name_copy;
+ uacpi_size name_size;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = uacpi_acquire_native_mutex(interface_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ interface = find_interface_unlocked(name);
+ if (interface != UACPI_NULL) {
+ if (interface->disabled)
+ interface->disabled = UACPI_FALSE;
+
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ interface = uacpi_kernel_alloc(sizeof(*interface));
+ if (uacpi_unlikely(interface == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ name_size = uacpi_strlen(name) + 1;
+ name_copy = uacpi_kernel_alloc(name_size);
+ if (uacpi_unlikely(name_copy == UACPI_NULL)) {
+ uacpi_free(interface, sizeof(*interface));
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ uacpi_memcpy(name_copy, name, name_size);
+ interface->name = name_copy;
+ interface->weight = 0;
+ interface->kind = kind;
+ interface->host_type = 0;
+ interface->disabled = 0;
+ interface->dynamic = 1;
+ interface->next = registered_interfaces;
+ registered_interfaces = interface;
+
+out:
+ uacpi_release_native_mutex(interface_mutex);
+ return ret;
+}
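+
+/*
+ * Illustrative usage sketch (the string is hypothetical): a host can expose
+ * an extra _OSI string at runtime, e.g.
+ *
+ *     uacpi_install_interface("MyHypotheticalFeature",
+ *                             UACPI_INTERFACE_KIND_FEATURE);
+ *
+ * Installing a name that matches a disabled predefined interface simply
+ * re-enables it and returns UACPI_STATUS_ALREADY_EXISTS, as the code above
+ * shows.
+ */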
+
+uacpi_status uacpi_uninstall_interface(const uacpi_char *name)
+{
+ struct registered_interface *cur, *prev;
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = uacpi_acquire_native_mutex(interface_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ cur = registered_interfaces;
+ prev = cur;
+
+ ret = UACPI_STATUS_NOT_FOUND;
+ while (cur) {
+ if (uacpi_strcmp(cur->name, name) != 0) {
+ prev = cur;
+ cur = cur->next;
+ continue;
+ }
+
+ if (cur->dynamic) {
+ if (prev == cur) {
+ registered_interfaces = cur->next;
+ } else {
+ prev->next = cur->next;
+ }
+
+ uacpi_release_native_mutex(interface_mutex);
+ uacpi_free_dynamic_string(cur->name);
+ uacpi_free(cur, sizeof(*cur));
+ return UACPI_STATUS_OK;
+ }
+
+ /*
+ * If this interface was already disabled, pretend we didn't actually
+ * find it and keep ret as UACPI_STATUS_NOT_FOUND. The fact that it's
+ * still in the registered list is an implementation detail of
+ * predefined interfaces.
+ */
+ if (!cur->disabled) {
+ cur->disabled = UACPI_TRUE;
+ ret = UACPI_STATUS_OK;
+ }
+
+ break;
+ }
+
+ uacpi_release_native_mutex(interface_mutex);
+ return ret;
+}
+
+static uacpi_status configure_host_interface(
+ uacpi_host_interface type, uacpi_bool enabled
+)
+{
+ struct registered_interface *interface;
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = uacpi_acquire_native_mutex(interface_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ interface = find_host_interface_unlocked(type);
+ if (interface == UACPI_NULL) {
+ ret = UACPI_STATUS_NOT_FOUND;
+ goto out;
+ }
+
+ interface->disabled = !enabled;
+out:
+ uacpi_release_native_mutex(interface_mutex);
+ return ret;
+}
+
+uacpi_status uacpi_enable_host_interface(uacpi_host_interface type)
+{
+ return configure_host_interface(type, UACPI_TRUE);
+}
+
+uacpi_status uacpi_disable_host_interface(uacpi_host_interface type)
+{
+ return configure_host_interface(type, UACPI_FALSE);
+}
+
+uacpi_status uacpi_set_interface_query_handler(
+ uacpi_interface_handler handler
+)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = uacpi_acquire_native_mutex(interface_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (interface_handler != UACPI_NULL && handler != UACPI_NULL) {
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+
+ interface_handler = handler;
+out:
+ uacpi_release_native_mutex(interface_mutex);
+ return ret;
+}
+
+uacpi_status uacpi_bulk_configure_interfaces(
+ uacpi_interface_action action, uacpi_interface_kind kind
+)
+{
+ uacpi_status ret;
+ struct registered_interface *interface;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = uacpi_acquire_native_mutex(interface_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ interface = registered_interfaces;
+ while (interface) {
+ if (kind & interface->kind)
+ interface->disabled = (action == UACPI_INTERFACE_ACTION_DISABLE);
+
+ interface = interface->next;
+ }
+
+ uacpi_release_native_mutex(interface_mutex);
+ return ret;
+}
+
+uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value)
+{
+ uacpi_status ret;
+ struct registered_interface *interface;
+ uacpi_bool is_supported = UACPI_FALSE;
+
+ ret = uacpi_acquire_native_mutex(interface_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ interface = find_interface_unlocked(string);
+ if (interface == UACPI_NULL)
+ goto out;
+
+ if (interface->weight > latest_queried_interface)
+ uacpi_atomic_store32(&latest_queried_interface, interface->weight);
+
+ is_supported = !interface->disabled;
+ if (interface_handler)
+ is_supported = interface_handler(string, is_supported);
+out:
+ uacpi_release_native_mutex(interface_mutex);
+ *out_value = is_supported;
+ return UACPI_STATUS_OK;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/registers.c b/sys/dev/acpi/uacpi/registers.c
new file mode 100644
index 0000000..a52ce97
--- /dev/null
+++ b/sys/dev/acpi/uacpi/registers.c
@@ -0,0 +1,572 @@
+#include <uacpi/internal/registers.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/context.h>
+#include <uacpi/internal/io.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/platform/atomic.h>
+#include <uacpi/acpi.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+static uacpi_handle g_reg_lock;
+
+enum register_kind {
+ REGISTER_KIND_GAS,
+ REGISTER_KIND_IO,
+};
+
+enum register_access_kind {
+ REGISTER_ACCESS_KIND_PRESERVE,
+ REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
+ REGISTER_ACCESS_KIND_NORMAL,
+};
+
+struct register_spec {
+ uacpi_u8 kind;
+ uacpi_u8 access_kind;
+ uacpi_u8 access_width; // only REGISTER_KIND_IO
+ void *accessors[2];
+ uacpi_u64 write_only_mask;
+ uacpi_u64 preserve_mask;
+};
+
+static const struct register_spec g_registers[UACPI_REGISTER_MAX + 1] = {
+ [UACPI_REGISTER_PM1_STS] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
+ .accessors = {
+ &g_uacpi_rt_ctx.pm1a_status_blk,
+ &g_uacpi_rt_ctx.pm1b_status_blk,
+ },
+ .preserve_mask = ACPI_PM1_STS_IGN0_MASK,
+ },
+ [UACPI_REGISTER_PM1_EN] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
+ .accessors = {
+ &g_uacpi_rt_ctx.pm1a_enable_blk,
+ &g_uacpi_rt_ctx.pm1b_enable_blk,
+ },
+ },
+ [UACPI_REGISTER_PM1_CNT] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
+ .accessors = {
+ &g_uacpi_rt_ctx.fadt.x_pm1a_cnt_blk,
+ &g_uacpi_rt_ctx.fadt.x_pm1b_cnt_blk,
+ },
+ .write_only_mask = ACPI_PM1_CNT_SLP_EN_MASK |
+ ACPI_PM1_CNT_GBL_RLS_MASK,
+ .preserve_mask = ACPI_PM1_CNT_PRESERVE_MASK,
+ },
+ [UACPI_REGISTER_PM_TMR] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
+ .accessors = { &g_uacpi_rt_ctx.fadt.x_pm_tmr_blk, },
+ },
+ [UACPI_REGISTER_PM2_CNT] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
+ .accessors = { &g_uacpi_rt_ctx.fadt.x_pm2_cnt_blk, },
+ .preserve_mask = ACPI_PM2_CNT_PRESERVE_MASK,
+ },
+ [UACPI_REGISTER_SLP_CNT] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_PRESERVE,
+ .accessors = { &g_uacpi_rt_ctx.fadt.sleep_control_reg, },
+ .write_only_mask = ACPI_SLP_CNT_SLP_EN_MASK,
+ .preserve_mask = ACPI_SLP_CNT_PRESERVE_MASK,
+ },
+ [UACPI_REGISTER_SLP_STS] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_WRITE_TO_CLEAR,
+ .accessors = { &g_uacpi_rt_ctx.fadt.sleep_status_reg, },
+ .preserve_mask = ACPI_SLP_STS_PRESERVE_MASK,
+ },
+ [UACPI_REGISTER_RESET] = {
+ .kind = REGISTER_KIND_GAS,
+ .access_kind = REGISTER_ACCESS_KIND_NORMAL,
+ .accessors = { &g_uacpi_rt_ctx.fadt.reset_reg, },
+ },
+ [UACPI_REGISTER_SMI_CMD] = {
+ .kind = REGISTER_KIND_IO,
+ .access_kind = REGISTER_ACCESS_KIND_NORMAL,
+ .access_width = 1,
+ .accessors = { &g_uacpi_rt_ctx.fadt.smi_cmd, },
+ },
+};
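+
+/*
+ * Note on the two-slot accessors above: registers such as PM1_STS, PM1_EN
+ * and PM1_CNT may be split into an "a" and a "b" block by the FADT. The
+ * helpers below treat each pair as one logical register: reads OR the two
+ * halves together and writes are mirrored to both blocks, which is how the
+ * split PM1 register blocks are expected to be handled.
+ */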
+
+enum register_mapping_state {
+ REGISTER_MAPPING_STATE_NONE = 0,
+ REGISTER_MAPPING_STATE_NOT_NEEDED,
+ REGISTER_MAPPING_STATE_MAPPED,
+};
+
+struct register_mapping {
+ uacpi_mapped_gas mappings[2];
+ uacpi_u8 states[2];
+};
+static struct register_mapping g_register_mappings[UACPI_REGISTER_MAX + 1];
+
+static uacpi_status map_one(
+ const struct register_spec *spec, struct register_mapping *mapping,
+ uacpi_u8 idx
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (mapping->states[idx] != REGISTER_MAPPING_STATE_NONE)
+ return ret;
+
+ if (spec->kind == REGISTER_KIND_GAS) {
+ struct acpi_gas *gas = spec->accessors[idx];
+
+ if (gas == UACPI_NULL || gas->address == 0) {
+ mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED;
+ return ret;
+ }
+
+ ret = uacpi_map_gas_noalloc(gas, &mapping->mappings[idx]);
+ } else {
+ struct acpi_gas temp_gas = { 0 };
+
+ if (idx != 0) {
+ mapping->states[idx] = REGISTER_MAPPING_STATE_NOT_NEEDED;
+ return ret;
+ }
+
+ temp_gas.address_space_id = UACPI_ADDRESS_SPACE_SYSTEM_IO;
+ temp_gas.address = *(uacpi_u32*)spec->accessors[0];
+ temp_gas.register_bit_width = spec->access_width * 8;
+
+ ret = uacpi_map_gas_noalloc(&temp_gas, &mapping->mappings[idx]);
+ }
+
+ if (uacpi_likely_success(ret))
+ mapping->states[idx] = REGISTER_MAPPING_STATE_MAPPED;
+
+ return ret;
+}
+
+static uacpi_status ensure_register_mapped(
+ const struct register_spec *spec, struct register_mapping *mapping
+)
+{
+ uacpi_status ret;
+ uacpi_bool needs_mapping = UACPI_FALSE;
+ uacpi_u8 state;
+ uacpi_cpu_flags flags;
+
+ state = uacpi_atomic_load8(&mapping->states[0]);
+ needs_mapping |= state == REGISTER_MAPPING_STATE_NONE;
+
+ state = uacpi_atomic_load8(&mapping->states[1]);
+ needs_mapping |= state == REGISTER_MAPPING_STATE_NONE;
+
+ if (!needs_mapping)
+ return UACPI_STATUS_OK;
+
+ flags = uacpi_kernel_lock_spinlock(g_reg_lock);
+
+ ret = map_one(spec, mapping, 0);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ ret = map_one(spec, mapping, 1);
+out:
+ uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
+ return ret;
+}
+
+static uacpi_status get_reg(
+ uacpi_u8 idx, const struct register_spec **out_spec,
+ struct register_mapping **out_mapping
+)
+{
+ if (idx > UACPI_REGISTER_MAX)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ *out_spec = &g_registers[idx];
+ *out_mapping = &g_register_mappings[idx];
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status do_read_one(
+ struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 *out_value
+)
+{
+ if (mapping->states[idx] != REGISTER_MAPPING_STATE_MAPPED)
+ return UACPI_STATUS_OK;
+
+ return uacpi_gas_read_mapped(&mapping->mappings[idx], out_value);
+}
+
+static uacpi_status do_read_register(
+ const struct register_spec *reg, struct register_mapping *mapping,
+ uacpi_u64 *out_value
+)
+{
+ uacpi_status ret;
+ uacpi_u64 value0 = 0, value1 = 0;
+
+ ret = do_read_one(mapping, 0, &value0);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = do_read_one(mapping, 1, &value1);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ *out_value = value0 | value1;
+ if (reg->write_only_mask)
+ *out_value &= ~reg->write_only_mask;
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_read_register(
+ enum uacpi_register reg_enum, uacpi_u64 *out_value
+)
+{
+ uacpi_status ret;
+ const struct register_spec *reg;
+ struct register_mapping *mapping;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = get_reg(reg_enum, &reg, &mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = ensure_register_mapped(reg, mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return do_read_register(reg, mapping, out_value);
+}
+
+static uacpi_status do_write_one(
+ struct register_mapping *mapping, uacpi_u8 idx, uacpi_u64 in_value
+)
+{
+ if (mapping->states[idx] != REGISTER_MAPPING_STATE_MAPPED)
+ return UACPI_STATUS_OK;
+
+ return uacpi_gas_write_mapped(&mapping->mappings[idx], in_value);
+}
+
+static uacpi_status do_write_register(
+ const struct register_spec *reg, struct register_mapping *mapping,
+ uacpi_u64 in_value
+)
+{
+ uacpi_status ret;
+
+ if (reg->preserve_mask) {
+ in_value &= ~reg->preserve_mask;
+
+ if (reg->access_kind == REGISTER_ACCESS_KIND_PRESERVE) {
+ uacpi_u64 data;
+
+ ret = do_read_register(reg, mapping, &data);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ in_value |= data & reg->preserve_mask;
+ }
+ }
+
+ ret = do_write_one(mapping, 0, in_value);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return do_write_one(mapping, 1, in_value);
+}
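+
+/*
+ * Illustrative walk-through of the masking above: a caller writing
+ * SLP_TYP|SLP_EN to PM1_CNT first has any bits covered by preserve_mask
+ * dropped from the requested value; then, because PM1_CNT uses
+ * REGISTER_ACCESS_KIND_PRESERVE, the live register is read and the preserved
+ * bits are ORed back in, so the sleep request does not clobber unrelated
+ * control bits. Write-only bits (SLP_EN, GBL_RLS per the table above) are
+ * masked out of reads by do_read_register() instead.
+ */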
+
+uacpi_status uacpi_write_register(
+ enum uacpi_register reg_enum, uacpi_u64 in_value
+)
+{
+ uacpi_status ret;
+ const struct register_spec *reg;
+ struct register_mapping *mapping;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = get_reg(reg_enum, &reg, &mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = ensure_register_mapped(reg, mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return do_write_register(reg, mapping, in_value);
+}
+
+uacpi_status uacpi_write_registers(
+ enum uacpi_register reg_enum, uacpi_u64 in_value0, uacpi_u64 in_value1
+)
+{
+ uacpi_status ret;
+ const struct register_spec *reg;
+ struct register_mapping *mapping;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ ret = get_reg(reg_enum, &reg, &mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = ensure_register_mapped(reg, mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = do_write_one(mapping, 0, in_value0);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return do_write_one(mapping, 1, in_value1);
+}
+
+struct register_field {
+ uacpi_u8 reg;
+ uacpi_u8 offset;
+ uacpi_u16 mask;
+};
+
+static const struct register_field g_fields[UACPI_REGISTER_FIELD_MAX + 1] = {
+ [UACPI_REGISTER_FIELD_TMR_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_TMR_STS_IDX,
+ .mask = ACPI_PM1_STS_TMR_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_BM_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_BM_STS_IDX,
+ .mask = ACPI_PM1_STS_BM_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_GBL_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_GBL_STS_IDX,
+ .mask = ACPI_PM1_STS_GBL_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_PWRBTN_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_PWRBTN_STS_IDX,
+ .mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_SLPBTN_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_SLPBTN_STS_IDX,
+ .mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_RTC_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_RTC_STS_IDX,
+ .mask = ACPI_PM1_STS_RTC_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_HWR_WAK_STS] = {
+ .reg = UACPI_REGISTER_SLP_STS,
+ .offset = ACPI_SLP_STS_WAK_STS_IDX,
+ .mask = ACPI_SLP_STS_WAK_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_WAK_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_WAKE_STS_IDX,
+ .mask = ACPI_PM1_STS_WAKE_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_PCIEX_WAKE_STS] = {
+ .reg = UACPI_REGISTER_PM1_STS,
+ .offset = ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX,
+ .mask = ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_TMR_EN] = {
+ .reg = UACPI_REGISTER_PM1_EN,
+ .offset = ACPI_PM1_EN_TMR_EN_IDX,
+ .mask = ACPI_PM1_EN_TMR_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_GBL_EN] = {
+ .reg = UACPI_REGISTER_PM1_EN,
+ .offset = ACPI_PM1_EN_GBL_EN_IDX,
+ .mask = ACPI_PM1_EN_GBL_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_PWRBTN_EN] = {
+ .reg = UACPI_REGISTER_PM1_EN,
+ .offset = ACPI_PM1_EN_PWRBTN_EN_IDX,
+ .mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_SLPBTN_EN] = {
+ .reg = UACPI_REGISTER_PM1_EN,
+ .offset = ACPI_PM1_EN_SLPBTN_EN_IDX,
+ .mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_RTC_EN] = {
+ .reg = UACPI_REGISTER_PM1_EN,
+ .offset = ACPI_PM1_EN_RTC_EN_IDX,
+ .mask = ACPI_PM1_EN_RTC_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS] = {
+ .reg = UACPI_REGISTER_PM1_EN,
+ .offset = ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX,
+ .mask = ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_SCI_EN] = {
+ .reg = UACPI_REGISTER_PM1_CNT,
+ .offset = ACPI_PM1_CNT_SCI_EN_IDX,
+ .mask = ACPI_PM1_CNT_SCI_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_BM_RLD] = {
+ .reg = UACPI_REGISTER_PM1_CNT,
+ .offset = ACPI_PM1_CNT_BM_RLD_IDX,
+ .mask = ACPI_PM1_CNT_BM_RLD_MASK,
+ },
+ [UACPI_REGISTER_FIELD_GBL_RLS] = {
+ .reg = UACPI_REGISTER_PM1_CNT,
+ .offset = ACPI_PM1_CNT_GBL_RLS_IDX,
+ .mask = ACPI_PM1_CNT_GBL_RLS_MASK,
+ },
+ [UACPI_REGISTER_FIELD_SLP_TYP] = {
+ .reg = UACPI_REGISTER_PM1_CNT,
+ .offset = ACPI_PM1_CNT_SLP_TYP_IDX,
+ .mask = ACPI_PM1_CNT_SLP_TYP_MASK,
+ },
+ [UACPI_REGISTER_FIELD_SLP_EN] = {
+ .reg = UACPI_REGISTER_PM1_CNT,
+ .offset = ACPI_PM1_CNT_SLP_EN_IDX,
+ .mask = ACPI_PM1_CNT_SLP_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_HWR_SLP_TYP] = {
+ .reg = UACPI_REGISTER_SLP_CNT,
+ .offset = ACPI_SLP_CNT_SLP_TYP_IDX,
+ .mask = ACPI_SLP_CNT_SLP_TYP_MASK,
+ },
+ [UACPI_REGISTER_FIELD_HWR_SLP_EN] = {
+ .reg = UACPI_REGISTER_SLP_CNT,
+ .offset = ACPI_SLP_CNT_SLP_EN_IDX,
+ .mask = ACPI_SLP_CNT_SLP_EN_MASK,
+ },
+ [UACPI_REGISTER_FIELD_ARB_DIS] = {
+ .reg = UACPI_REGISTER_PM2_CNT,
+ .offset = ACPI_PM2_CNT_ARB_DIS_IDX,
+ .mask = ACPI_PM2_CNT_ARB_DIS_MASK,
+ },
+};
+
+uacpi_status uacpi_initialize_registers(void)
+{
+ g_reg_lock = uacpi_kernel_create_spinlock();
+ if (uacpi_unlikely(g_reg_lock == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+
+void uacpi_deinitialize_registers(void)
+{
+ uacpi_u8 i;
+ struct register_mapping *mapping;
+
+ if (g_reg_lock != UACPI_NULL) {
+ uacpi_kernel_free_spinlock(g_reg_lock);
+ g_reg_lock = UACPI_NULL;
+ }
+
+ for (i = 0; i <= UACPI_REGISTER_MAX; ++i) {
+ mapping = &g_register_mappings[i];
+
+ if (mapping->states[0] == REGISTER_MAPPING_STATE_MAPPED)
+ uacpi_unmap_gas_nofree(&mapping->mappings[0]);
+ if (mapping->states[1] == REGISTER_MAPPING_STATE_MAPPED)
+ uacpi_unmap_gas_nofree(&mapping->mappings[1]);
+ }
+
+ uacpi_memzero(&g_register_mappings, sizeof(g_register_mappings));
+}
+
+uacpi_status uacpi_read_register_field(
+ enum uacpi_register_field field_enum, uacpi_u64 *out_value
+)
+{
+ uacpi_status ret;
+ uacpi_u8 field_idx = field_enum;
+ const struct register_field *field;
+ const struct register_spec *reg;
+ struct register_mapping *mapping;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ field = &g_fields[field_idx];
+ reg = &g_registers[field->reg];
+ mapping = &g_register_mappings[field->reg];
+
+ ret = ensure_register_mapped(reg, mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = do_read_register(reg, mapping, out_value);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ *out_value = (*out_value & field->mask) >> field->offset;
+ return UACPI_STATUS_OK;
+}
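+
+/*
+ * Illustrative numbers for the extraction above: for WAK_STS, which the ACPI
+ * spec places in bit 15 of PM1_STS, a raw register value of 0x8001 with
+ * mask 0x8000 and offset 15 yields (0x8001 & 0x8000) >> 15 = 1. The exact
+ * mask/offset values come from the ACPI_PM1_STS_* definitions referenced in
+ * g_fields.
+ */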
+
+uacpi_status uacpi_write_register_field(
+ enum uacpi_register_field field_enum, uacpi_u64 in_value
+)
+{
+ uacpi_status ret;
+ uacpi_u8 field_idx = field_enum;
+ const struct register_field *field;
+ const struct register_spec *reg;
+ struct register_mapping *mapping;
+
+ uacpi_u64 data;
+ uacpi_cpu_flags flags;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ field = &g_fields[field_idx];
+ reg = &g_registers[field->reg];
+ mapping = &g_register_mappings[field->reg];
+
+ ret = ensure_register_mapped(reg, mapping);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ in_value = (in_value << field->offset) & field->mask;
+
+ flags = uacpi_kernel_lock_spinlock(g_reg_lock);
+
+    if (reg->access_kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) {
+ if (in_value == 0) {
+ ret = UACPI_STATUS_OK;
+ goto out;
+ }
+
+ ret = do_write_register(reg, mapping, in_value);
+ goto out;
+ }
+
+ ret = do_read_register(reg, mapping, &data);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ data &= ~field->mask;
+ data |= in_value;
+
+ ret = do_write_register(reg, mapping, data);
+
+out:
+ uacpi_kernel_unlock_spinlock(g_reg_lock, flags);
+ return ret;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/resources.c b/sys/dev/acpi/uacpi/resources.c
new file mode 100644
index 0000000..a9bcb82
--- /dev/null
+++ b/sys/dev/acpi/uacpi/resources.c
@@ -0,0 +1,2569 @@
+#include <uacpi/types.h>
+#include <uacpi/acpi.h>
+#include <uacpi/internal/resources.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/uacpi.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+#define LARGE_RESOURCE_BASE (ACPI_RESOURCE_END_TAG + 1)
+#define L(x) (x + LARGE_RESOURCE_BASE)
+
+/*
+ * Map raw AML resource types to the internal enum; this also takes care of
+ * type sanitization by returning UACPI_AML_RESOURCE_INVALID for any unknown
+ * type.
+ */
+static const uacpi_u8 aml_resource_to_type[256] = {
+ // Small items
+ [ACPI_RESOURCE_IRQ] = UACPI_AML_RESOURCE_IRQ,
+ [ACPI_RESOURCE_DMA] = UACPI_AML_RESOURCE_DMA,
+ [ACPI_RESOURCE_START_DEPENDENT] = UACPI_AML_RESOURCE_START_DEPENDENT,
+ [ACPI_RESOURCE_END_DEPENDENT] = UACPI_AML_RESOURCE_END_DEPENDENT,
+ [ACPI_RESOURCE_IO] = UACPI_AML_RESOURCE_IO,
+ [ACPI_RESOURCE_FIXED_IO] = UACPI_AML_RESOURCE_FIXED_IO,
+ [ACPI_RESOURCE_FIXED_DMA] = UACPI_AML_RESOURCE_FIXED_DMA,
+ [ACPI_RESOURCE_VENDOR_TYPE0] = UACPI_AML_RESOURCE_VENDOR_TYPE0,
+ [ACPI_RESOURCE_END_TAG] = UACPI_AML_RESOURCE_END_TAG,
+
+ // Large items
+ [L(ACPI_RESOURCE_MEMORY24)] = UACPI_AML_RESOURCE_MEMORY24,
+ [L(ACPI_RESOURCE_GENERIC_REGISTER)] = UACPI_AML_RESOURCE_GENERIC_REGISTER,
+ [L(ACPI_RESOURCE_VENDOR_TYPE1)] = UACPI_AML_RESOURCE_VENDOR_TYPE1,
+ [L(ACPI_RESOURCE_MEMORY32)] = UACPI_AML_RESOURCE_MEMORY32,
+ [L(ACPI_RESOURCE_FIXED_MEMORY32)] = UACPI_AML_RESOURCE_FIXED_MEMORY32,
+ [L(ACPI_RESOURCE_ADDRESS32)] = UACPI_AML_RESOURCE_ADDRESS32,
+ [L(ACPI_RESOURCE_ADDRESS16)] = UACPI_AML_RESOURCE_ADDRESS16,
+ [L(ACPI_RESOURCE_EXTENDED_IRQ)] = UACPI_AML_RESOURCE_EXTENDED_IRQ,
+ [L(ACPI_RESOURCE_ADDRESS64_EXTENDED)] = UACPI_AML_RESOURCE_ADDRESS64_EXTENDED,
+ [L(ACPI_RESOURCE_ADDRESS64)] = UACPI_AML_RESOURCE_ADDRESS64,
+ [L(ACPI_RESOURCE_GPIO_CONNECTION)] = UACPI_AML_RESOURCE_GPIO_CONNECTION,
+ [L(ACPI_RESOURCE_PIN_FUNCTION)] = UACPI_AML_RESOURCE_PIN_FUNCTION,
+ [L(ACPI_RESOURCE_SERIAL_CONNECTION)] = UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+ [L(ACPI_RESOURCE_PIN_CONFIGURATION)] = UACPI_AML_RESOURCE_PIN_CONFIGURATION,
+ [L(ACPI_RESOURCE_PIN_GROUP)] = UACPI_AML_RESOURCE_PIN_GROUP,
+ [L(ACPI_RESOURCE_PIN_GROUP_FUNCTION)] = UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION,
+ [L(ACPI_RESOURCE_PIN_GROUP_CONFIGURATION)] = UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION,
+ [L(ACPI_RESOURCE_CLOCK_INPUT)] = UACPI_AML_RESOURCE_CLOCK_INPUT,
+};
+
+static const uacpi_u8 type_to_aml_resource[] = {
+ [UACPI_AML_RESOURCE_IRQ] = ACPI_RESOURCE_IRQ,
+ [UACPI_AML_RESOURCE_DMA] = ACPI_RESOURCE_DMA,
+ [UACPI_AML_RESOURCE_START_DEPENDENT] = ACPI_RESOURCE_START_DEPENDENT,
+ [UACPI_AML_RESOURCE_END_DEPENDENT] = ACPI_RESOURCE_END_DEPENDENT,
+ [UACPI_AML_RESOURCE_IO] = ACPI_RESOURCE_IO,
+ [UACPI_AML_RESOURCE_FIXED_IO] = ACPI_RESOURCE_FIXED_IO,
+ [UACPI_AML_RESOURCE_FIXED_DMA] = ACPI_RESOURCE_FIXED_DMA,
+ [UACPI_AML_RESOURCE_VENDOR_TYPE0] = ACPI_RESOURCE_VENDOR_TYPE0,
+ [UACPI_AML_RESOURCE_END_TAG] = ACPI_RESOURCE_END_TAG,
+
+ // Large items
+ [UACPI_AML_RESOURCE_MEMORY24] = ACPI_RESOURCE_MEMORY24,
+ [UACPI_AML_RESOURCE_GENERIC_REGISTER] = ACPI_RESOURCE_GENERIC_REGISTER,
+ [UACPI_AML_RESOURCE_VENDOR_TYPE1] = ACPI_RESOURCE_VENDOR_TYPE1,
+ [UACPI_AML_RESOURCE_MEMORY32] = ACPI_RESOURCE_MEMORY32,
+ [UACPI_AML_RESOURCE_FIXED_MEMORY32] = ACPI_RESOURCE_FIXED_MEMORY32,
+ [UACPI_AML_RESOURCE_ADDRESS32] = ACPI_RESOURCE_ADDRESS32,
+ [UACPI_AML_RESOURCE_ADDRESS16] = ACPI_RESOURCE_ADDRESS16,
+ [UACPI_AML_RESOURCE_EXTENDED_IRQ] = ACPI_RESOURCE_EXTENDED_IRQ,
+ [UACPI_AML_RESOURCE_ADDRESS64_EXTENDED] = ACPI_RESOURCE_ADDRESS64_EXTENDED,
+ [UACPI_AML_RESOURCE_ADDRESS64] = ACPI_RESOURCE_ADDRESS64,
+ [UACPI_AML_RESOURCE_GPIO_CONNECTION] = ACPI_RESOURCE_GPIO_CONNECTION,
+ [UACPI_AML_RESOURCE_PIN_FUNCTION] = ACPI_RESOURCE_PIN_FUNCTION,
+ [UACPI_AML_RESOURCE_SERIAL_CONNECTION] = ACPI_RESOURCE_SERIAL_CONNECTION,
+ [UACPI_AML_RESOURCE_PIN_CONFIGURATION] = ACPI_RESOURCE_PIN_CONFIGURATION,
+ [UACPI_AML_RESOURCE_PIN_GROUP] = ACPI_RESOURCE_PIN_GROUP,
+ [UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION] = ACPI_RESOURCE_PIN_GROUP_FUNCTION,
+ [UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION] = ACPI_RESOURCE_PIN_GROUP_CONFIGURATION,
+ [UACPI_AML_RESOURCE_CLOCK_INPUT] = ACPI_RESOURCE_CLOCK_INPUT,
+};
+
+static const uacpi_u8 native_resource_to_type[UACPI_RESOURCE_TYPE_MAX + 1] = {
+ [UACPI_RESOURCE_TYPE_IRQ] = UACPI_AML_RESOURCE_IRQ,
+ [UACPI_RESOURCE_TYPE_EXTENDED_IRQ] = UACPI_AML_RESOURCE_EXTENDED_IRQ,
+ [UACPI_RESOURCE_TYPE_DMA] = UACPI_AML_RESOURCE_DMA,
+ [UACPI_RESOURCE_TYPE_FIXED_DMA] = UACPI_AML_RESOURCE_FIXED_DMA,
+ [UACPI_RESOURCE_TYPE_IO] = UACPI_AML_RESOURCE_IO,
+ [UACPI_RESOURCE_TYPE_FIXED_IO] = UACPI_AML_RESOURCE_FIXED_IO,
+ [UACPI_RESOURCE_TYPE_ADDRESS16] = UACPI_AML_RESOURCE_ADDRESS16,
+ [UACPI_RESOURCE_TYPE_ADDRESS32] = UACPI_AML_RESOURCE_ADDRESS32,
+ [UACPI_RESOURCE_TYPE_ADDRESS64] = UACPI_AML_RESOURCE_ADDRESS64,
+ [UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED] = UACPI_AML_RESOURCE_ADDRESS64_EXTENDED,
+ [UACPI_RESOURCE_TYPE_MEMORY24] = UACPI_AML_RESOURCE_MEMORY24,
+ [UACPI_RESOURCE_TYPE_MEMORY32] = UACPI_AML_RESOURCE_MEMORY32,
+ [UACPI_RESOURCE_TYPE_FIXED_MEMORY32] = UACPI_AML_RESOURCE_FIXED_MEMORY32,
+ [UACPI_RESOURCE_TYPE_START_DEPENDENT] = UACPI_AML_RESOURCE_START_DEPENDENT,
+ [UACPI_RESOURCE_TYPE_END_DEPENDENT] = UACPI_AML_RESOURCE_END_DEPENDENT,
+ [UACPI_RESOURCE_TYPE_VENDOR_SMALL] = UACPI_AML_RESOURCE_VENDOR_TYPE0,
+ [UACPI_RESOURCE_TYPE_VENDOR_LARGE] = UACPI_AML_RESOURCE_VENDOR_TYPE1,
+ [UACPI_RESOURCE_TYPE_GENERIC_REGISTER] = UACPI_AML_RESOURCE_GENERIC_REGISTER,
+ [UACPI_RESOURCE_TYPE_GPIO_CONNECTION] = UACPI_AML_RESOURCE_GPIO_CONNECTION,
+ [UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+ [UACPI_RESOURCE_TYPE_SERIAL_SPI_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+ [UACPI_RESOURCE_TYPE_SERIAL_UART_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+ [UACPI_RESOURCE_TYPE_SERIAL_CSI2_CONNECTION] = UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+ [UACPI_RESOURCE_TYPE_PIN_FUNCTION] = UACPI_AML_RESOURCE_PIN_FUNCTION,
+ [UACPI_RESOURCE_TYPE_PIN_CONFIGURATION] = UACPI_AML_RESOURCE_PIN_CONFIGURATION,
+ [UACPI_RESOURCE_TYPE_PIN_GROUP] = UACPI_AML_RESOURCE_PIN_GROUP,
+ [UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION] = UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION,
+ [UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION] = UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION,
+ [UACPI_RESOURCE_TYPE_CLOCK_INPUT] = UACPI_AML_RESOURCE_CLOCK_INPUT,
+ [UACPI_RESOURCE_TYPE_END_TAG] = UACPI_AML_RESOURCE_END_TAG,
+};
+
+#define SMALL_ITEM_HEADER_SIZE sizeof(struct acpi_small_item)
+#define LARGE_ITEM_HEADER_SIZE sizeof(struct acpi_large_item)
+
+static const uacpi_u8 aml_resource_kind_to_header_size[2] = {
+ [UACPI_AML_RESOURCE_KIND_SMALL] = SMALL_ITEM_HEADER_SIZE,
+ [UACPI_AML_RESOURCE_KIND_LARGE] = LARGE_ITEM_HEADER_SIZE,
+};
+
+static uacpi_size aml_size_with_header(const struct uacpi_resource_spec *spec)
+{
+ return spec->aml_size +
+ aml_resource_kind_to_header_size[spec->resource_kind];
+}
+
+static uacpi_size extra_size_for_native_irq_or_dma(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ uacpi_u16 mask;
+ uacpi_u8 i, total_bits, num_bits = 0;
+
+ UACPI_UNUSED(size);
+
+ if (spec->type == UACPI_AML_RESOURCE_IRQ) {
+ struct acpi_resource_irq *irq = data;
+ mask = irq->irq_mask;
+ total_bits = 16;
+ } else {
+ struct acpi_resource_dma *dma = data;
+ mask = dma->channel_mask;
+ total_bits = 8;
+ }
+
+ for (i = 0; i < total_bits; ++i)
+ num_bits += !!(mask & (1 << i));
+
+ return num_bits;
+}
+
+static uacpi_size size_for_aml_irq(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_resource_irq *irq = &resource->irq;
+ uacpi_size size;
+
+ size = aml_size_with_header(spec);
+
+ switch (irq->length_kind) {
+ case UACPI_RESOURCE_LENGTH_KIND_FULL:
+ goto out_full;
+ case UACPI_RESOURCE_LENGTH_KIND_ONE_LESS:
+ case UACPI_RESOURCE_LENGTH_KIND_DONT_CARE:
+ if (irq->triggering != UACPI_TRIGGERING_EDGE)
+ goto out_full;
+ if (irq->polarity != UACPI_POLARITY_ACTIVE_HIGH)
+ goto out_full;
+ if (irq->sharing != UACPI_EXCLUSIVE)
+ goto out_full;
+
+ return size - 1;
+ }
+
+out_full:
+ if (uacpi_unlikely(irq->length_kind ==
+ UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)) {
+ uacpi_warn("requested IRQ resource length is "
+ "not compatible with specified flags, corrected\n");
+ }
+
+ return size;
+}
+
+static uacpi_size size_for_aml_start_dependent(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_resource_start_dependent *start_dep = &resource->start_dependent;
+ uacpi_size size;
+
+ size = aml_size_with_header(spec);
+ switch (start_dep->length_kind) {
+ case UACPI_RESOURCE_LENGTH_KIND_FULL:
+ goto out_full;
+ case UACPI_RESOURCE_LENGTH_KIND_ONE_LESS:
+ case UACPI_RESOURCE_LENGTH_KIND_DONT_CARE:
+ if (start_dep->compatibility != UACPI_ACCEPTABLE)
+ goto out_full;
+ if (start_dep->performance != UACPI_ACCEPTABLE)
+ goto out_full;
+
+ return size - 1;
+ }
+
+out_full:
+ if (uacpi_unlikely(start_dep->length_kind ==
+ UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)) {
+ uacpi_warn("requested StartDependentFn resource length is "
+ "not compatible with specified flags, corrected\n");
+ }
+
+ return size;
+}
+
+static uacpi_size extra_size_for_native_vendor(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ UACPI_UNUSED(spec);
+ UACPI_UNUSED(data);
+ return size;
+}
+
+static uacpi_size size_for_aml_vendor(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_size size = resource->vendor.length;
+
+ UACPI_UNUSED(spec);
+
+ if (size > 7 || resource->type == UACPI_RESOURCE_TYPE_VENDOR_LARGE) {
+ size += aml_resource_kind_to_header_size[
+ UACPI_AML_RESOURCE_KIND_LARGE
+ ];
+
+ if (uacpi_unlikely(resource->type != UACPI_RESOURCE_TYPE_VENDOR_LARGE)) {
+ uacpi_warn("vendor data too large for small descriptor (%zu), "
+ "correcting to large\n", size);
+ resource->type = UACPI_RESOURCE_TYPE_VENDOR_LARGE;
+ }
+ } else {
+ size += aml_resource_kind_to_header_size[
+ UACPI_AML_RESOURCE_KIND_SMALL
+ ];
+ }
+
+ return size;
+}
+
+static uacpi_size extra_size_for_resource_source(
+ uacpi_size base_size, uacpi_size reported_size
+)
+{
+ uacpi_size string_length;
+
+ if (reported_size <= base_size)
+ return 0;
+
+ /*
+ * The remainder of the descriptor minus the resource index field
+ */
+ string_length = (reported_size - base_size) - 1;
+ return UACPI_ALIGN_UP(string_length, sizeof(void*), uacpi_size);
+}
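+
+/*
+ * Illustrative arithmetic for the helper above (sizes are made up): with a
+ * fixed descriptor body of base_size = 6 bytes and a reported_size of 16,
+ * the trailing resource source string is 16 - 6 - 1 = 9 bytes (one byte goes
+ * to the source index), which is then rounded up to pointer alignment,
+ * e.g. 16 bytes on a 64-bit build, when reserving space in the native
+ * resource.
+ */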
+
+static uacpi_size size_for_aml_resource_source(
+ uacpi_resource_source *source, uacpi_bool with_index
+)
+{
+ uacpi_size length = source->length;
+
+ if (uacpi_unlikely(length && !source->index_present)) {
+ uacpi_warn("resource declares no source index with non-empty "
+ "string (%zu bytes), corrected\n", length);
+ source->index_present = UACPI_TRUE;
+ }
+
+ // If index is included in the dynamic resource source, add it to the length
+ if (with_index)
+ length += source->index_present;
+
+ return length;
+}
+
+static uacpi_size extra_size_for_native_address_or_clock_input(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ UACPI_UNUSED(data);
+ return extra_size_for_resource_source(spec->aml_size, size);
+}
+
+static uacpi_size size_for_aml_address_or_clock_input(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_resource_source *source;
+    uacpi_bool has_index = UACPI_TRUE;
+
+ switch (resource->type) {
+ case UACPI_RESOURCE_TYPE_ADDRESS16:
+ source = &resource->address16.source;
+ break;
+ case UACPI_RESOURCE_TYPE_ADDRESS32:
+ source = &resource->address32.source;
+ break;
+ case UACPI_RESOURCE_TYPE_ADDRESS64:
+ source = &resource->address64.source;
+ break;
+ case UACPI_RESOURCE_TYPE_CLOCK_INPUT:
+ source = &resource->clock_input.source;
+ has_index = UACPI_FALSE;
+ break;
+ default:
+ return 0;
+ }
+
+ return aml_size_with_header(spec) +
+ size_for_aml_resource_source(source, has_index);
+}
+
+static uacpi_size extra_size_for_extended_irq(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ struct acpi_resource_extended_irq *irq = data;
+ uacpi_size extra_size = 0;
+
+ extra_size += irq->num_irqs * sizeof(uacpi_u32);
+ extra_size += extra_size_for_resource_source(
+ spec->aml_size, size - extra_size
+ );
+
+ return extra_size;
+}
+
+static uacpi_size size_for_aml_extended_irq(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_resource_extended_irq *irq = &resource->extended_irq;
+ uacpi_size size;
+
+ size = aml_size_with_header(spec);
+ size += irq->num_irqs * 4;
+ size += size_for_aml_resource_source(&irq->source, UACPI_TRUE);
+
+ return size;
+}
+
+static uacpi_size extra_size_for_native_gpio_or_pins(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ uacpi_size pin_table_offset;
+
+ /*
+ * These resources pretend to have variable layout by declaring "offset"
+ * fields, but the layout is hardcoded and mandated by the spec to be
+ * very specific. We can use the offset numbers here to calculate the final
+ * length.
+ *
+ * For example, the layout of GPIO connection _always_ looks as follows:
+ * [0...22] -> fixed data
+ * [23...<source name offset - 1>] -> pin table
+ * [<source name offset>...<vendor data offset - 1>] -> source name
+ * [<vendor data offset>...<data offset + data length>] -> vendor data
+ */
+ switch (spec->type) {
+ case UACPI_AML_RESOURCE_GPIO_CONNECTION: {
+ struct acpi_resource_gpio_connection *gpio = data;
+ pin_table_offset = gpio->pin_table_offset;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_FUNCTION: {
+ struct acpi_resource_pin_function *pin = data;
+ pin_table_offset = pin->pin_table_offset;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_CONFIGURATION: {
+ struct acpi_resource_pin_configuration *config = data;
+ pin_table_offset = config->pin_table_offset;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_GROUP: {
+ struct acpi_resource_pin_group *group = data;
+ pin_table_offset = group->pin_table_offset;
+ break;
+ }
+
+ default:
+ return 0;
+ }
+
+ /*
+ * The size we get passed here does not include the header size because
+ * that's how resources are encoded. Subtract it here so that we get the
+ * correct final length.
+ */
+ return size - (pin_table_offset - LARGE_ITEM_HEADER_SIZE);
+}
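+
+/*
+ * Illustrative arithmetic for the helper above (numbers are made up,
+ * assuming the usual 3-byte large item header): for a GPIO connection whose
+ * encoded body is size = 40 bytes (header excluded) with
+ * pin_table_offset = 23, the variable tail that needs extra native storage
+ * is 40 - (23 - 3) = 20 bytes, covering the pin table, source name and
+ * vendor data.
+ */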
+
+static uacpi_size size_for_aml_gpio_or_pins(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_size source_length, vendor_length, pin_table_length, size;
+
+ size = aml_size_with_header(spec);
+ switch (spec->type) {
+ case UACPI_AML_RESOURCE_GPIO_CONNECTION: {
+ uacpi_resource_gpio_connection *res = &resource->gpio_connection;
+ source_length = res->source.length;
+ pin_table_length = res->pin_table_length;
+ vendor_length = res->vendor_data_length;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_FUNCTION: {
+ uacpi_resource_pin_function *res = &resource->pin_function;
+ source_length = res->source.length;
+ pin_table_length = res->pin_table_length;
+ vendor_length = res->vendor_data_length;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_CONFIGURATION: {
+ uacpi_resource_pin_configuration *res = &resource->pin_configuration;
+ source_length = res->source.length;
+ pin_table_length = res->pin_table_length;
+ vendor_length = res->vendor_data_length;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_GROUP: {
+ uacpi_resource_pin_group *res = &resource->pin_group;
+ source_length = res->label.length;
+ pin_table_length = res->pin_table_length;
+ vendor_length = res->vendor_data_length;
+ break;
+ }
+
+ default:
+ return 0;
+ }
+
+ size += source_length;
+ size += pin_table_length * 2;
+ size += vendor_length;
+
+ return size;
+}
+
+static uacpi_size extra_size_for_native_pin_group(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ uacpi_size source_offset;
+
+ switch (spec->type) {
+ case UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION: {
+ struct acpi_resource_pin_group_function *func = data;
+ source_offset = func->source_offset;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION: {
+ struct acpi_resource_pin_group_configuration *config = data;
+ source_offset = config->source_offset;
+ break;
+ }
+
+ default:
+ return 0;
+ }
+
+ // Same logic as extra_size_for_native_gpio_or_pins
+ return size - (source_offset - LARGE_ITEM_HEADER_SIZE);
+}
+
+static uacpi_size size_for_aml_pin_group(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_size source_length, label_length, vendor_length, size;
+
+ size = aml_size_with_header(spec);
+ switch (spec->type) {
+ case UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION: {
+ uacpi_resource_pin_group_function *res = &resource->pin_group_function;
+ source_length = res->source.length;
+ label_length = res->label.length;
+ vendor_length = res->vendor_data_length;
+ break;
+ }
+
+ case UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION: {
+ uacpi_resource_pin_group_configuration *res;
+ res = &resource->pin_group_configuration;
+ source_length = res->source.length;
+ label_length = res->label.length;
+ vendor_length = res->vendor_data_length;
+ break;
+ }
+
+ default:
+ return 0;
+ }
+
+ size += source_length;
+ size += label_length;
+ size += vendor_length;
+
+ return size;
+}
+
+#define AML_SERIAL_RESOURCE_EXTRA_SIZE(type) \
+ (sizeof(struct acpi_resource_serial_##type) \
+ - sizeof(struct acpi_resource_serial))
+
+#define NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(type) \
+ (sizeof(uacpi_resource_##type##_connection) \
+ - sizeof(uacpi_resource_serial_bus_common))
+
+static const uacpi_u8 aml_serial_resource_to_extra_aml_size
+[ACPI_SERIAL_TYPE_MAX + 1] = {
+ [ACPI_SERIAL_TYPE_I2C] = AML_SERIAL_RESOURCE_EXTRA_SIZE(i2c),
+ [ACPI_SERIAL_TYPE_SPI] = AML_SERIAL_RESOURCE_EXTRA_SIZE(spi),
+ [ACPI_SERIAL_TYPE_UART] = AML_SERIAL_RESOURCE_EXTRA_SIZE(uart),
+ [ACPI_SERIAL_TYPE_CSI2] = AML_SERIAL_RESOURCE_EXTRA_SIZE(csi2),
+};
+
+static const uacpi_u8 aml_serial_resource_to_extra_native_size
+[ACPI_SERIAL_TYPE_MAX + 1] = {
+ [ACPI_SERIAL_TYPE_I2C] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(i2c),
+ [ACPI_SERIAL_TYPE_SPI] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(spi),
+ [ACPI_SERIAL_TYPE_UART] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(uart),
+ [ACPI_SERIAL_TYPE_CSI2] = NATIVE_SERIAL_RESOURCE_EXTRA_SIZE(csi2),
+};
+
+static uacpi_size extra_size_for_serial_connection(
+ const struct uacpi_resource_spec *spec, void *data, uacpi_size size
+)
+{
+ struct acpi_resource_serial *serial = data;
+ uacpi_size extra_bytes = size;
+
+ extra_bytes -= spec->aml_size;
+ extra_bytes -= aml_serial_resource_to_extra_aml_size[serial->type];
+ extra_bytes += aml_serial_resource_to_extra_native_size[serial->type];
+
+ return extra_bytes;
+}
+
+static uacpi_size aml_size_for_serial_connection(
+ const struct uacpi_resource_spec *spec, uacpi_resource *resource
+)
+{
+ uacpi_size size;
+ uacpi_resource_serial_bus_common *serial_bus = &resource->serial_bus_common;
+
+ size = aml_size_with_header(spec);
+ size += aml_serial_resource_to_extra_aml_size[serial_bus->type];
+ size += serial_bus->vendor_data_length;
+ size += serial_bus->source.length;
+
+ return size;
+}
+
+#define OP(short_code, ...) \
+{ \
+ .code = UACPI_RESOURCE_CONVERT_OPCODE_##short_code, \
+ __VA_ARGS__ \
+}
+
+#define END() OP(END)
+
+#define AML_O(short_aml_name, field) \
+ uacpi_offsetof(struct acpi_resource_##short_aml_name, field)
+
+#define AML_F(short_aml_name, field) \
+ .f1.aml_offset = AML_O(short_aml_name, field)
+
+#define NATIVE_O(short_name, field) \
+ uacpi_offsetof(uacpi_resource_##short_name, field)
+
+#define NATIVE_F(short_native_name, field) \
+ .f2.native_offset = NATIVE_O(short_native_name, field)
+
+#define IMM(value) .f3.imm = value
+#define ARG0(value) .f1.arg0 = (value)
+#define ARG1(value) .f2.arg1 = (value)
+#define ARG2(value) .f3.arg2 = (value)
+
+
+static const struct uacpi_resource_convert_instruction convert_irq_to_native[] = {
+ OP(PACKED_ARRAY_16, AML_F(irq, irq_mask), NATIVE_F(irq, irqs),
+ ARG2(NATIVE_O(irq, num_irqs))),
+ OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(3), IMM(6)),
+ OP(SET_TO_IMM, NATIVE_F(irq, length_kind),
+ IMM(UACPI_RESOURCE_LENGTH_KIND_FULL)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, triggering), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, polarity), IMM(3)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, sharing), IMM(4)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, wake_capability), IMM(5)),
+ END(),
+ OP(SET_TO_IMM, NATIVE_F(irq, length_kind),
+ IMM(UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)),
+ OP(SET_TO_IMM, NATIVE_F(irq, triggering), IMM(UACPI_TRIGGERING_EDGE)),
+ END(),
+};
+
+const struct uacpi_resource_convert_instruction convert_irq_to_aml[] = {
+ OP(PACKED_ARRAY_16, AML_F(irq, irq_mask), NATIVE_F(irq, irqs),
+ ARG2(NATIVE_O(irq, num_irqs))),
+ OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(3), IMM(4)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, triggering), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, polarity), IMM(3)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, sharing), IMM(4)),
+ OP(BIT_FIELD_1, AML_F(irq, flags), NATIVE_F(irq, wake_capability), IMM(5)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_dma[] = {
+ OP(PACKED_ARRAY_8, AML_F(dma, channel_mask), NATIVE_F(dma, channels),
+ ARG2(NATIVE_O(dma, num_channels))),
+ OP(BIT_FIELD_2, AML_F(dma, flags), NATIVE_F(dma, transfer_type), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(dma, flags), NATIVE_F(dma, bus_master_status), IMM(2)),
+ OP(BIT_FIELD_2, AML_F(dma, flags), NATIVE_F(dma, channel_speed), IMM(5)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_start_dependent_to_native[] = {
+ OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(1), IMM(4)),
+ OP(SET_TO_IMM, NATIVE_F(start_dependent, length_kind),
+ IMM(UACPI_RESOURCE_LENGTH_KIND_FULL)),
+ OP(BIT_FIELD_2, AML_F(start_dependent, flags),
+ NATIVE_F(start_dependent, compatibility), IMM(0)),
+ OP(BIT_FIELD_2, AML_F(start_dependent, flags),
+ NATIVE_F(start_dependent, performance), IMM(2)),
+ END(),
+ OP(SET_TO_IMM, NATIVE_F(start_dependent, length_kind),
+ IMM(UACPI_RESOURCE_LENGTH_KIND_ONE_LESS)),
+ OP(SET_TO_IMM, NATIVE_F(start_dependent, compatibility),
+ IMM(UACPI_ACCEPTABLE)),
+ OP(SET_TO_IMM, NATIVE_F(start_dependent, performance),
+ IMM(UACPI_ACCEPTABLE)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_start_dependent_to_aml[] = {
+ OP(SKIP_IF_AML_SIZE_LESS_THAN, ARG0(1), IMM(1)),
+ OP(BIT_FIELD_2, AML_F(start_dependent, flags),
+ NATIVE_F(start_dependent, compatibility), IMM(0)),
+ OP(BIT_FIELD_2, AML_F(start_dependent, flags),
+ NATIVE_F(start_dependent, performance), IMM(2)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_io[] = {
+ OP(BIT_FIELD_1, AML_F(io, information), NATIVE_F(io, decode_type)),
+ OP(FIELD_16, AML_F(io, minimum), NATIVE_F(io, minimum)),
+ OP(FIELD_16, AML_F(io, maximum), NATIVE_F(io, maximum)),
+ OP(FIELD_8, AML_F(io, alignment), NATIVE_F(io, alignment)),
+ OP(FIELD_8, AML_F(io, length), NATIVE_F(io, length)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_fixed_io[] = {
+ OP(FIELD_16, AML_F(fixed_io, address), NATIVE_F(fixed_io, address)),
+ OP(FIELD_8, AML_F(fixed_io, length), NATIVE_F(fixed_io, length)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_fixed_dma[] = {
+ OP(FIELD_16, AML_F(fixed_dma, request_line),
+ NATIVE_F(fixed_dma, request_line)),
+ OP(FIELD_16, AML_F(fixed_dma, channel), NATIVE_F(fixed_dma, channel)),
+ OP(FIELD_8, AML_F(fixed_dma, transfer_width),
+ NATIVE_F(fixed_dma, transfer_width)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_vendor_type0[] = {
+ OP(LOAD_AML_SIZE_32, NATIVE_F(vendor, length)),
+ OP(FIELD_8, AML_F(vendor_defined_type0, byte_data), NATIVE_F(vendor, data)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_vendor_type1[] = {
+ OP(LOAD_AML_SIZE_32, NATIVE_F(vendor, length)),
+ OP(FIELD_8, AML_F(vendor_defined_type1, byte_data), NATIVE_F(vendor, data)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_memory24[] = {
+ OP(BIT_FIELD_1, AML_F(memory24, information),
+ NATIVE_F(memory24, write_status), IMM(0)),
+ OP(FIELD_16, AML_F(memory24, minimum), NATIVE_F(memory24, minimum), IMM(4)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_memory32[] = {
+ OP(BIT_FIELD_1, AML_F(memory32, information),
+ NATIVE_F(memory32, write_status), IMM(0)),
+ OP(FIELD_32, AML_F(memory32, minimum), NATIVE_F(memory32, minimum), IMM(4)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_fixed_memory32[] = {
+ OP(BIT_FIELD_1, AML_F(fixed_memory32, information),
+ NATIVE_F(fixed_memory32, write_status), IMM(0)),
+ OP(FIELD_32, AML_F(fixed_memory32, address),
+ NATIVE_F(fixed_memory32, address)),
+ OP(FIELD_32, AML_F(fixed_memory32, length),
+ NATIVE_F(fixed_memory32, length)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_generic_register[] = {
+ OP(FIELD_8, AML_F(generic_register, address_space_id),
+ NATIVE_F(generic_register, address_space_id), IMM(4)),
+ OP(FIELD_64, AML_F(generic_register, address),
+ NATIVE_F(generic_register, address)),
+ END(),
+};
+
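+/*
+ * Decode the type-specific flags byte of an Address Space descriptor
+ * according to its range type: memory attributes, I/O attributes, or a
+ * raw copy of the byte for range types we don't know about.
+ */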
+#define CONVERT_TYPE_SPECIFIC_FLAGS(addr_type) \
+ OP(LOAD_8_STORE, AML_F(addr_type, common.type), \
+ NATIVE_F(addr_type, common.type)), \
+ OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_RANGE_MEMORY), IMM(5)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.memory.write_status), IMM(0)), \
+ OP(BIT_FIELD_2, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.memory.caching), IMM(1)), \
+ OP(BIT_FIELD_2, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.memory.range_type), IMM(3)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.memory.translation), IMM(5)), \
+ END(), \
+ OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_RANGE_IO), IMM(4)), \
+ OP(BIT_FIELD_2, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.io.range_type), IMM(0)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.io.translation_type), IMM(4)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.io.translation), IMM(5)), \
+ END(), \
+ /* Memory type that we don't know; just copy the byte */ \
+ OP(FIELD_8, AML_F(addr_type, common.type_flags), \
+ NATIVE_F(addr_type, common.attribute.type_specific), IMM(0xFF)), \
+ END()
+
+#define CONVERT_GENERAL_ADDRESS_FLAGS(addr_type) \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.flags), \
+ NATIVE_F(addr_type, common.direction), IMM(0)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.flags), \
+ NATIVE_F(addr_type, common.decode_type), IMM(1)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.flags), \
+ NATIVE_F(addr_type, common.fixed_min_address), IMM(2)), \
+ OP(BIT_FIELD_1, \
+ AML_F(addr_type, common.flags), \
+ NATIVE_F(addr_type, common.fixed_max_address), IMM(3)) \
+
+#define DEFINE_ADDRESS_CONVERSION(width) \
+ static const struct uacpi_resource_convert_instruction \
+ convert_address##width[] = { \
+ CONVERT_GENERAL_ADDRESS_FLAGS(address##width), \
+ OP(FIELD_##width, AML_F(address##width, granularity), \
+ NATIVE_F(address##width, granularity), IMM(5)), \
+ OP(RESOURCE_SOURCE, NATIVE_F(address##width, source)), \
+ CONVERT_TYPE_SPECIFIC_FLAGS(address##width), \
+ };
+
+DEFINE_ADDRESS_CONVERSION(16)
+DEFINE_ADDRESS_CONVERSION(32)
+DEFINE_ADDRESS_CONVERSION(64)
+
+static const struct uacpi_resource_convert_instruction
+convert_address64_extended[] = {
+ CONVERT_GENERAL_ADDRESS_FLAGS(address64_extended),
+ OP(FIELD_8, AML_F(address64_extended, revision_id),
+ NATIVE_F(address64_extended, revision_id)),
+ OP(FIELD_64, AML_F(address64_extended, granularity),
+ NATIVE_F(address64_extended, granularity), IMM(6)),
+ CONVERT_TYPE_SPECIFIC_FLAGS(address64_extended),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_extended_irq[] = {
+ OP(BIT_FIELD_1, AML_F(extended_irq, flags),
+ NATIVE_F(extended_irq, direction), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(extended_irq, flags),
+ NATIVE_F(extended_irq, triggering), IMM(1)),
+ OP(BIT_FIELD_1, AML_F(extended_irq, flags),
+ NATIVE_F(extended_irq, polarity), IMM(2)),
+ OP(BIT_FIELD_1, AML_F(extended_irq, flags),
+ NATIVE_F(extended_irq, sharing), IMM(3)),
+ OP(BIT_FIELD_1, AML_F(extended_irq, flags),
+ NATIVE_F(extended_irq, wake_capability), IMM(4)),
+ OP(LOAD_8_STORE, AML_F(extended_irq, num_irqs),
+ NATIVE_F(extended_irq, num_irqs), IMM(4)),
+ OP(RESOURCE_SOURCE, NATIVE_F(extended_irq, source)),
+
+ // Use FIELD_8 here since the accumulator has been multiplied by 4
+ OP(FIELD_8, AML_F(extended_irq, irqs), NATIVE_F(extended_irq, irqs)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_clock_input[] = {
+ OP(FIELD_8, AML_F(clock_input, revision_id),
+ NATIVE_F(clock_input, revision_id)),
+ OP(BIT_FIELD_1, AML_F(clock_input, flags), NATIVE_F(clock_input, frequency),
+ IMM(0)),
+ OP(BIT_FIELD_2, AML_F(clock_input, flags), NATIVE_F(clock_input, scale),
+ IMM(1)),
+ OP(FIELD_16, AML_F(clock_input, divisor), NATIVE_F(clock_input, divisor)),
+ OP(FIELD_32, AML_F(clock_input, numerator), NATIVE_F(clock_input, numerator)),
+ OP(FIELD_8, AML_F(clock_input, source_index), NATIVE_F(clock_input, source.index)),
+ OP(RESOURCE_SOURCE_NO_INDEX, NATIVE_F(clock_input, source)),
+ END(),
+};
+
+#define DECODE_SOURCE_INDEX(short_aml_name) \
+ OP(FIELD_8, AML_F(short_aml_name, source_index), \
+ NATIVE_F(short_aml_name, source.index)) \
+
+#define DECODE_RES_PIN_TBL_AND_VENDOR_DATA( \
+ short_aml_name, res_opcode, offset_field, res_field \
+) \
+ OP(LOAD_PIN_TABLE_LENGTH, AML_F(short_aml_name, offset_field), \
+ NATIVE_F(short_aml_name, pin_table_length)), \
+ OP(RESOURCE_##res_opcode, NATIVE_F(short_aml_name, res_field), \
+ AML_F(short_aml_name, offset_field), \
+ ARG2(AML_O(short_aml_name, vendor_data_offset))), \
+ OP(PIN_TABLE, AML_F(short_aml_name, pin_table_offset), \
+ NATIVE_F(short_aml_name, pin_table_length), \
+ ARG2(NATIVE_O(short_aml_name, pin_table))), \
+ OP(VENDOR_DATA, AML_F(short_aml_name, vendor_data_offset), \
+ NATIVE_F(short_aml_name, vendor_data_length), \
+ ARG2(NATIVE_O(short_aml_name, vendor_data)))
+
+static const struct uacpi_resource_convert_instruction
+convert_gpio_connection[] = {
+ OP(FIELD_8, AML_F(gpio_connection, revision_id),
+ NATIVE_F(gpio_connection, revision_id)),
+ OP(BIT_FIELD_1, AML_F(gpio_connection, general_flags),
+ NATIVE_F(gpio_connection, direction)),
+ OP(FIELD_8, AML_F(gpio_connection, pull_configuration),
+ NATIVE_F(gpio_connection, pull_configuration)),
+ OP(FIELD_16, AML_F(gpio_connection, drive_strength),
+ NATIVE_F(gpio_connection, drive_strength), IMM(2)),
+ DECODE_SOURCE_INDEX(gpio_connection),
+ DECODE_RES_PIN_TBL_AND_VENDOR_DATA(
+ gpio_connection, SOURCE_NO_INDEX, source_offset, source
+ ),
+ OP(LOAD_8_STORE, AML_F(gpio_connection, type), NATIVE_F(gpio_connection, type)),
+ OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_GPIO_CONNECTION_INTERRUPT), IMM(5)),
+ OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, intr.triggering), IMM(0)),
+ OP(BIT_FIELD_2, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, intr.polarity), IMM(1)),
+ OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, intr.sharing), IMM(3)),
+ OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, intr.wake_capability), IMM(4)),
+ END(),
+ OP(SKIP_IF_NOT_EQUALS, ARG0(UACPI_GPIO_CONNECTION_IO), IMM(3)),
+ OP(BIT_FIELD_2, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, io.restriction), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, io.sharing), IMM(3)),
+ END(),
+ OP(FIELD_16, AML_F(gpio_connection, connection_flags),
+ NATIVE_F(gpio_connection, type_specific), IMM(0xFF)),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_pin_function[] = {
+ OP(FIELD_8, AML_F(pin_function, revision_id),
+ NATIVE_F(pin_function, revision_id)),
+ OP(BIT_FIELD_1, AML_F(pin_function, flags),
+ NATIVE_F(pin_function, sharing), IMM(0)),
+ OP(FIELD_8, AML_F(pin_function, pull_configuration),
+ NATIVE_F(pin_function, pull_configuration)),
+ OP(FIELD_16, AML_F(pin_function, function_number),
+ NATIVE_F(pin_function, function_number)),
+ DECODE_SOURCE_INDEX(pin_function),
+ DECODE_RES_PIN_TBL_AND_VENDOR_DATA(
+ pin_function, SOURCE_NO_INDEX, source_offset, source
+ ),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_pin_configuration[] = {
+ OP(FIELD_8, AML_F(pin_configuration, revision_id),
+ NATIVE_F(pin_configuration, revision_id)),
+ OP(BIT_FIELD_1, AML_F(pin_configuration, flags),
+ NATIVE_F(pin_configuration, sharing), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(pin_configuration, flags),
+ NATIVE_F(pin_configuration, direction), IMM(1)),
+ OP(FIELD_8, AML_F(pin_configuration, type),
+ NATIVE_F(pin_configuration, type)),
+ OP(FIELD_32, AML_F(pin_configuration, value),
+ NATIVE_F(pin_configuration, value)),
+ DECODE_SOURCE_INDEX(pin_configuration),
+ DECODE_RES_PIN_TBL_AND_VENDOR_DATA(
+ pin_configuration, SOURCE_NO_INDEX, source_offset, source
+ ),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction convert_pin_group[] = {
+ OP(FIELD_8, AML_F(pin_group, revision_id),
+ NATIVE_F(pin_group, revision_id)),
+ OP(BIT_FIELD_1, AML_F(pin_group, flags),
+ NATIVE_F(pin_group, direction), IMM(0)),
+ DECODE_RES_PIN_TBL_AND_VENDOR_DATA(
+ pin_group, LABEL, source_lable_offset, label
+ ),
+ END(),
+};
+
+#define DECODE_PIN_GROUP_RES_SOURCES(postfix) \
+ DECODE_SOURCE_INDEX(pin_group_##postfix), \
+ OP(RESOURCE_SOURCE_NO_INDEX, NATIVE_F(pin_group_##postfix, source), \
+ AML_F(pin_group_##postfix, source_offset), \
+ ARG2(AML_O(pin_group_##postfix, source_lable_offset))), \
+ OP(LOAD_16_NATIVE, NATIVE_F(pin_group_##postfix, source.length)), \
+ OP(RESOURCE_LABEL, NATIVE_F(pin_group_##postfix, label), \
+ AML_F(pin_group_##postfix, source_lable_offset), \
+ ARG2(AML_O(pin_group_##postfix, vendor_data_offset))), \
+ OP(VENDOR_DATA, AML_F(pin_group_##postfix, vendor_data_offset), \
+ NATIVE_F(pin_group_##postfix, vendor_data_length), \
+ ARG2(NATIVE_O(pin_group_##postfix, vendor_data)))
+
+static const struct uacpi_resource_convert_instruction
+convert_pin_group_function[] = {
+ OP(FIELD_8, AML_F(pin_group_function, revision_id),
+ NATIVE_F(pin_group_function, revision_id)),
+ OP(BIT_FIELD_1, AML_F(pin_group_function, flags),
+ NATIVE_F(pin_group_function, sharing), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(pin_group_function, flags),
+ NATIVE_F(pin_group_function, direction), IMM(1)),
+ OP(FIELD_16, AML_F(pin_group_function, function),
+ NATIVE_F(pin_group_function, function)),
+ DECODE_PIN_GROUP_RES_SOURCES(function),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_pin_group_configuration[] = {
+ OP(FIELD_8, AML_F(pin_group_configuration, revision_id),
+ NATIVE_F(pin_group_configuration, revision_id)),
+ OP(BIT_FIELD_1, AML_F(pin_group_configuration, flags),
+ NATIVE_F(pin_group_configuration, sharing), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(pin_group_configuration, flags),
+ NATIVE_F(pin_group_configuration, direction), IMM(1)),
+ OP(FIELD_8, AML_F(pin_group_configuration, type),
+ NATIVE_F(pin_group_configuration, type)),
+ OP(FIELD_32, AML_F(pin_group_configuration, value),
+ NATIVE_F(pin_group_configuration, value)),
+ DECODE_PIN_GROUP_RES_SOURCES(configuration),
+ END(),
+};
+
+static const struct uacpi_resource_convert_instruction
+convert_generic_serial_bus[] = {
+ OP(FIELD_8, AML_F(serial, revision_id),
+ NATIVE_F(serial_bus_common, revision_id)),
+ OP(FIELD_8, AML_F(serial, type_specific_revision_id),
+ NATIVE_F(serial_bus_common, type_revision_id)),
+ OP(FIELD_8, AML_F(serial, source_index),
+ NATIVE_F(serial_bus_common, source.index)),
+ OP(FIELD_16, AML_F(serial, type_data_length),
+ NATIVE_F(serial_bus_common, type_data_length)),
+ OP(BIT_FIELD_1, AML_F(serial, flags),
+ NATIVE_F(serial_bus_common, mode), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(serial, flags),
+ NATIVE_F(serial_bus_common, direction), IMM(1)),
+ OP(BIT_FIELD_1, AML_F(serial, flags),
+ NATIVE_F(serial_bus_common, sharing), IMM(2)),
+ OP(SERIAL_TYPE_SPECIFIC, AML_F(serial, type),
+ NATIVE_F(serial_bus_common, type)),
+ OP(RESOURCE_SOURCE_NO_INDEX, NATIVE_F(serial_bus_common, source)),
+ OP(LOAD_8_NATIVE, NATIVE_F(serial_bus_common, type)),
+ OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_I2C), IMM(4)),
+ OP(BIT_FIELD_1, AML_F(serial, type_specific_flags),
+ NATIVE_F(i2c_connection, addressing_mode), IMM(0)),
+ OP(FIELD_32, AML_F(serial_i2c, connection_speed),
+ NATIVE_F(i2c_connection, connection_speed), IMM(0xFF)),
+ OP(FIELD_16, AML_F(serial_i2c, slave_address),
+ NATIVE_F(i2c_connection, slave_address)),
+ END(),
+ OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_SPI), IMM(5)),
+ OP(BIT_FIELD_1, AML_F(serial, type_specific_flags),
+ NATIVE_F(spi_connection, wire_mode), IMM(0)),
+ OP(BIT_FIELD_1, AML_F(serial, type_specific_flags),
+ NATIVE_F(spi_connection, device_polarity), IMM(1)),
+ OP(FIELD_32, AML_F(serial_spi, connection_speed),
+ NATIVE_F(spi_connection, connection_speed), IMM(0xFF)),
+ OP(FIELD_8, AML_F(serial_spi, data_bit_length),
+ NATIVE_F(spi_connection, data_bit_length), IMM(5)),
+ END(),
+ OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_UART), IMM(8)),
+ OP(BIT_FIELD_2, AML_F(serial, type_specific_flags),
+ NATIVE_F(uart_connection, flow_control), IMM(0)),
+ OP(BIT_FIELD_2, AML_F(serial, type_specific_flags),
+ NATIVE_F(uart_connection, stop_bits), IMM(2)),
+ OP(BIT_FIELD_3, AML_F(serial, type_specific_flags),
+ NATIVE_F(uart_connection, data_bits), IMM(4)),
+ OP(BIT_FIELD_1, AML_F(serial, type_specific_flags),
+ NATIVE_F(uart_connection, endianness), IMM(7)),
+ OP(FIELD_32, AML_F(serial_uart, baud_rate),
+ NATIVE_F(uart_connection, baud_rate), IMM(0xFF)),
+ OP(FIELD_16, AML_F(serial_uart, rx_fifo),
+ NATIVE_F(uart_connection, rx_fifo), IMM(2)),
+ OP(FIELD_8, AML_F(serial_uart, parity),
+ NATIVE_F(uart_connection, parity), IMM(2)),
+ END(),
+ OP(SKIP_IF_NOT_EQUALS, ARG0(ACPI_SERIAL_TYPE_CSI2), IMM(3)),
+ OP(BIT_FIELD_2, AML_F(serial, type_specific_flags),
+ NATIVE_F(csi2_connection, phy_type), IMM(0)),
+ OP(BIT_FIELD_6, AML_F(serial, type_specific_flags),
+ NATIVE_F(csi2_connection, local_port), IMM(2)),
+ END(),
+
+ /*
+ * Insert a trap to catch unimplemented types; this should be unreachable
+ * because of the validation performed earlier.
+ */
+ OP(UNREACHABLE),
+};
+
+#define NATIVE_RESOURCE_HEADER_SIZE 8
+
+#define DEFINE_SMALL_AML_RESOURCE(aml_type_enum, native_type_enum, \
+ aml_struct, native_struct, ...) \
+ [aml_type_enum] = { \
+ .type = aml_type_enum, \
+ .native_type = native_type_enum, \
+ .resource_kind = UACPI_AML_RESOURCE_KIND_SMALL, \
+ .aml_size = sizeof(aml_struct) - SMALL_ITEM_HEADER_SIZE, \
+ .native_size = sizeof(native_struct) + NATIVE_RESOURCE_HEADER_SIZE, \
+ __VA_ARGS__ \
+ }
+
+#define DEFINE_SMALL_AML_RESOURCE_NO_NATIVE_REPR( \
+ aml_type_enum, native_type_enum, aml_struct, ... \
+) \
+ [aml_type_enum] = { \
+ .type = aml_type_enum, \
+ .native_type = native_type_enum, \
+ .resource_kind = UACPI_AML_RESOURCE_KIND_SMALL, \
+ .aml_size = sizeof(aml_struct) - SMALL_ITEM_HEADER_SIZE, \
+ .native_size = NATIVE_RESOURCE_HEADER_SIZE, \
+ __VA_ARGS__ \
+ }
+
+#define DEFINE_LARGE_AML_RESOURCE(aml_type_enum, native_type_enum, \
+ aml_struct, native_struct, ...) \
+ [aml_type_enum] = { \
+ .type = aml_type_enum, \
+ .native_type = native_type_enum, \
+ .resource_kind = UACPI_AML_RESOURCE_KIND_LARGE, \
+ .aml_size = sizeof(aml_struct) - LARGE_ITEM_HEADER_SIZE, \
+ .native_size = sizeof(native_struct) + NATIVE_RESOURCE_HEADER_SIZE, \
+ __VA_ARGS__ \
+ }
+
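+/*
+ * Table describing every supported AML resource: its native counterpart,
+ * header kind, base sizes, size validation rule, optional size helpers,
+ * and the conversion programs used in both directions.
+ */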
+const struct uacpi_resource_spec aml_resources[UACPI_AML_RESOURCE_MAX + 1] = {
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_IRQ,
+ UACPI_RESOURCE_TYPE_IRQ,
+ struct acpi_resource_irq,
+ uacpi_resource_irq,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS,
+ .extra_size_for_native = extra_size_for_native_irq_or_dma,
+ .size_for_aml = size_for_aml_irq,
+ .to_native = convert_irq_to_native,
+ .to_aml = convert_irq_to_aml,
+ ),
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_DMA,
+ UACPI_RESOURCE_TYPE_DMA,
+ struct acpi_resource_dma,
+ uacpi_resource_dma,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .extra_size_for_native = extra_size_for_native_irq_or_dma,
+ .to_native = convert_dma,
+ .to_aml = convert_dma,
+ ),
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_START_DEPENDENT,
+ UACPI_RESOURCE_TYPE_START_DEPENDENT,
+ struct acpi_resource_start_dependent,
+ uacpi_resource_start_dependent,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS,
+ .size_for_aml = size_for_aml_start_dependent,
+ .to_native = convert_start_dependent_to_native,
+ .to_aml = convert_start_dependent_to_aml,
+ ),
+ DEFINE_SMALL_AML_RESOURCE_NO_NATIVE_REPR(
+ UACPI_AML_RESOURCE_END_DEPENDENT,
+ UACPI_RESOURCE_TYPE_END_DEPENDENT,
+ struct acpi_resource_end_dependent,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ ),
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_IO,
+ UACPI_RESOURCE_TYPE_IO,
+ struct acpi_resource_io,
+ uacpi_resource_io,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_io,
+ .to_aml = convert_io,
+ ),
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_FIXED_IO,
+ UACPI_RESOURCE_TYPE_FIXED_IO,
+ struct acpi_resource_fixed_io,
+ uacpi_resource_fixed_io,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_fixed_io,
+ .to_aml = convert_fixed_io,
+ ),
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_FIXED_DMA,
+ UACPI_RESOURCE_TYPE_FIXED_DMA,
+ struct acpi_resource_fixed_dma,
+ uacpi_resource_fixed_dma,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_fixed_dma,
+ .to_aml = convert_fixed_dma,
+ ),
+ DEFINE_SMALL_AML_RESOURCE(
+ UACPI_AML_RESOURCE_VENDOR_TYPE0,
+ UACPI_RESOURCE_TYPE_VENDOR_SMALL,
+ struct acpi_resource_vendor_defined_type0,
+ uacpi_resource_vendor,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .size_for_aml = size_for_aml_vendor,
+ .extra_size_for_native = extra_size_for_native_vendor,
+ .to_native = convert_vendor_type0,
+ .to_aml = convert_vendor_type0,
+ ),
+ DEFINE_SMALL_AML_RESOURCE_NO_NATIVE_REPR(
+ UACPI_AML_RESOURCE_END_TAG,
+ UACPI_RESOURCE_TYPE_END_TAG,
+ struct acpi_resource_end_tag,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_MEMORY24,
+ UACPI_RESOURCE_TYPE_MEMORY24,
+ struct acpi_resource_memory24,
+ uacpi_resource_memory24,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_memory24,
+ .to_aml = convert_memory24,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_GENERIC_REGISTER,
+ UACPI_RESOURCE_TYPE_GENERIC_REGISTER,
+ struct acpi_resource_generic_register,
+ uacpi_resource_generic_register,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_generic_register,
+ .to_aml = convert_generic_register,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_VENDOR_TYPE1,
+ UACPI_RESOURCE_TYPE_VENDOR_LARGE,
+ struct acpi_resource_vendor_defined_type1,
+ uacpi_resource_vendor,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_vendor,
+ .size_for_aml = size_for_aml_vendor,
+ .to_native = convert_vendor_type1,
+ .to_aml = convert_vendor_type1,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_MEMORY32,
+ UACPI_RESOURCE_TYPE_MEMORY32,
+ struct acpi_resource_memory32,
+ uacpi_resource_memory32,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_memory32,
+ .to_aml = convert_memory32,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_FIXED_MEMORY32,
+ UACPI_RESOURCE_TYPE_FIXED_MEMORY32,
+ struct acpi_resource_fixed_memory32,
+ uacpi_resource_fixed_memory32,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_fixed_memory32,
+ .to_aml = convert_fixed_memory32,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_ADDRESS32,
+ UACPI_RESOURCE_TYPE_ADDRESS32,
+ struct acpi_resource_address32,
+ uacpi_resource_address32,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_address_or_clock_input,
+ .size_for_aml = size_for_aml_address_or_clock_input,
+ .to_native = convert_address32,
+ .to_aml = convert_address32,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_ADDRESS16,
+ UACPI_RESOURCE_TYPE_ADDRESS16,
+ struct acpi_resource_address16,
+ uacpi_resource_address16,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_address_or_clock_input,
+ .size_for_aml = size_for_aml_address_or_clock_input,
+ .to_native = convert_address16,
+ .to_aml = convert_address16,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_EXTENDED_IRQ,
+ UACPI_RESOURCE_TYPE_EXTENDED_IRQ,
+ struct acpi_resource_extended_irq,
+ uacpi_resource_extended_irq,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_extended_irq,
+ .size_for_aml = size_for_aml_extended_irq,
+ .to_native = convert_extended_irq,
+ .to_aml = convert_extended_irq,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_ADDRESS64,
+ UACPI_RESOURCE_TYPE_ADDRESS64,
+ struct acpi_resource_address64,
+ uacpi_resource_address64,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_address_or_clock_input,
+ .size_for_aml = size_for_aml_address_or_clock_input,
+ .to_native = convert_address64,
+ .to_aml = convert_address64,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_ADDRESS64_EXTENDED,
+ UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED,
+ struct acpi_resource_address64_extended,
+ uacpi_resource_address64_extended,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_FIXED,
+ .to_native = convert_address64_extended,
+ .to_aml = convert_address64_extended,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_GPIO_CONNECTION,
+ UACPI_RESOURCE_TYPE_GPIO_CONNECTION,
+ struct acpi_resource_gpio_connection,
+ uacpi_resource_gpio_connection,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_gpio_or_pins,
+ .size_for_aml = size_for_aml_gpio_or_pins,
+ .to_aml = convert_gpio_connection,
+ .to_native = convert_gpio_connection,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_PIN_FUNCTION,
+ UACPI_RESOURCE_TYPE_PIN_FUNCTION,
+ struct acpi_resource_pin_function,
+ uacpi_resource_pin_function,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_gpio_or_pins,
+ .size_for_aml = size_for_aml_gpio_or_pins,
+ .to_aml = convert_pin_function,
+ .to_native = convert_pin_function,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_SERIAL_CONNECTION,
+ 0, // the native type here is determined dynamically
+ struct acpi_resource_serial,
+ uacpi_resource_serial_bus_common,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_serial_connection,
+ .size_for_aml = aml_size_for_serial_connection,
+ .to_native = convert_generic_serial_bus,
+ .to_aml = convert_generic_serial_bus,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_PIN_CONFIGURATION,
+ UACPI_RESOURCE_TYPE_PIN_CONFIGURATION,
+ struct acpi_resource_pin_configuration,
+ uacpi_resource_pin_configuration,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_gpio_or_pins,
+ .size_for_aml = size_for_aml_gpio_or_pins,
+ .to_native = convert_pin_configuration,
+ .to_aml = convert_pin_configuration,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_PIN_GROUP,
+ UACPI_RESOURCE_TYPE_PIN_GROUP,
+ struct acpi_resource_pin_group,
+ uacpi_resource_pin_group,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_gpio_or_pins,
+ .size_for_aml = size_for_aml_gpio_or_pins,
+ .to_native = convert_pin_group,
+ .to_aml = convert_pin_group,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION,
+ UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION,
+ struct acpi_resource_pin_group_function,
+ uacpi_resource_pin_group_function,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_pin_group,
+ .size_for_aml = size_for_aml_pin_group,
+ .to_native = convert_pin_group_function,
+ .to_aml = convert_pin_group_function,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION,
+ UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION,
+ struct acpi_resource_pin_group_configuration,
+ uacpi_resource_pin_group_configuration,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_pin_group,
+ .size_for_aml = size_for_aml_pin_group,
+ .to_native = convert_pin_group_configuration,
+ .to_aml = convert_pin_group_configuration,
+ ),
+ DEFINE_LARGE_AML_RESOURCE(
+ UACPI_AML_RESOURCE_CLOCK_INPUT,
+ UACPI_RESOURCE_TYPE_CLOCK_INPUT,
+ struct acpi_resource_clock_input,
+ uacpi_resource_clock_input,
+ .size_kind = UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE,
+ .extra_size_for_native = extra_size_for_native_address_or_clock_input,
+ .size_for_aml = size_for_aml_address_or_clock_input,
+ .to_native = convert_clock_input,
+ .to_aml = convert_clock_input,
+ ),
+};
+
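+/*
+ * Translate the first byte of an AML resource descriptor (a small or
+ * large item tag) into an internal uacpi_aml_resource index via
+ * aml_resource_to_type.
+ */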
+static enum uacpi_aml_resource get_aml_resource_type(uacpi_u8 raw_byte)
+{
+ if (raw_byte & ACPI_LARGE_ITEM) {
+ return aml_resource_to_type[
+ LARGE_RESOURCE_BASE + (raw_byte & ACPI_LARGE_ITEM_NAME_MASK)
+ ];
+ }
+
+ return aml_resource_to_type[
+ (raw_byte >> ACPI_SMALL_ITEM_NAME_IDX) & ACPI_SMALL_ITEM_NAME_MASK
+ ];
+}
+
+static uacpi_status get_aml_resource_size(
+ uacpi_u8 *data, uacpi_size bytes_left, uacpi_u16 *out_size
+)
+{
+ uacpi_u16 size;
+
+ /*
+ * The resource header is not included in the size field for either small
+ * or large items, so we subtract the header size from bytes_left before
+ * validating.
+ */
+ if (*data & ACPI_LARGE_ITEM) {
+ if (uacpi_unlikely(bytes_left < 3))
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+
+ uacpi_memcpy(&size, data + 1, sizeof(size));
+ bytes_left -= aml_resource_kind_to_header_size[
+ UACPI_AML_RESOURCE_KIND_LARGE
+ ];
+ } else {
+ size = *data & ACPI_SMALL_ITEM_LENGTH_MASK;
+ bytes_left -= aml_resource_kind_to_header_size[
+ UACPI_AML_RESOURCE_KIND_SMALL
+ ];
+ }
+
+ if (uacpi_unlikely(size > bytes_left))
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+
+ *out_size = size;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status validate_aml_serial_type(uacpi_u8 type)
+{
+ if (uacpi_unlikely(type < ACPI_SERIAL_TYPE_I2C ||
+ type > ACPI_SERIAL_TYPE_CSI2)) {
+ uacpi_error("invalid/unsupported serial connection type %d\n", type);
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
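+/*
+ * Walk a raw AML resource buffer: decode and size-validate each
+ * descriptor against its spec, then hand it to the callback. Iteration
+ * stops at the end tag, on a callback break, or on the first error.
+ */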
+uacpi_status uacpi_for_each_aml_resource(
+ uacpi_data_view buffer, uacpi_aml_resource_iteration_callback cb, void *user
+)
+{
+ uacpi_status ret;
+ uacpi_iteration_decision decision;
+ uacpi_u8 *data;
+ uacpi_size bytes_left;
+ uacpi_u16 resource_size;
+ enum uacpi_aml_resource type;
+ const struct uacpi_resource_spec *spec;
+
+ bytes_left = buffer.length;
+ data = buffer.bytes;
+
+ while (bytes_left) {
+ type = get_aml_resource_type(*data);
+ if (uacpi_unlikely(type == UACPI_AML_RESOURCE_TYPE_INVALID))
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+
+ ret = get_aml_resource_size(data, bytes_left, &resource_size);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ spec = &aml_resources[type];
+ switch (spec->size_kind) {
+ case UACPI_AML_RESOURCE_SIZE_KIND_FIXED:
+ if (resource_size != spec->aml_size)
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+ break;
+ case UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE:
+ if (resource_size < spec->aml_size)
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+ break;
+ case UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS:
+ if (resource_size != spec->aml_size &&
+ resource_size != (spec->aml_size - 1))
+ return UACPI_STATUS_AML_INVALID_RESOURCE;
+ break;
+ default:
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+
+ if (spec->type == UACPI_AML_RESOURCE_SERIAL_CONNECTION) {
+ struct acpi_resource_serial *serial;
+
+ serial = (struct acpi_resource_serial*)data;
+
+ ret = validate_aml_serial_type(serial->type);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ decision = cb(user, data, resource_size, spec);
+ switch (decision) {
+ case UACPI_ITERATION_DECISION_BREAK:
+ return UACPI_STATUS_OK;
+ case UACPI_ITERATION_DECISION_CONTINUE: {
+ uacpi_size total_size = resource_size;
+
+ total_size += aml_resource_kind_to_header_size[spec->resource_kind];
+ data += total_size;
+ bytes_left -= total_size;
+ break;
+ }
+ default:
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+
+ if (type == UACPI_AML_RESOURCE_END_TAG)
+ return UACPI_STATUS_OK;
+ }
+
+ return UACPI_STATUS_NO_RESOURCE_END_TAG;
+}
+
+static uacpi_iteration_decision find_end(
+ void *opaque, uacpi_u8 *data, uacpi_u16 resource_size,
+ const struct uacpi_resource_spec *spec
+)
+{
+ uacpi_u8 **out_ptr = opaque;
+ UACPI_UNUSED(resource_size);
+
+ if (spec->type != UACPI_AML_RESOURCE_END_TAG)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ *out_ptr = data;
+ return UACPI_ITERATION_DECISION_BREAK;
+}
+
+static uacpi_size native_size_for_aml_resource(
+ uacpi_u8 *data, uacpi_u16 size, const struct uacpi_resource_spec *spec
+)
+{
+ uacpi_size final_size = spec->native_size;
+
+ if (spec->extra_size_for_native)
+ final_size += spec->extra_size_for_native(spec, data, size);
+
+ return UACPI_ALIGN_UP(final_size, sizeof(void*), uacpi_size);
+}
+
+uacpi_status uacpi_find_aml_resource_end_tag(
+ uacpi_data_view buffer, uacpi_size *out_offset
+)
+{
+ uacpi_u8 *end_tag_ptr = UACPI_NULL;
+ uacpi_status ret;
+
+ if (buffer.length == 0) {
+ *out_offset = 0;
+ return UACPI_STATUS_OK;
+ }
+
+ /*
+ * If this returns UACPI_STATUS_OK, end_tag_ptr is guaranteed to point at
+ * a valid end tag, because a missing end tag would instead produce a
+ * UACPI_STATUS_NO_RESOURCE_END_TAG error.
+ */
+ ret = uacpi_for_each_aml_resource(buffer, find_end, &end_tag_ptr);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ *out_offset = end_tag_ptr - buffer.bytes;
+ return UACPI_STATUS_OK;
+}
+
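+/*
+ * Shared context for the two conversion passes: 'size' accumulates the
+ * required buffer length during the sizing pass, while 'buf'/'byte_buf'
+ * act as the output cursor during the conversion pass. 'st' carries an
+ * error out of the iteration callback and 'just_one' limits iteration to
+ * a single resource.
+ */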
+struct resource_conversion_ctx {
+ union {
+ void *buf;
+ uacpi_u8 *byte_buf;
+ uacpi_size size;
+ };
+ uacpi_status st;
+ uacpi_bool just_one;
+};
+
+static uacpi_iteration_decision conditional_continue(
+ struct resource_conversion_ctx *ctx
+)
+{
+ return ctx->just_one ? UACPI_ITERATION_DECISION_BREAK :
+ UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+// Opcodes that are the same for both AML->native and native->AML
+#define CONVERSION_OPCODES_COMMON(native_buf) \
+ case UACPI_RESOURCE_CONVERT_OPCODE_END: \
+ return conditional_continue(ctx); \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8: \
+ case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_16: \
+ case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_32: \
+ case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_64: { \
+ uacpi_u8 bytes; \
+ \
+ bytes = 1 << (insn->code - UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8); \
+ accumulator = insn->f3.imm == 0xFF ? 0 : accumulator + insn->f3.imm; \
+ \
+ uacpi_memcpy(dst, src, bytes * UACPI_MAX(1, accumulator)); \
+ accumulator = 0; \
+ break; \
+ } \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_AML_SIZE_LESS_THAN: \
+ if (aml_size < insn->f1.arg0) \
+ pc += insn->f3.imm; \
+ break; \
+ case UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_NOT_EQUALS: \
+ if (insn->f1.arg0 != accumulator) \
+ pc += insn->f3.imm; \
+ break; \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_SET_TO_IMM: \
+ uacpi_memcpy(dst, &insn->f3.imm, sizeof(insn->f3.imm)); \
+ break; \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_IMM: \
+ accumulator = insn->f3.imm; \
+ break; \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_STORE: \
+ uacpi_memcpy_zerout(&accumulator, src, sizeof(accumulator), 1); \
+ uacpi_memcpy(dst, &accumulator, 1); \
+ \
+ if (insn->f3.imm) \
+ accumulator *= insn->f3.imm; \
+ break; \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE: \
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_16_NATIVE: { \
+ uacpi_u8 bytes; \
+ \
+ bytes = \
+ 1 << (insn->code - UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE); \
+ uacpi_memcpy_zerout( \
+ &accumulator, native_buf, sizeof(accumulator), bytes \
+ ); \
+ break; \
+ } \
+ \
+ case UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE: \
+ default: \
+ if (insn->code != UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE) { \
+ uacpi_error("unhandled resource conversion opcode %d\n", \
+ insn->code); \
+ } else { \
+ uacpi_error("tried to execute unreachable conversion opcode\n"); \
+ } \
+ ctx->st = UACPI_STATUS_INTERNAL_ERROR; \
+ return UACPI_ITERATION_DECISION_BREAK;
+
+#define PTR_AT(ptr, offset) (void*)((uacpi_u8*)(ptr) + (offset))
+
+#define NATIVE_OFFSET(res, offset) \
+ PTR_AT(res, (offset) + (sizeof(uacpi_u32) * 2))
+
+#define NATIVE_FIELD(res, name, field) \
+ NATIVE_OFFSET(res, NATIVE_O(name, field))
+
+#define CHECK_AML_OOB(offset, prefix, what) \
+ if (uacpi_unlikely(offset > ((uacpi_u32)aml_size + header_size))) { \
+ uacpi_error(prefix what " is OOB: %zu > %u\n", \
+ (uacpi_size)offset, (uacpi_u32)aml_size + header_size); \
+ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; \
+ return UACPI_ITERATION_DECISION_BREAK; \
+ }
+
+#define CHECK_AML_OFFSET_BASE(offset, what) \
+ if (uacpi_unlikely(offset < base_aml_size_with_header)) { \
+ uacpi_error( \
+ "invalid " what " offset: %zu, expected at least %u\n", \
+ (uacpi_size)offset, base_aml_size_with_header); \
+ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; \
+ return UACPI_ITERATION_DECISION_BREAK; \
+ }
+
+#define CHECK_AML_OFFSET(offset, what) \
+ CHECK_AML_OOB(offset, "end of ", what) \
+ CHECK_AML_OFFSET_BASE(offset, what)
+
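+/*
+ * Map an ACPI_SERIAL_TYPE_* value to the matching
+ * UACPI_RESOURCE_TYPE_SERIAL_*_CONNECTION value; this relies on both
+ * sets of constants being contiguous and declared in the same order.
+ */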
+static uacpi_resource_type aml_serial_to_native_type(
+ uacpi_u8 type
+)
+{
+ return (type - ACPI_SERIAL_TYPE_I2C) +
+ UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION;
+}
+
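+/*
+ * Iteration callback that expands one raw AML descriptor into its native
+ * representation: fills in the native type and length, then interprets
+ * the spec's to_native program instruction by instruction, advancing the
+ * output cursor in the context as it goes.
+ */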
+static uacpi_iteration_decision do_aml_resource_to_native(
+ void *opaque, uacpi_u8 *data, uacpi_u16 aml_size,
+ const struct uacpi_resource_spec *spec
+)
+{
+ struct resource_conversion_ctx *ctx = opaque;
+ uacpi_resource *resource = ctx->buf;
+ const struct uacpi_resource_convert_instruction *insns, *insn;
+ uacpi_u8 header_size, pc = 0;
+ uacpi_u8 *src, *dst;
+ void *resource_end;
+ uacpi_u16 base_aml_size;
+ uacpi_u32 base_aml_size_with_header, accumulator = 0;
+
+ insns = spec->to_native;
+
+ header_size = aml_resource_kind_to_header_size[spec->resource_kind];
+ resource->type = spec->native_type;
+ resource->length = native_size_for_aml_resource(data, aml_size, spec);
+ resource_end = ctx->byte_buf + spec->native_size;
+ ctx->byte_buf += resource->length;
+
+ base_aml_size = base_aml_size_with_header = spec->aml_size;
+ base_aml_size_with_header += header_size;
+
+ if (insns == UACPI_NULL)
+ return conditional_continue(ctx);
+
+ for (;;) {
+ insn = &insns[pc++];
+
+ src = data + insn->f1.aml_offset;
+ dst = NATIVE_OFFSET(resource, insn->f2.native_offset);
+
+ switch (insn->code) {
+ case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8:
+ case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16: {
+ uacpi_size i, j, max_bit;
+ uacpi_u16 value;
+
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16) {
+ max_bit = 16;
+ uacpi_memcpy(&value, src, sizeof(uacpi_u16));
+ } else {
+ max_bit = 8;
+ uacpi_memcpy_zerout(
+ &value, src, sizeof(value), sizeof(uacpi_u8)
+ );
+ }
+
+ for (i = 0, j = 0; i < max_bit; ++i) {
+ if (!(value & (1 << i)))
+ continue;
+
+ dst[j++] = i;
+ }
+
+ uacpi_memcpy(NATIVE_OFFSET(resource, insn->f3.arg2), &j, 1);
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1:
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2:
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3:
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6:{
+ uacpi_u8 mask, value;
+
+ mask = (insn->code - UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1) + 1;
+ mask = (1 << mask) - 1;
+
+ value = (*src >> insn->f3.imm) & mask;
+ uacpi_memcpy(dst, &value, sizeof(value));
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32:
+ accumulator = aml_size;
+ uacpi_memcpy(dst, &accumulator, 4);
+ break;
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE:
+ case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX:
+ case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL: {
+ uacpi_size offset = 0, max_offset, length = 0;
+ uacpi_char *src_string, *dst_string;
+ union {
+ void *ptr;
+ uacpi_resource_source *source;
+ uacpi_resource_label *label;
+ } dst_name = { 0 };
+
+ dst_name.ptr = dst;
+
+ /*
+ * Check whether the string is bounded by a following field. If not,
+ * assume it extends to the end of the resource.
+ */
+ if (insn->f3.arg2) {
+ uacpi_memcpy_zerout(&max_offset, data + insn->f3.arg2,
+ sizeof(max_offset), sizeof(uacpi_u16));
+ CHECK_AML_OFFSET(max_offset, "resource source");
+ } else {
+ max_offset = aml_size + header_size;
+ }
+
+ offset += base_aml_size_with_header;
+ offset += accumulator;
+
+ if (insn->code != UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL)
+ dst_name.source->index_present = UACPI_TRUE;
+
+ if (offset >= max_offset) {
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE)
+ dst_name.source->index_present = UACPI_FALSE;
+ break;
+ }
+
+ src_string = PTR_AT(data, offset);
+
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE) {
+ uacpi_memcpy(&dst_name.source->index, src_string++, 1);
+ offset++;
+ }
+
+ if (offset == max_offset)
+ break;
+
+ while (offset++ < max_offset) {
+ if (src_string[length++] == '\0')
+ break;
+ }
+
+ if (src_string[length - 1] != '\0') {
+ uacpi_error("non-null-terminated resource source string\n");
+ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ dst_string = PTR_AT(resource_end, accumulator);
+ uacpi_memcpy(dst_string, src_string, length);
+
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL) {
+ dst_name.label->length = length;
+ dst_name.label->string = dst_string;
+ } else {
+ dst_name.source->length = length;
+ dst_name.source->string = dst_string;
+ }
+
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH:
+ uacpi_memcpy_zerout(&accumulator, src,
+ sizeof(accumulator), sizeof(uacpi_u16));
+ CHECK_AML_OFFSET(accumulator, "pin table");
+
+ accumulator -= base_aml_size_with_header;
+ break;
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE: {
+ uacpi_u16 entry_count = accumulator / 2;
+
+ /*
+ * The pin table is stored right at the end of the resource buffer;
+ * copy the data there.
+ */
+ uacpi_memcpy(
+ resource_end,
+ data + base_aml_size_with_header,
+ accumulator
+ );
+
+ // Set pin_table_length
+ uacpi_memcpy(dst, &entry_count, sizeof(entry_count));
+
+ // Set pin_table pointer
+ uacpi_memcpy(NATIVE_OFFSET(resource, insn->f3.arg2),
+ &resource_end, sizeof(void*));
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA: {
+ uacpi_size length;
+ uacpi_u16 data_offset, offset_from_end;
+ void *native_dst, *vendor_data;
+
+ uacpi_memcpy(&data_offset, src, sizeof(data_offset));
+ CHECK_AML_OFFSET(data_offset, "vendor data");
+
+ vendor_data = data + data_offset;
+
+ /*
+ * Rebase the offset to cut off the header as it's not included
+ * in the size fields.
+ */
+ data_offset -= header_size;
+
+ length = aml_size - data_offset;
+ if (length == 0)
+ break;
+
+ uacpi_memcpy(dst, &length, sizeof(uacpi_u16));
+
+ offset_from_end = data_offset - base_aml_size;
+ native_dst = PTR_AT(resource_end, offset_from_end);
+
+ uacpi_memcpy(native_dst, vendor_data, length);
+ uacpi_memcpy(NATIVE_OFFSET(resource, insn->f3.arg2),
+ &native_dst, sizeof(void*));
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC: {
+ uacpi_resource_serial_bus_common *serial_bus_common;
+ uacpi_u8 serial_type, extra_size, type_length;
+
+ serial_bus_common = &resource->serial_bus_common;
+ serial_type = *src;
+ serial_bus_common->type = serial_type;
+ resource->type = aml_serial_to_native_type(serial_type);
+
+ /*
+ * Now that we know the serial type, rebase the end pointers and
+ * sizes.
+ */
+ resource_end = PTR_AT(
+ resource_end,
+ aml_serial_resource_to_extra_native_size[serial_type]
+ );
+ extra_size = aml_serial_resource_to_extra_aml_size[serial_type];
+ base_aml_size += extra_size;
+ base_aml_size_with_header += extra_size;
+
+ type_length = serial_bus_common->type_data_length;
+ if (uacpi_unlikely(type_length < extra_size)) {
+ uacpi_error(
+ "invalid type-specific data length: %d, "
+ "expected at least %d\n", type_length, extra_size
+ );
+ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ /*
+ * Calculate the length of the vendor data. All extra data beyond the
+ * end of the type-specific data is considered vendor data.
+ */
+ accumulator = type_length - extra_size;
+ if (accumulator == 0)
+ break;
+
+ serial_bus_common->vendor_data_length = accumulator;
+ serial_bus_common->vendor_data = resource_end;
+ uacpi_memcpy(
+ resource_end,
+ data + base_aml_size_with_header,
+ accumulator
+ );
+ break;
+ }
+
+ CONVERSION_OPCODES_COMMON(dst)
+ }
+ }
+}
+
+static uacpi_iteration_decision accumulate_native_buffer_size(
+ void *opaque, uacpi_u8 *data, uacpi_u16 resource_size,
+ const struct uacpi_resource_spec *spec
+)
+{
+ struct resource_conversion_ctx *ctx = opaque;
+ uacpi_size size_for_this;
+
+ size_for_this = native_size_for_aml_resource(data, resource_size, spec);
+ if (size_for_this == 0 || (ctx->size + size_for_this) < ctx->size) {
+ uacpi_error("invalid native size for aml resource: %zu\n",
+ size_for_this);
+ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ ctx->size += size_for_this;
+ return conditional_continue(ctx);
+}
+
+static uacpi_status eval_resource_helper(
+ uacpi_namespace_node *node, const uacpi_char *method,
+ uacpi_object **out_obj
+)
+{
+ uacpi_status ret;
+ uacpi_bool is_device;
+
+ ret = uacpi_namespace_node_is(node, UACPI_OBJECT_DEVICE, &is_device);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (uacpi_unlikely(!is_device))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ return uacpi_eval_simple_buffer(
+ node, method, out_obj
+ );
+}
+
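+/*
+ * Conversion is done in two passes: first accumulate the total native
+ * buffer size, then allocate it and run the actual AML-to-native
+ * conversion into the freshly allocated entries.
+ */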
+uacpi_status uacpi_native_resources_from_aml(
+ uacpi_data_view aml_buffer, uacpi_resources **out_resources
+)
+{
+ uacpi_status ret;
+ struct resource_conversion_ctx ctx = { 0 };
+ uacpi_resources *resources;
+
+ ret = uacpi_for_each_aml_resource(
+ aml_buffer, accumulate_native_buffer_size, &ctx
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (uacpi_unlikely_error(ctx.st))
+ return ctx.st;
+
+ // Realistically any resource buffer bigger than this is probably a bug
+ if (uacpi_unlikely(ctx.size > (5 * 1024u * 1024u))) {
+ uacpi_error("bug: bogus native resource buffer size %zu\n", ctx.size);
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+
+ resources = uacpi_kernel_alloc_zeroed(ctx.size + sizeof(uacpi_resources));
+ if (uacpi_unlikely(resources == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ resources->length = ctx.size;
+ resources->entries = UACPI_PTR_ADD(resources, sizeof(uacpi_resources));
+
+ uacpi_memzero(&ctx, sizeof(ctx));
+ ctx.buf = resources->entries;
+
+ ret = uacpi_for_each_aml_resource(aml_buffer, do_aml_resource_to_native, &ctx);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_free_resources(resources);
+ return ret;
+ }
+
+ *out_resources = resources;
+ return ret;
+}
+
+uacpi_status uacpi_get_resource_from_buffer(
+ uacpi_data_view aml_buffer, uacpi_resource **out_resource
+)
+{
+ uacpi_status ret;
+ struct resource_conversion_ctx ctx = {
+ .just_one = UACPI_TRUE,
+ };
+ uacpi_resource *resource;
+
+ ret = uacpi_for_each_aml_resource(
+ aml_buffer, accumulate_native_buffer_size, &ctx
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ resource = uacpi_kernel_alloc_zeroed(ctx.size);
+ if (uacpi_unlikely(resource == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memzero(&ctx, sizeof(ctx));
+ ctx.buf = resource;
+ ctx.just_one = UACPI_TRUE;
+
+ ret = uacpi_for_each_aml_resource(aml_buffer, do_aml_resource_to_native, &ctx);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_free_resource(resource);
+ return ret;
+ }
+
+ *out_resource = resource;
+ return ret;
+}
+
+void uacpi_free_resources(uacpi_resources *resources)
+{
+ if (resources == UACPI_NULL)
+ return;
+
+ uacpi_free(resources, sizeof(uacpi_resources) + resources->length);
+}
+
+void uacpi_free_resource(uacpi_resource *resource)
+{
+ if (resource == UACPI_NULL)
+ return;
+
+ uacpi_free(resource, resource->length);
+}
+
+static uacpi_status extract_native_resources_from_method(
+ uacpi_namespace_node *device, const uacpi_char *method,
+ uacpi_resources **out_resources
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_data_view buffer;
+
+ ret = eval_resource_helper(device, method, &obj);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ uacpi_buffer_to_view(obj->buffer, &buffer);
+
+ ret = uacpi_native_resources_from_aml(buffer, out_resources);
+ uacpi_object_unref(obj);
+
+ return ret;
+}
+
+uacpi_status uacpi_get_current_resources(
+ uacpi_namespace_node *device, uacpi_resources **out_resources
+)
+{
+ return extract_native_resources_from_method(device, "_CRS", out_resources);
+}
+
+uacpi_status uacpi_get_possible_resources(
+ uacpi_namespace_node *device, uacpi_resources **out_resources
+)
+{
+ return extract_native_resources_from_method(device, "_PRS", out_resources);
+}
+
+uacpi_status uacpi_get_device_resources(
+ uacpi_namespace_node *device, const uacpi_char *method,
+ uacpi_resources **out_resources
+)
+{
+ return extract_native_resources_from_method(device, method, out_resources);
+}
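+
+/*
+ * A minimal usage sketch for the helpers above; count_irqs and
+ * device_node are hypothetical names:
+ *
+ *     static uacpi_iteration_decision count_irqs(void *user, uacpi_resource *res)
+ *     {
+ *         if (res->type == UACPI_RESOURCE_TYPE_IRQ)
+ *             ++*(uacpi_size *)user;
+ *         return UACPI_ITERATION_DECISION_CONTINUE;
+ *     }
+ *
+ *     uacpi_resources *res;
+ *     uacpi_size num_irqs = 0;
+ *
+ *     if (uacpi_get_current_resources(device_node, &res) == UACPI_STATUS_OK) {
+ *         uacpi_for_each_resource(res, count_irqs, &num_irqs);
+ *         uacpi_free_resources(res);
+ *     }
+ */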
+
+uacpi_status uacpi_for_each_resource(
+ uacpi_resources *resources, uacpi_resource_iteration_callback cb, void *user
+)
+{
+ uacpi_size bytes_left = resources->length;
+ uacpi_resource *current = resources->entries;
+ uacpi_iteration_decision decision;
+
+ while (bytes_left) {
+ // At least the header bytes must be present
+ if (uacpi_unlikely(bytes_left < 4)) {
+ uacpi_error("corrupted resource buffer %p length %zu\n",
+ resources, resources->length);
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (uacpi_unlikely(current->type > UACPI_RESOURCE_TYPE_MAX)) {
+ uacpi_error("invalid resource type %d\n", current->type);
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ if (uacpi_unlikely(current->length > bytes_left)) {
+ uacpi_error("corrupted resource@%p length %u (%zu bytes left)\n",
+ current, current->length, bytes_left);
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ decision = cb(user, current);
+
+ if (decision == UACPI_ITERATION_DECISION_BREAK ||
+ current->type == UACPI_RESOURCE_TYPE_END_TAG)
+ return UACPI_STATUS_OK;
+
+ bytes_left -= current->length;
+ current = (uacpi_resource*)((uacpi_u8*)current + current->length);
+ }
+
+ return UACPI_STATUS_NO_RESOURCE_END_TAG;
+}
+
+uacpi_status uacpi_for_each_device_resource(
+ uacpi_namespace_node *device, const uacpi_char *method,
+ uacpi_resource_iteration_callback cb, void *user
+)
+{
+ uacpi_status ret;
+ uacpi_resources *resources;
+
+ ret = extract_native_resources_from_method(device, method, &resources);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_for_each_resource(resources, cb, user);
+ uacpi_free_resources(resources);
+
+ return ret;
+}
+
+static const struct uacpi_resource_spec *resource_spec_from_native(
+ uacpi_resource *resource
+)
+{
+ return &aml_resources[native_resource_to_type[resource->type]];
+}
+
+static uacpi_size aml_size_for_native_resource(
+ uacpi_resource *resource, const struct uacpi_resource_spec *spec
+)
+{
+ return spec->size_for_aml ?
+ spec->size_for_aml(spec, resource) :
+ aml_size_with_header(spec);
+}
+
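+/*
+ * Inverse of do_aml_resource_to_native(): writes the small/large item tag
+ * and length for one native resource, then interprets the spec's to_aml
+ * program to encode the individual fields back into raw AML.
+ */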
+static uacpi_iteration_decision do_native_resource_to_aml(
+ void *opaque, uacpi_resource *resource
+)
+{
+ struct resource_conversion_ctx *ctx = opaque;
+ const struct uacpi_resource_spec *spec;
+ const struct uacpi_resource_convert_instruction *insns, *insn;
+ uacpi_u8 pc = 0;
+ uacpi_u8 *dst_base, *src, *dst;
+ uacpi_u32 aml_size, base_aml_size_with_header, accumulator = 0;
+ void *resource_end;
+
+ spec = resource_spec_from_native(resource);
+ aml_size = aml_size_for_native_resource(resource, spec);
+ insns = spec->to_aml;
+
+ dst_base = ctx->byte_buf;
+ ctx->byte_buf += aml_size;
+ aml_size -= aml_resource_kind_to_header_size[spec->resource_kind];
+
+ base_aml_size_with_header = spec->aml_size;
+ base_aml_size_with_header += aml_resource_kind_to_header_size[
+ spec->resource_kind
+ ];
+ resource_end = PTR_AT(resource, spec->native_size);
+
+ if (spec->resource_kind == UACPI_AML_RESOURCE_KIND_LARGE) {
+ *dst_base = ACPI_LARGE_ITEM | type_to_aml_resource[spec->type];
+ uacpi_memcpy(dst_base + 1, &aml_size, sizeof(uacpi_u16));
+ } else {
+ *dst_base = type_to_aml_resource[spec->type] << ACPI_SMALL_ITEM_NAME_IDX;
+ *dst_base |= aml_size;
+ }
+
+ if (insns == UACPI_NULL)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ for (;;) {
+ insn = &insns[pc++];
+
+ src = NATIVE_OFFSET(resource, insn->f2.native_offset);
+ dst = dst_base + insn->f1.aml_offset;
+
+ switch (insn->code) {
+ case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8:
+ case UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16: {
+ uacpi_u8 i, *array_size, bytes = 1;
+ uacpi_u16 mask = 0;
+
+ array_size = NATIVE_OFFSET(resource, insn->f3.arg2);
+ for (i = 0; i < *array_size; ++i)
+ mask |= 1 << src[i];
+
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16)
+ bytes = 2;
+
+ uacpi_memcpy(dst, &mask, bytes);
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1:
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2:
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3:
+ case UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6:
+ *dst |= *src << insn->f3.imm;
+ break;
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32:
+ accumulator = aml_size;
+ break;
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE:
+ case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX:
+ case UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL: {
+ uacpi_size source_offset, length;
+ uacpi_u8 *dst_string;
+ const uacpi_char *src_string;
+ union {
+ void *ptr;
+ uacpi_resource_source *source;
+ uacpi_resource_label *label;
+ } src_name = { 0 };
+
+ src_name.ptr = src;
+
+ source_offset = base_aml_size_with_header + accumulator;
+ dst_string = dst_base + source_offset;
+
+ if (insn->f1.aml_offset)
+ uacpi_memcpy(dst, &source_offset, sizeof(uacpi_u16));
+
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE &&
+ src_name.source->index_present)
+ uacpi_memcpy(dst_string++, &src_name.source->index, 1);
+
+ if (insn->code == UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL) {
+ length = src_name.label->length;
+ src_string = src_name.label->string;
+ } else {
+ length = src_name.source->length;
+ src_string = src_name.source->string;
+ }
+
+ if (length == 0)
+ break;
+
+ if (uacpi_unlikely(src_string == UACPI_NULL)) {
+ uacpi_error(
+ "source string length is %zu but the pointer is NULL\n",
+ length
+ );
+ ctx->st = UACPI_STATUS_INVALID_ARGUMENT;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ uacpi_memcpy(dst_string, src_string, length);
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH:
+ uacpi_memcpy_zerout(&accumulator, src,
+ sizeof(accumulator), sizeof(uacpi_u16));
+ accumulator *= sizeof(uacpi_u16);
+ break;
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE:
+ /*
+ * The pin table resides right at the end of the base resource;
+ * set the offset to it in the AML we're encoding.
+ */
+ uacpi_memcpy(dst, &base_aml_size_with_header, sizeof(uacpi_u16));
+
+ /*
+ * Copy the actual data. It also resides right at the end of the
+ * native base resource.
+ */
+ uacpi_memcpy(
+ dst_base + base_aml_size_with_header,
+ resource_end,
+ accumulator
+ );
+ break;
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA: {
+ uacpi_u16 vendor_data_length, data_offset, vendor_data_offset;
+ uacpi_u8 *vendor_data;
+
+ // Read the vendor_data pointer
+ uacpi_memcpy(&vendor_data, NATIVE_OFFSET(resource, insn->f3.arg2),
+ sizeof(void*));
+ uacpi_memcpy(&vendor_data_length, src, sizeof(uacpi_u16));
+
+ if (vendor_data == UACPI_NULL) {
+ uacpi_size full_aml_size;
+
+ if (uacpi_unlikely(vendor_data_length != 0)) {
+ uacpi_error(
+ "vendor_data_length is %d, but pointer is NULL\n",
+ vendor_data_length
+ );
+ ctx->st = UACPI_STATUS_INVALID_ARGUMENT;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ /*
+ * There's no vendor data. The specification still mandates
+ * that we fill the vendor data offset field correctly, meaning
+ * we set it to the total length of the resource.
+ */
+ full_aml_size = aml_size;
+ full_aml_size += aml_resource_kind_to_header_size[
+ spec->resource_kind
+ ];
+
+ uacpi_memcpy(dst, &full_aml_size, sizeof(uacpi_u16));
+ break;
+ }
+
+ /*
+ * Calculate the offset of vendor data from the end of the native
+ * resource and use it since it matches the offset from the end of
+ * the AML resource.
+ *
+ * Non-zero value means there's a source string in between.
+ */
+ data_offset = vendor_data - (uacpi_u8*)resource_end;
+ vendor_data_offset = data_offset + base_aml_size_with_header;
+
+ // Write vendor_data_offset
+ uacpi_memcpy(dst, &vendor_data_offset, sizeof(uacpi_u16));
+
+ /*
+ * Write vendor_data_length. This field sits right after
+ * vendor_data_offset and is completely redundant, but it exists
+ * nonetheless.
+ */
+ uacpi_memcpy(
+ dst + sizeof(uacpi_u16),
+ &vendor_data_length,
+ sizeof(vendor_data_length)
+ );
+
+ // Finally write the data itself
+ uacpi_memcpy(
+ dst_base + vendor_data_offset,
+ vendor_data,
+ vendor_data_length
+ );
+ break;
+ }
+
+ case UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC: {
+ uacpi_u8 serial_type = *src;
+ *dst = serial_type;
+
+ ctx->st = validate_aml_serial_type(serial_type);
+ if (uacpi_unlikely_error(ctx->st))
+ return UACPI_ITERATION_DECISION_BREAK;
+
+ if (uacpi_unlikely(resource->type !=
+ aml_serial_to_native_type(serial_type))) {
+ uacpi_error(
+ "native serial resource type %d doesn't match expected %d\n",
+ resource->type, aml_serial_to_native_type(serial_type)
+ );
+ ctx->st = UACPI_STATUS_INVALID_ARGUMENT;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ // Rebase the end pointer & size now that we know the serial type
+ resource_end = PTR_AT(
+ resource_end,
+ aml_serial_resource_to_extra_native_size[serial_type]
+ );
+ base_aml_size_with_header += aml_serial_resource_to_extra_aml_size[
+ serial_type
+ ];
+
+ accumulator = resource->serial_bus_common.vendor_data_length;
+ if (accumulator == 0)
+ break;
+
+ // Copy vendor data
+ uacpi_memcpy(
+ dst_base + base_aml_size_with_header,
+ resource_end,
+ accumulator
+ );
+ break;
+ }
+
+ CONVERSION_OPCODES_COMMON(src)
+ }
+ }
+}
+
+static uacpi_status native_resources_to_aml(
+ uacpi_resources *native_resources, void *aml_buffer
+)
+{
+ uacpi_status ret;
+ struct resource_conversion_ctx ctx = { 0 };
+
+ ctx.buf = aml_buffer;
+
+ ret = uacpi_for_each_resource(
+ native_resources, do_native_resource_to_aml, &ctx
+ );
+ if (ret == UACPI_STATUS_NO_RESOURCE_END_TAG) {
+ // An end tag is always included
+ uacpi_resource end_tag = { .type = UACPI_RESOURCE_TYPE_END_TAG };
+
+ do_native_resource_to_aml(&ctx, &end_tag);
+ ret = UACPI_STATUS_OK;
+ }
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return ctx.st;
+}
+
+static uacpi_iteration_decision accumulate_aml_buffer_size(
+ void *opaque, uacpi_resource *resource
+)
+{
+ struct resource_conversion_ctx *ctx = opaque;
+ const struct uacpi_resource_spec *spec;
+ uacpi_size size_for_this;
+
+ // resource->type is sanitized to be valid here by the iteration function
+ spec = resource_spec_from_native(resource);
+
+ size_for_this = aml_size_for_native_resource(resource, spec);
+ if (size_for_this == 0 || (ctx->size + size_for_this) < ctx->size) {
+ uacpi_error("invalid aml size for native resource: %zu\n",
+ size_for_this);
+ ctx->st = UACPI_STATUS_INVALID_ARGUMENT;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ ctx->size += size_for_this;
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
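+/*
+ * Conversion is done in two passes: accumulate_aml_buffer_size() first walks
+ * the native resource list to compute the required AML buffer size, then
+ * native_resources_to_aml() encodes each resource into the allocated buffer.
+ * In both passes an end tag is appended if the caller's list lacks one.
+ */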
+uacpi_status uacpi_native_resources_to_aml(
+ uacpi_resources *resources, uacpi_object **out_template
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ void *buffer;
+ struct resource_conversion_ctx ctx = { 0 };
+
+ ret = uacpi_for_each_resource(
+ resources, accumulate_aml_buffer_size, &ctx
+ );
+ if (ret == UACPI_STATUS_NO_RESOURCE_END_TAG) {
+ // An end tag is always included
+ uacpi_resource end_tag = { .type = UACPI_RESOURCE_TYPE_END_TAG };
+
+ accumulate_aml_buffer_size(&ctx, &end_tag);
+ ret = UACPI_STATUS_OK;
+ }
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ if (uacpi_unlikely_error(ctx.st))
+ return ctx.st;
+
+ // Same reasoning as native_resource_from_aml
+ if (uacpi_unlikely(ctx.size > (5 * 1024u * 1024u))) {
+ uacpi_error("bug: bogus target aml resource buffer size %zu\n",
+ ctx.size);
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+
+ buffer = uacpi_kernel_alloc_zeroed(ctx.size);
+ if (uacpi_unlikely(buffer == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ obj = uacpi_create_object(UACPI_OBJECT_BUFFER);
+ if (uacpi_unlikely(obj == UACPI_NULL)) {
+ uacpi_free(buffer, ctx.size);
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ obj->buffer->data = buffer;
+ obj->buffer->size = ctx.size;
+
+ ret = native_resources_to_aml(resources, buffer);
+ if (uacpi_unlikely_error(ret))
+ uacpi_object_unref(obj);
+
+ if (ret == UACPI_STATUS_OK)
+ *out_template = obj;
+
+ return ret;
+}
+
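+/*
+ * Encodes the caller-provided resource list into an AML resource template
+ * buffer object and passes it as the single argument to the device's _SRS
+ * method. The template object is released once evaluation completes.
+ */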
+uacpi_status uacpi_set_resources(
+ uacpi_namespace_node *device, uacpi_resources *resources
+)
+{
+ uacpi_status ret;
+ uacpi_object *res_template;
+ uacpi_object_array args;
+
+ ret = uacpi_native_resources_to_aml(resources, &res_template);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ args.objects = &res_template;
+ args.count = 1;
+ ret = uacpi_eval(device, "_SRS", &args, UACPI_NULL);
+
+ uacpi_object_unref(res_template);
+ return ret;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/shareable.c b/sys/dev/acpi/uacpi/shareable.c
new file mode 100644
index 0000000..b42660a
--- /dev/null
+++ b/sys/dev/acpi/uacpi/shareable.c
@@ -0,0 +1,71 @@
+#include <uacpi/internal/shareable.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/platform/atomic.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
+#define BUGGED_REFCOUNT 0xFFFFFFFF
+
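+/*
+ * Reference counting convention: objects start out with a count of 1, and
+ * uacpi_shareable_ref()/uacpi_shareable_unref() return the value the counter
+ * held *before* the operation. An object observed with a count of 0 (i.e.
+ * used after its last reference was dropped) is marked bugged: its counter
+ * is frozen at BUGGED_REFCOUNT and it is never freed.
+ */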
+void uacpi_shareable_init(uacpi_handle handle)
+{
+ struct uacpi_shareable *shareable = handle;
+ shareable->reference_count = 1;
+}
+
+uacpi_bool uacpi_bugged_shareable(uacpi_handle handle)
+{
+ struct uacpi_shareable *shareable = handle;
+
+ if (uacpi_unlikely(shareable->reference_count == 0))
+ uacpi_make_shareable_bugged(shareable);
+
+ return uacpi_atomic_load32(&shareable->reference_count) == BUGGED_REFCOUNT;
+}
+
+void uacpi_make_shareable_bugged(uacpi_handle handle)
+{
+ struct uacpi_shareable *shareable = handle;
+ uacpi_atomic_store32(&shareable->reference_count, BUGGED_REFCOUNT);
+}
+
+uacpi_u32 uacpi_shareable_ref(uacpi_handle handle)
+{
+ struct uacpi_shareable *shareable = handle;
+
+ if (uacpi_unlikely(uacpi_bugged_shareable(shareable)))
+ return BUGGED_REFCOUNT;
+
+ return uacpi_atomic_inc32(&shareable->reference_count) - 1;
+}
+
+uacpi_u32 uacpi_shareable_unref(uacpi_handle handle)
+{
+ struct uacpi_shareable *shareable = handle;
+
+ if (uacpi_unlikely(uacpi_bugged_shareable(shareable)))
+ return BUGGED_REFCOUNT;
+
+ return uacpi_atomic_dec32(&shareable->reference_count) + 1;
+}
+
+void uacpi_shareable_unref_and_delete_if_last(
+ uacpi_handle handle, void (*do_free)(uacpi_handle)
+)
+{
+ if (handle == UACPI_NULL)
+ return;
+
+ if (uacpi_unlikely(uacpi_bugged_shareable(handle)))
+ return;
+
+ if (uacpi_shareable_unref(handle) == 1)
+ do_free(handle);
+}
+
+uacpi_u32 uacpi_shareable_refcount(uacpi_handle handle)
+{
+ struct uacpi_shareable *shareable = handle;
+ return uacpi_atomic_load32(&shareable->reference_count);
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/sleep.c b/sys/dev/acpi/uacpi/sleep.c
new file mode 100644
index 0000000..4736324
--- /dev/null
+++ b/sys/dev/acpi/uacpi/sleep.c
@@ -0,0 +1,616 @@
+#include <uacpi/sleep.h>
+#include <uacpi/internal/context.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/io.h>
+#include <uacpi/internal/registers.h>
+#include <uacpi/internal/event.h>
+#include <uacpi/platform/arch_helpers.h>
+
+#ifndef UACPI_BAREBONES_MODE
+
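+/*
+ * CALL_SLEEP_FN dispatches to the _hw_full or _hw_reduced variant of a sleep
+ * helper depending on whether the firmware advertises hardware-reduced ACPI.
+ * When built with UACPI_REDUCED_HARDWARE only the reduced variants exist, so
+ * the macro collapses to a direct call.
+ */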
+#ifndef UACPI_REDUCED_HARDWARE
+#define CALL_SLEEP_FN(name, state) \
+ (uacpi_is_hardware_reduced() ? \
+ name##_hw_reduced(state) : name##_hw_full(state))
+#else
+#define CALL_SLEEP_FN(name, state) name##_hw_reduced(state);
+#endif
+
+static uacpi_status eval_wak(uacpi_u8 state);
+static uacpi_status eval_sst(uacpi_u8 value);
+
+#ifndef UACPI_REDUCED_HARDWARE
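+/*
+ * Stores the 32-bit real-mode firmware waking vector and, when the FACS is
+ * long enough to contain it, the 64-bit x_firmware_waking_vector. The 64-bit
+ * field is only filled in for FACS version >= 1 and zeroed otherwise; with
+ * no FACS present this is a no-op.
+ */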
+uacpi_status uacpi_set_waking_vector(
+ uacpi_phys_addr addr32, uacpi_phys_addr addr64
+)
+{
+ struct acpi_facs *facs = g_uacpi_rt_ctx.facs;
+
+ if (facs == UACPI_NULL)
+ return UACPI_STATUS_OK;
+
+ facs->firmware_waking_vector = addr32;
+
+ // The 64-bit wake vector doesn't exist, we're done
+ if (facs->length < 32)
+ return UACPI_STATUS_OK;
+
+ // Only allow 64-bit wake vector on 1.0 and above FACS
+ if (facs->version >= 1)
+ facs->x_firmware_waking_vector = addr64;
+ else
+ facs->x_firmware_waking_vector = 0;
+
+ return UACPI_STATUS_OK;
+}
+
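+/*
+ * Fixed-hardware sleep entry: clear WAK_STS, disable all GPEs and clear
+ * pending events, arm the wake GPEs, then write SLP_TYPa/b followed by
+ * SLP_EN into the PM1 control registers. The CPU caches are flushed before
+ * S1-S3 entry and WAK_STS is polled until the transition back to the working
+ * state; S4/S5 should never return, so a hardware timeout is reported if the
+ * write appears to have had no effect.
+ */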
+static uacpi_status enter_sleep_state_hw_full(uacpi_u8 state)
+{
+ uacpi_status ret;
+ uacpi_u64 wake_status, pm1a, pm1b;
+
+ ret = uacpi_write_register_field(
+ UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_disable_all_gpes();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_clear_all_events();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_enable_all_wake_gpes();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
+ pm1b = pm1a;
+
+ pm1a |= g_uacpi_rt_ctx.last_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
+ pm1b |= g_uacpi_rt_ctx.last_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;
+
+ /*
+ * Just like ACPICA, split writing SLP_TYP and SLP_EN to work around
+ * buggy firmware that can't handle both written at the same time.
+ */
+ ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ pm1a |= ACPI_PM1_CNT_SLP_EN_MASK;
+ pm1b |= ACPI_PM1_CNT_SLP_EN_MASK;
+
+ if (state < UACPI_SLEEP_STATE_S4)
+ UACPI_ARCH_FLUSH_CPU_CACHE();
+
+ ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (state > UACPI_SLEEP_STATE_S3) {
+ /*
+ * We're still here, this is a bug or very slow firmware.
+ * Just try spinning for a bit.
+ */
+ uacpi_u64 stalled_time = 0;
+
+ // 10 seconds max
+ while (stalled_time < (10 * 1000 * 1000)) {
+ uacpi_kernel_stall(100);
+ stalled_time += 100;
+ }
+
+ // Try one more time
+ ret = uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ // Nothing we can do here, give up
+ return UACPI_STATUS_HARDWARE_TIMEOUT;
+ }
+
+ do {
+ ret = uacpi_read_register_field(
+ UACPI_REGISTER_FIELD_WAK_STS, &wake_status
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ } while (wake_status != 1);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status prepare_for_wake_from_sleep_state_hw_full(uacpi_u8 state)
+{
+ uacpi_status ret;
+ uacpi_u64 pm1a, pm1b;
+ UACPI_UNUSED(state);
+
+ /*
+ * Some hardware apparently relies on S0 values being written to the PM1
+ * control register on wake, so do this here.
+ */
+
+ if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID)
+ goto out;
+
+ ret = uacpi_read_register(UACPI_REGISTER_PM1_CNT, &pm1a);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ pm1a &= ~((uacpi_u64)(ACPI_PM1_CNT_SLP_TYP_MASK | ACPI_PM1_CNT_SLP_EN_MASK));
+ pm1b = pm1a;
+
+ pm1a |= g_uacpi_rt_ctx.s0_sleep_typ_a << ACPI_PM1_CNT_SLP_TYP_IDX;
+ pm1b |= g_uacpi_rt_ctx.s0_sleep_typ_b << ACPI_PM1_CNT_SLP_TYP_IDX;
+
+ uacpi_write_registers(UACPI_REGISTER_PM1_CNT, pm1a, pm1b);
+out:
+ // Errors ignored intentionally, we don't want to abort because of this
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status wake_from_sleep_state_hw_full(uacpi_u8 state)
+{
+ uacpi_status ret;
+ g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
+ g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
+
+ // Set the status to 2 (waking) while we execute the wake method.
+ eval_sst(2);
+
+ ret = uacpi_disable_all_gpes();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_enable_all_runtime_gpes();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ eval_wak(state);
+
+ // Apparently some BIOSes expect us to clear this, so do it
+ uacpi_write_register_field(
+ UACPI_REGISTER_FIELD_WAK_STS, ACPI_PM1_STS_CLEAR
+ );
+
+ // Now that we're awake set the status to 1 (running)
+ eval_sst(1);
+
+ return UACPI_STATUS_OK;
+}
+#endif
+
+static uacpi_status get_slp_type_for_state(
+ uacpi_u8 state, uacpi_u8 *a, uacpi_u8 *b
+)
+{
+ uacpi_char path[] = "_S0";
+ uacpi_status ret;
+ uacpi_object *obj0, *obj1, *ret_obj = UACPI_NULL;
+
+ path[2] += state;
+
+ ret = uacpi_eval_typed(
+ uacpi_namespace_root(), path, UACPI_NULL,
+ UACPI_OBJECT_PACKAGE_BIT, &ret_obj
+ );
+ if (ret != UACPI_STATUS_OK) {
+ if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) {
+ uacpi_warn("error while evaluating %s: %s\n", path,
+ uacpi_status_to_string(ret));
+ } else {
+ uacpi_trace("sleep state %d is not supported as %s was not found\n",
+ state, path);
+ }
+ goto out;
+ }
+
+ switch (ret_obj->package->count) {
+ case 0:
+ uacpi_error("empty package while evaluating %s!\n", path);
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ goto out;
+
+ case 1:
+ obj0 = ret_obj->package->objects[0];
+ if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER)) {
+ uacpi_error(
+ "invalid object type at pkg[0] => %s when evaluating %s\n",
+ uacpi_object_type_to_string(obj0->type), path
+ );
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ goto out;
+ }
+
+ *a = obj0->integer;
+ *b = obj0->integer >> 8;
+ break;
+
+ default:
+ obj0 = ret_obj->package->objects[0];
+ obj1 = ret_obj->package->objects[1];
+
+ if (uacpi_unlikely(obj0->type != UACPI_OBJECT_INTEGER ||
+ obj1->type != UACPI_OBJECT_INTEGER)) {
+ uacpi_error(
+ "invalid object type when evaluating %s: "
+ "pkg[0] => %s, pkg[1] => %s\n", path,
+ uacpi_object_type_to_string(obj0->type),
+ uacpi_object_type_to_string(obj1->type)
+ );
+ ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ goto out;
+ }
+
+ *a = obj0->integer;
+ *b = obj1->integer;
+ break;
+ }
+
+out:
+ if (ret != UACPI_STATUS_OK) {
+ *a = UACPI_SLEEP_TYP_INVALID;
+ *b = UACPI_SLEEP_TYP_INVALID;
+ }
+
+ uacpi_object_unref(ret_obj);
+ return ret;
+}
+
+static uacpi_status eval_sleep_helper(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u8 value
+)
+{
+ uacpi_object *arg;
+ uacpi_object_array args;
+ uacpi_status ret;
+
+ arg = uacpi_create_object(UACPI_OBJECT_INTEGER);
+ if (uacpi_unlikely(arg == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ arg->integer = value;
+ args.objects = &arg;
+ args.count = 1;
+
+ ret = uacpi_eval(parent, path, &args, UACPI_NULL);
+ switch (ret) {
+ case UACPI_STATUS_OK:
+ break;
+ case UACPI_STATUS_NOT_FOUND:
+ ret = UACPI_STATUS_OK;
+ break;
+ default:
+ uacpi_error("error while evaluating %s: %s\n",
+ path, uacpi_status_to_string(ret));
+ break;
+ }
+
+ uacpi_object_unref(arg);
+ return ret;
+}
+
+static uacpi_status eval_pts(uacpi_u8 state)
+{
+ return eval_sleep_helper(uacpi_namespace_root(), "_PTS", state);
+}
+
+static uacpi_status eval_wak(uacpi_u8 state)
+{
+ return eval_sleep_helper(uacpi_namespace_root(), "_WAK", state);
+}
+
+static uacpi_status eval_sst(uacpi_u8 value)
+{
+ return eval_sleep_helper(
+ uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SI),
+ "_SST", value
+ );
+}
+
+static uacpi_status eval_sst_for_state(enum uacpi_sleep_state state)
+{
+ uacpi_u8 arg;
+
+ /*
+ * This optional object is a control method that OSPM invokes to set the
+ * system status indicator as desired.
+ * Arguments:(1)
+ * Arg0 - An Integer containing the system status indicator identifier:
+ * 0 - No system state indication. Indicator off
+ * 1 - Working
+ * 2 - Waking
+ * 3 - Sleeping. Used to indicate system state S1, S2, or S3
+ * 4 - Sleeping with context saved to non-volatile storage
+ */
+ switch (state) {
+ case UACPI_SLEEP_STATE_S0:
+ arg = 1;
+ break;
+ case UACPI_SLEEP_STATE_S1:
+ case UACPI_SLEEP_STATE_S2:
+ case UACPI_SLEEP_STATE_S3:
+ arg = 3;
+ break;
+ case UACPI_SLEEP_STATE_S4:
+ arg = 4;
+ break;
+ case UACPI_SLEEP_STATE_S5:
+ arg = 0;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return eval_sst(arg);
+}
+
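+/*
+ * Illustrative host call order for a full sleep/wake cycle (a sketch of how
+ * the public entry points below fit together, not a verbatim recipe):
+ *
+ *     uacpi_prepare_for_sleep_state(UACPI_SLEEP_STATE_S3);
+ *     // mask interrupts, park other CPUs, save device state...
+ *     uacpi_enter_sleep_state(UACPI_SLEEP_STATE_S3);
+ *     // ...firmware resumes execution here...
+ *     uacpi_prepare_for_wake_from_sleep_state(UACPI_SLEEP_STATE_S3);
+ *     uacpi_wake_from_sleep_state(UACPI_SLEEP_STATE_S3);
+ */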
+uacpi_status uacpi_prepare_for_sleep_state(enum uacpi_sleep_state state_enum)
+{
+ uacpi_u8 state = state_enum;
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
+
+ if (uacpi_unlikely(state > UACPI_SLEEP_STATE_S5))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = get_slp_type_for_state(
+ state,
+ &g_uacpi_rt_ctx.last_sleep_typ_a,
+ &g_uacpi_rt_ctx.last_sleep_typ_b
+ );
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
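+ /*
+ * The S0 SLP_TYP values are optional: on failure get_slp_type_for_state()
+ * leaves them as UACPI_SLEEP_TYP_INVALID and the wake path simply skips
+ * restoring them, so the return value is deliberately not checked here.
+ */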
+ ret = get_slp_type_for_state(
+ 0,
+ &g_uacpi_rt_ctx.s0_sleep_typ_a,
+ &g_uacpi_rt_ctx.s0_sleep_typ_b
+ );
+
+ ret = eval_pts(state);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ eval_sst_for_state(state);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_u8 make_hw_reduced_sleep_control(uacpi_u8 slp_typ)
+{
+ uacpi_u8 value;
+
+ value = (slp_typ << ACPI_SLP_CNT_SLP_TYP_IDX);
+ value &= ACPI_SLP_CNT_SLP_TYP_MASK;
+ value |= ACPI_SLP_CNT_SLP_EN_MASK;
+
+ return value;
+}
+
+static uacpi_status enter_sleep_state_hw_reduced(uacpi_u8 state)
+{
+ uacpi_status ret;
+ uacpi_u8 sleep_control;
+ uacpi_u64 wake_status;
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+
+ if (!fadt->sleep_control_reg.address || !fadt->sleep_status_reg.address)
+ return UACPI_STATUS_NOT_FOUND;
+
+ ret = uacpi_write_register_field(
+ UACPI_REGISTER_FIELD_HWR_WAK_STS,
+ ACPI_SLP_STS_CLEAR
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ sleep_control = make_hw_reduced_sleep_control(
+ g_uacpi_rt_ctx.last_sleep_typ_a
+ );
+
+ if (state < UACPI_SLEEP_STATE_S4)
+ UACPI_ARCH_FLUSH_CPU_CACHE();
+
+ /*
+ * To put the system into a sleep state, software will write the HW-reduced
+ * Sleep Type value (obtained from the \_Sx object in the DSDT) and the
+ * SLP_EN bit to the sleep control register.
+ */
+ ret = uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ /*
+ * The OSPM then polls the WAK_STS bit of the SLEEP_STATUS_REG waiting for
+ * it to be one (1), indicating that the system has been transitioned
+ * back to the Working state.
+ */
+ do {
+ ret = uacpi_read_register_field(
+ UACPI_REGISTER_FIELD_HWR_WAK_STS, &wake_status
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ } while (wake_status != 1);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status prepare_for_wake_from_sleep_state_hw_reduced(uacpi_u8 state)
+{
+ uacpi_u8 sleep_control;
+ UACPI_UNUSED(state);
+
+ if (g_uacpi_rt_ctx.s0_sleep_typ_a == UACPI_SLEEP_TYP_INVALID)
+ goto out;
+
+ sleep_control = make_hw_reduced_sleep_control(
+ g_uacpi_rt_ctx.s0_sleep_typ_a
+ );
+ uacpi_write_register(UACPI_REGISTER_SLP_CNT, sleep_control);
+
+out:
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status wake_from_sleep_state_hw_reduced(uacpi_u8 state)
+{
+ g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
+ g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
+
+ // Set the status to 2 (waking) while we execute the wake method.
+ eval_sst(2);
+
+ eval_wak(state);
+
+ // Apparently some BIOSes expect us to clear this, so do it
+ uacpi_write_register_field(
+ UACPI_REGISTER_FIELD_HWR_WAK_STS, ACPI_SLP_STS_CLEAR
+ );
+
+ // Now that we're awake set the status to 1 (running)
+ eval_sst(1);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_enter_sleep_state(enum uacpi_sleep_state state_enum)
+{
+ uacpi_u8 state = state_enum;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
+
+ if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (uacpi_unlikely(g_uacpi_rt_ctx.last_sleep_typ_a > ACPI_SLP_TYP_MAX ||
+ g_uacpi_rt_ctx.last_sleep_typ_b > ACPI_SLP_TYP_MAX)) {
+ uacpi_error("invalid SLP_TYP values: 0x%02X:0x%02X\n",
+ g_uacpi_rt_ctx.last_sleep_typ_a,
+ g_uacpi_rt_ctx.last_sleep_typ_b);
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ return CALL_SLEEP_FN(enter_sleep_state, state);
+}
+
+uacpi_status uacpi_prepare_for_wake_from_sleep_state(
+ uacpi_sleep_state state_enum
+)
+{
+ uacpi_u8 state = state_enum;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
+
+ if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ return CALL_SLEEP_FN(prepare_for_wake_from_sleep_state, state);
+}
+
+uacpi_status uacpi_wake_from_sleep_state(
+ uacpi_sleep_state state_enum
+)
+{
+ uacpi_u8 state = state_enum;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
+
+ if (uacpi_unlikely(state > UACPI_SLEEP_STATE_MAX))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ return CALL_SLEEP_FN(wake_from_sleep_state, state);
+}
+
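+/*
+ * Resets the machine via the FADT reset register. Depending on the register's
+ * address space this is either a port I/O write, a system-memory register
+ * write, or a PCI config-space write of the FADT reset value. If the write
+ * appears to succeed but the system keeps running, we spin for about a second
+ * and then report a hardware timeout.
+ */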
+uacpi_status uacpi_reboot(void)
+{
+ uacpi_status ret;
+ uacpi_handle pci_dev = UACPI_NULL, io_handle = UACPI_NULL;
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+ struct acpi_gas *reset_reg = &fadt->reset_reg;
+
+ /*
+ * Allow restarting earlier than namespace load so that the kernel can
+ * use this in case of some initialization error.
+ */
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (!(fadt->flags & ACPI_RESET_REG_SUP) || !reset_reg->address)
+ return UACPI_STATUS_NOT_FOUND;
+
+ switch (reset_reg->address_space_id) {
+ case UACPI_ADDRESS_SPACE_SYSTEM_IO:
+ /*
+ * For SystemIO we don't do any checking, and we ignore bit width
+ * because that's what NT does.
+ */
+ ret = uacpi_kernel_io_map(reset_reg->address, 1, &io_handle);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_kernel_io_write8(io_handle, 0, fadt->reset_value);
+ break;
+ case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY:
+ ret = uacpi_write_register(UACPI_REGISTER_RESET, fadt->reset_value);
+ break;
+ case UACPI_ADDRESS_SPACE_PCI_CONFIG: {
+ uacpi_pci_address address = { 0 };
+
+ // Bus is assumed to be 0 here
+ address.segment = 0;
+ address.bus = 0;
+ address.device = (reset_reg->address >> 32) & 0xFF;
+ address.function = (reset_reg->address >> 16) & 0xFF;
+
+ ret = uacpi_kernel_pci_device_open(address, &pci_dev);
+ if (uacpi_unlikely_error(ret))
+ break;
+
+ ret = uacpi_kernel_pci_write8(
+ pci_dev, reset_reg->address & 0xFFFF, fadt->reset_value
+ );
+ break;
+ }
+ default:
+ uacpi_warn(
+ "unable to perform a reset: unsupported address space '%s' (%d)\n",
+ uacpi_address_space_to_string(reset_reg->address_space_id),
+ reset_reg->address_space_id
+ );
+ ret = UACPI_STATUS_UNIMPLEMENTED;
+ }
+
+ if (ret == UACPI_STATUS_OK) {
+ /*
+ * This should've worked but we're still here.
+ * Spin for a bit then give up.
+ */
+ uacpi_u64 stalled_time = 0;
+
+ while (stalled_time < (1000 * 1000)) {
+ uacpi_kernel_stall(100);
+ stalled_time += 100;
+ }
+
+ uacpi_error("reset timeout\n");
+ ret = UACPI_STATUS_HARDWARE_TIMEOUT;
+ }
+
+ if (pci_dev != UACPI_NULL)
+ uacpi_kernel_pci_device_close(pci_dev);
+ if (io_handle != UACPI_NULL)
+ uacpi_kernel_io_unmap(io_handle);
+
+ return ret;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/stdlib.c b/sys/dev/acpi/uacpi/stdlib.c
new file mode 100644
index 0000000..98344f1
--- /dev/null
+++ b/sys/dev/acpi/uacpi/stdlib.c
@@ -0,0 +1,728 @@
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/utilities.h>
+
+#ifdef UACPI_USE_BUILTIN_STRING
+
+#ifndef uacpi_memcpy
+void *uacpi_memcpy(void *dest, const void *src, uacpi_size count)
+{
+ uacpi_char *cd = dest;
+ const uacpi_char *cs = src;
+
+ while (count--)
+ *cd++ = *cs++;
+
+ return dest;
+}
+#endif
+
+#ifndef uacpi_memmove
+void *uacpi_memmove(void *dest, const void *src, uacpi_size count)
+{
+ uacpi_char *cd = dest;
+ const uacpi_char *cs = src;
+
+ if (src < dest) {
+ cs += count;
+ cd += count;
+
+ while (count--)
+ *--cd = *--cs;
+ } else {
+ while (count--)
+ *cd++ = *cs++;
+ }
+
+ return dest;
+}
+#endif
+
+#ifndef uacpi_memset
+void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count)
+{
+ uacpi_u8 fill = ch;
+ uacpi_u8 *cdest = dest;
+
+ while (count--)
+ *cdest++ = fill;
+
+ return dest;
+}
+#endif
+
+#ifndef uacpi_memcmp
+uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count)
+{
+ const uacpi_u8 *byte_lhs = lhs;
+ const uacpi_u8 *byte_rhs = rhs;
+ uacpi_size i;
+
+ for (i = 0; i < count; ++i) {
+ if (byte_lhs[i] != byte_rhs[i])
+ return byte_lhs[i] - byte_rhs[i];
+ }
+
+ return 0;
+}
+#endif
+
+#endif // UACPI_USE_BUILTIN_STRING
+
+#ifndef uacpi_strlen
+uacpi_size uacpi_strlen(const uacpi_char *str)
+{
+ const uacpi_char *str1;
+
+ for (str1 = str; *str1; str1++);
+
+ return str1 - str;
+}
+#endif
+
+#ifndef UACPI_BAREBONES_MODE
+
+#ifndef uacpi_strnlen
+uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max)
+{
+ const uacpi_char *str1;
+
+ for (str1 = str; max-- && *str1; str1++);
+
+ return str1 - str;
+}
+#endif
+
+#ifndef uacpi_strcmp
+uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs)
+{
+ uacpi_size i = 0;
+ typedef const uacpi_u8 *cucp;
+
+ while (lhs[i] && rhs[i]) {
+ if (lhs[i] != rhs[i])
+ return *(cucp)&lhs[i] - *(cucp)&rhs[i];
+
+ i++;
+ }
+
+ return *(cucp)&lhs[i] - *(cucp)&rhs[i];
+}
+#endif
+
+void uacpi_memcpy_zerout(void *dst, const void *src,
+ uacpi_size dst_size, uacpi_size src_size)
+{
+ uacpi_size bytes_to_copy = UACPI_MIN(src_size, dst_size);
+
+ if (bytes_to_copy)
+ uacpi_memcpy(dst, src, bytes_to_copy);
+
+ if (dst_size > bytes_to_copy)
+ uacpi_memzero((uacpi_u8 *)dst + bytes_to_copy, dst_size - bytes_to_copy);
+}
+
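+/*
+ * Both bit scans use a 1-based result: uacpi_bit_scan_forward() returns the
+ * index (1..64) of the lowest set bit and uacpi_bit_scan_backward() the index
+ * of the highest set bit, with 0 meaning the input value was 0 (matching the
+ * semantics of ffsll()).
+ */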
+uacpi_u8 uacpi_bit_scan_forward(uacpi_u64 value)
+{
+#if defined(_MSC_VER) && !defined(__clang__)
+ unsigned char ret;
+ unsigned long index;
+
+#ifdef _WIN64
+ ret = _BitScanForward64(&index, value);
+ if (ret == 0)
+ return 0;
+
+ return (uacpi_u8)index + 1;
+#else
+ ret = _BitScanForward(&index, value);
+ if (ret == 0) {
+ ret = _BitScanForward(&index, value >> 32);
+ if (ret == 0)
+ return 0;
+
+ return (uacpi_u8)index + 33;
+ }
+
+ return (uacpi_u8)index + 1;
+#endif
+
+#elif defined(__WATCOMC__)
+ // TODO: Use compiler intrinsics or inline ASM here
+ uacpi_u8 index;
+ uacpi_u64 mask = 1;
+
+ for (index = 1; index <= 64; index++, mask <<= 1) {
+ if (value & mask) {
+ return index;
+ }
+ }
+
+ return 0;
+#else
+ return __builtin_ffsll(value);
+#endif
+}
+
+uacpi_u8 uacpi_bit_scan_backward(uacpi_u64 value)
+{
+#if defined(_MSC_VER) && !defined(__clang__)
+ unsigned char ret;
+ unsigned long index;
+
+#ifdef _WIN64
+ ret = _BitScanReverse64(&index, value);
+ if (ret == 0)
+ return 0;
+
+ return (uacpi_u8)index + 1;
+#else
+ ret = _BitScanReverse(&index, value >> 32);
+ if (ret == 0) {
+ ret = _BitScanReverse(&index, value);
+ if (ret == 0)
+ return 0;
+
+ return (uacpi_u8)index + 1;
+ }
+
+ return (uacpi_u8)index + 33;
+#endif
+
+#elif defined(__WATCOMC__)
+ // TODO: Use compiler intrinsics or inline ASM here
+ uacpi_u8 index;
+ uacpi_u64 mask = (1ull << 63);
+
+ for (index = 64; index > 0; index--, mask >>= 1) {
+ if (value & mask) {
+ return index;
+ }
+ }
+
+ return 0;
+#else
+ if (value == 0)
+ return 0;
+
+ return 64 - __builtin_clzll(value);
+#endif
+}
+
+#ifndef UACPI_NATIVE_ALLOC_ZEROED
+void *uacpi_builtin_alloc_zeroed(uacpi_size size)
+{
+ void *ptr;
+
+ ptr = uacpi_kernel_alloc(size);
+ if (uacpi_unlikely(ptr == UACPI_NULL))
+ return ptr;
+
+ uacpi_memzero(ptr, size);
+ return ptr;
+}
+#endif
+
+#endif // !UACPI_BAREBONES_MODE
+
+#ifndef uacpi_vsnprintf
+struct fmt_buf_state {
+ uacpi_char *buffer;
+ uacpi_size capacity;
+ uacpi_size bytes_written;
+};
+
+struct fmt_spec {
+ uacpi_u8 is_signed : 1;
+ uacpi_u8 prepend : 1;
+ uacpi_u8 uppercase : 1;
+ uacpi_u8 left_justify : 1;
+ uacpi_u8 alternate_form : 1;
+ uacpi_u8 has_precision : 1;
+ uacpi_char pad_char;
+ uacpi_char prepend_char;
+ uacpi_u64 min_width;
+ uacpi_u64 precision;
+ uacpi_u32 base;
+};
+
+static void write_one(struct fmt_buf_state *fb_state, uacpi_char c)
+{
+ if (fb_state->bytes_written < fb_state->capacity)
+ fb_state->buffer[fb_state->bytes_written] = c;
+
+ fb_state->bytes_written++;
+}
+
+static void write_many(
+ struct fmt_buf_state *fb_state, const uacpi_char *string, uacpi_size count
+)
+{
+ if (fb_state->bytes_written < fb_state->capacity) {
+ uacpi_size count_to_write;
+
+ count_to_write = UACPI_MIN(
+ count, fb_state->capacity - fb_state->bytes_written
+ );
+ uacpi_memcpy(
+ &fb_state->buffer[fb_state->bytes_written], string, count_to_write
+ );
+ }
+
+ fb_state->bytes_written += count;
+}
+
+static uacpi_char hex_char(uacpi_bool upper, uacpi_u64 value)
+{
+ static const uacpi_char upper_hex[] = "0123456789ABCDEF";
+ static const uacpi_char lower_hex[] = "0123456789abcdef";
+
+ return (upper ? upper_hex : lower_hex)[value];
+}
+
+static void write_padding(
+ struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_size repr_size
+)
+{
+ uacpi_u64 mw = fm->min_width;
+
+ if (mw <= repr_size)
+ return;
+
+ mw -= repr_size;
+
+ while (mw--)
+ write_one(fb_state, fm->left_justify ? ' ' : fm->pad_char);
+}
+
+#define REPR_BUFFER_SIZE 32
+
+static void write_integer(
+ struct fmt_buf_state *fb_state, struct fmt_spec *fm, uacpi_u64 value
+)
+{
+ uacpi_char repr_buffer[REPR_BUFFER_SIZE];
+ uacpi_size index = REPR_BUFFER_SIZE;
+ uacpi_u64 remainder;
+ uacpi_char repr;
+ uacpi_bool negative = UACPI_FALSE;
+ uacpi_size repr_size;
+
+ if (fm->is_signed) {
+ uacpi_i64 as_ll = value;
+
+ if (as_ll < 0) {
+ value = -as_ll;
+ negative = UACPI_TRUE;
+ }
+ }
+
+ if (fm->prepend || negative)
+ write_one(fb_state, negative ? '-' : fm->prepend_char);
+
+ while (value) {
+ remainder = value % fm->base;
+ value /= fm->base;
+
+ if (fm->base == 16) {
+ repr = hex_char(fm->uppercase, remainder);
+ } else if (fm->base == 8 || fm->base == 10) {
+ repr = remainder + '0';
+ } else {
+ repr = '?';
+ }
+
+ repr_buffer[--index] = repr;
+ }
+ repr_size = REPR_BUFFER_SIZE - index;
+
+ if (repr_size == 0) {
+ repr_buffer[--index] = '0';
+ repr_size = 1;
+ }
+
+ if (fm->alternate_form) {
+ if (fm->base == 16) {
+ repr_buffer[--index] = fm->uppercase ? 'X' : 'x';
+ repr_buffer[--index] = '0';
+ repr_size += 2;
+ } else if (fm->base == 8) {
+ repr_buffer[--index] = '0';
+ repr_size += 1;
+ }
+ }
+
+ if (fm->left_justify) {
+ write_many(fb_state, &repr_buffer[index], repr_size);
+ write_padding(fb_state, fm, repr_size);
+ } else {
+ write_padding(fb_state, fm, repr_size);
+ write_many(fb_state, &repr_buffer[index], repr_size);
+ }
+}
+
+static uacpi_bool string_has_at_least(
+ const uacpi_char *string, uacpi_size characters
+)
+{
+ while (*string) {
+ if (--characters == 0)
+ return UACPI_TRUE;
+
+ string++;
+ }
+
+ return UACPI_FALSE;
+}
+
+static uacpi_bool consume_digits(
+ const uacpi_char **string, uacpi_size *out_size
+)
+{
+ uacpi_size size = 0;
+
+ for (;;) {
+ char c = **string;
+ if (c < '0' || c > '9')
+ break;
+
+ size++;
+ *string += 1;
+ }
+
+ if (size == 0)
+ return UACPI_FALSE;
+
+ *out_size = size;
+ return UACPI_TRUE;
+}
+
+enum parse_number_mode {
+ PARSE_NUMBER_MODE_MAYBE,
+ PARSE_NUMBER_MODE_MUST,
+};
+
+static uacpi_bool parse_number(
+ const uacpi_char **fmt, enum parse_number_mode mode, uacpi_u64 *out_value
+)
+{
+ uacpi_status ret;
+ uacpi_size num_digits;
+ const uacpi_char *digits = *fmt;
+
+ if (!consume_digits(fmt, &num_digits))
+ return mode != PARSE_NUMBER_MODE_MUST;
+
+ ret = uacpi_string_to_integer(digits, num_digits, UACPI_BASE_DEC, out_value);
+ return ret == UACPI_STATUS_OK;
+}
+
+static uacpi_bool consume(const uacpi_char **string, const uacpi_char *token)
+{
+ uacpi_size token_size;
+
+ token_size = uacpi_strlen(token);
+
+ if (!string_has_at_least(*string, token_size))
+ return UACPI_FALSE;
+
+ if (!uacpi_memcmp(*string, token, token_size)) {
+ *string += token_size;
+ return UACPI_TRUE;
+ }
+
+ return UACPI_FALSE;
+}
+
+static uacpi_bool is_one_of(uacpi_char c, const uacpi_char *list)
+{
+ for (; *list; list++) {
+ if (c == *list)
+ return UACPI_TRUE;
+ }
+
+ return UACPI_FALSE;
+}
+
+static uacpi_bool consume_one_of(
+ const uacpi_char **string, const uacpi_char *list, uacpi_char *consumed_char
+)
+{
+ uacpi_char c = **string;
+ if (!c)
+ return UACPI_FALSE;
+
+ if (is_one_of(c, list)) {
+ *consumed_char = c;
+ *string += 1;
+ return UACPI_TRUE;
+ }
+
+ return UACPI_FALSE;
+}
+
+static uacpi_u32 base_from_specifier(uacpi_char specifier)
+{
+ switch (specifier)
+ {
+ case 'x':
+ case 'X':
+ return 16;
+ case 'o':
+ return 8;
+ default:
+ return 10;
+ }
+}
+
+static uacpi_bool is_uppercase_specifier(uacpi_char specifier)
+{
+ return specifier == 'X';
+}
+
+static const uacpi_char *find_next_conversion(
+ const uacpi_char *fmt, uacpi_size *offset
+)
+{
+ *offset = 0;
+
+ while (*fmt) {
+ if (*fmt == '%')
+ return fmt;
+
+ fmt++;
+ *offset += 1;
+ }
+
+ return UACPI_NULL;
+}
+
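+/*
+ * Minimal printf-style formatter used by uACPI for logging and string
+ * formatting. Judging by the parser below it supports the '+', '-', ' ', '0'
+ * and '#' flags, '*' or numeric field width and precision, the hh/h/l/ll/z
+ * length modifiers and the %c, %s, %p, %d, %i, %u, %o, %x and %X conversions;
+ * floating point is not handled.
+ */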
+uacpi_i32 uacpi_vsnprintf(
+ uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt,
+ uacpi_va_list vlist
+)
+{
+ struct fmt_buf_state fb_state = { 0 };
+ uacpi_u64 value;
+ const uacpi_char *next_conversion;
+ uacpi_size next_offset;
+ uacpi_char flag;
+
+ fb_state.buffer = buffer;
+ fb_state.capacity = capacity;
+ fb_state.bytes_written = 0;
+
+ while (*fmt) {
+ struct fmt_spec fm = {
+ .pad_char = ' ',
+ .base = 10,
+ };
+ next_conversion = find_next_conversion(fmt, &next_offset);
+
+ if (next_offset)
+ write_many(&fb_state, fmt, next_offset);
+
+ if (!next_conversion)
+ break;
+
+ fmt = next_conversion;
+ if (consume(&fmt, "%%")) {
+ write_one(&fb_state, '%');
+ continue;
+ }
+
+ // consume %
+ fmt++;
+
+ while (consume_one_of(&fmt, "+- 0#", &flag)) {
+ switch (flag) {
+ case '+':
+ case ' ':
+ fm.prepend = UACPI_TRUE;
+ fm.prepend_char = flag;
+ continue;
+ case '-':
+ fm.left_justify = UACPI_TRUE;
+ continue;
+ case '0':
+ fm.pad_char = '0';
+ continue;
+ case '#':
+ fm.alternate_form = UACPI_TRUE;
+ continue;
+ default:
+ return -1;
+ }
+ }
+
+ if (consume(&fmt, "*")) {
+ fm.min_width = uacpi_va_arg(vlist, int);
+ } else if (!parse_number(&fmt, PARSE_NUMBER_MODE_MAYBE, &fm.min_width)) {
+ return -1;
+ }
+
+ if (consume(&fmt, ".")) {
+ fm.has_precision = UACPI_TRUE;
+
+ if (consume(&fmt, "*")) {
+ fm.precision = uacpi_va_arg(vlist, int);
+ } else {
+ if (!parse_number(&fmt, PARSE_NUMBER_MODE_MUST, &fm.precision))
+ return -1;
+ }
+ }
+
+ flag = 0;
+
+ if (consume(&fmt, "c")) {
+ uacpi_char c = uacpi_va_arg(vlist, int);
+ write_one(&fb_state, c);
+ continue;
+ }
+
+ if (consume(&fmt, "s")) {
+ const uacpi_char *string = uacpi_va_arg(vlist, uacpi_char*);
+ uacpi_size i;
+
+ if (uacpi_unlikely(string == UACPI_NULL))
+ string = "<null>";
+
+ for (i = 0; (!fm.has_precision || i < fm.precision) && string[i]; ++i)
+ write_one(&fb_state, string[i]);
+ while (i++ < fm.min_width)
+ write_one(&fb_state, ' ');
+ continue;
+ }
+
+ if (consume(&fmt, "p")) {
+ value = (uacpi_uintptr)uacpi_va_arg(vlist, void*);
+ fm.base = 16;
+ fm.min_width = UACPI_POINTER_SIZE * 2;
+ fm.pad_char = '0';
+ goto write_int;
+ }
+
+ if (consume(&fmt, "hh")) {
+ if (consume(&fmt, "d") || consume(&fmt, "i")) {
+ value = (signed char)uacpi_va_arg(vlist, int);
+ fm.is_signed = UACPI_TRUE;
+ } else if (consume_one_of(&fmt, "oxXu", &flag)) {
+ value = (unsigned char)uacpi_va_arg(vlist, int);
+ } else {
+ return -1;
+ }
+ goto write_int;
+ }
+
+ if (consume(&fmt, "h")) {
+ if (consume(&fmt, "d") || consume(&fmt, "i")) {
+ value = (signed short)uacpi_va_arg(vlist, int);
+ fm.is_signed = UACPI_TRUE;
+ } else if (consume_one_of(&fmt, "oxXu", &flag)) {
+ value = (unsigned short)uacpi_va_arg(vlist, int);
+ } else {
+ return -1;
+ }
+ goto write_int;
+ }
+
+ if (consume(&fmt, "ll") ||
+ (sizeof(uacpi_size) == sizeof(long long) && consume(&fmt, "z"))) {
+ if (consume(&fmt, "d") || consume(&fmt, "i")) {
+ value = uacpi_va_arg(vlist, long long);
+ fm.is_signed = UACPI_TRUE;
+ } else if (consume_one_of(&fmt, "oxXu", &flag)) {
+ value = uacpi_va_arg(vlist, unsigned long long);
+ } else {
+ return -1;
+ }
+ goto write_int;
+ }
+
+ if (consume(&fmt, "l") ||
+ (sizeof(uacpi_size) == sizeof(long) && consume(&fmt, "z"))) {
+ if (consume(&fmt, "d") || consume(&fmt, "i")) {
+ value = uacpi_va_arg(vlist, long);
+ fm.is_signed = UACPI_TRUE;
+ } else if (consume_one_of(&fmt, "oxXu", &flag)) {
+ value = uacpi_va_arg(vlist, unsigned long);
+ } else {
+ return -1;
+ }
+ goto write_int;
+ }
+
+ if (consume(&fmt, "d") || consume(&fmt, "i")) {
+ value = uacpi_va_arg(vlist, uacpi_i32);
+ fm.is_signed = UACPI_TRUE;
+ } else if (consume_one_of(&fmt, "oxXu", &flag)) {
+ value = uacpi_va_arg(vlist, uacpi_u32);
+ } else {
+ return -1;
+ }
+
+ write_int:
+ if (flag != 0) {
+ fm.base = base_from_specifier(flag);
+ fm.uppercase = is_uppercase_specifier(flag);
+ }
+
+ write_integer(&fb_state, &fm, value);
+ }
+
+ if (fb_state.capacity) {
+ uacpi_size last_char;
+
+ last_char = UACPI_MIN(fb_state.bytes_written, fb_state.capacity - 1);
+ fb_state.buffer[last_char] = '\0';
+ }
+
+ return fb_state.bytes_written;
+}
+#endif
+
+#ifndef uacpi_snprintf
+uacpi_i32 uacpi_snprintf(
+ uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ...
+)
+{
+ uacpi_va_list vlist;
+ uacpi_i32 ret;
+
+ uacpi_va_start(vlist, fmt);
+ ret = uacpi_vsnprintf(buffer, capacity, fmt, vlist);
+ uacpi_va_end(vlist);
+
+ return ret;
+}
+#endif
+
+#ifndef UACPI_FORMATTED_LOGGING
+void uacpi_log(uacpi_log_level lvl, const uacpi_char *str, ...)
+{
+ uacpi_char buf[UACPI_PLAIN_LOG_BUFFER_SIZE];
+ int ret;
+
+ uacpi_va_list vlist;
+ uacpi_va_start(vlist, str);
+
+ ret = uacpi_vsnprintf(buf, sizeof(buf), str, vlist);
+ if (uacpi_unlikely(ret < 0))
+ return;
+
+ /*
+ * If this log message is too large for the configured buffer size, cut off
+ * the end and transform into "...\n" to indicate that it didn't fit and
+ * prevent the newline from being truncated.
+ */
+ if (uacpi_unlikely(ret >= UACPI_PLAIN_LOG_BUFFER_SIZE)) {
+ buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 5] = '.';
+ buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 4] = '.';
+ buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 3] = '.';
+ buf[UACPI_PLAIN_LOG_BUFFER_SIZE - 2] = '\n';
+ }
+
+ uacpi_kernel_log(lvl, buf);
+
+ uacpi_va_end(vlist);
+}
+#endif
diff --git a/sys/dev/acpi/uacpi/tables.c b/sys/dev/acpi/uacpi/tables.c
new file mode 100644
index 0000000..df7d7b9
--- /dev/null
+++ b/sys/dev/acpi/uacpi/tables.c
@@ -0,0 +1,1399 @@
+#include <uacpi/internal/tables.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/interpreter.h>
+#include <uacpi/internal/mutex.h>
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(
+ table_array, struct uacpi_installed_table, UACPI_STATIC_TABLE_ARRAY_LEN
+)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ table_array, struct uacpi_installed_table, static
+)
+
+static struct table_array tables;
+static uacpi_bool early_table_access;
+static uacpi_table_installation_handler installation_handler;
+
+#ifndef UACPI_BAREBONES_MODE
+
+static uacpi_handle table_mutex;
+
+#define ENSURE_TABLES_ONLINE() \
+ do { \
+ if (!early_table_access) \
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST( \
+ UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED \
+ ); \
+ } while (0)
+
+#else
+
+/*
+ * Use a dummy function instead of a macro to prevent the following error:
+ * error: statement with no effect [-Werror=unused-value]
+ */
+static inline uacpi_status dummy_mutex_acquire_release(uacpi_handle mtx)
+{
+ UACPI_UNUSED(mtx);
+ return UACPI_STATUS_OK;
+}
+
+#define table_mutex UACPI_NULL
+#define uacpi_acquire_native_mutex_may_be_null dummy_mutex_acquire_release
+#define uacpi_release_native_mutex_may_be_null dummy_mutex_acquire_release
+
+#define ENSURE_TABLES_ONLINE() \
+ do { \
+ if (!early_table_access) \
+ return UACPI_STATUS_INIT_LEVEL_MISMATCH; \
+ } while (0)
+
+#endif // !UACPI_BAREBONES_MODE
+
+static uacpi_status table_install_physical_with_origin_unlocked(
+ uacpi_phys_addr phys, enum uacpi_table_origin origin,
+ const uacpi_char *expected_signature, uacpi_table *out_table
+);
+static uacpi_status table_install_with_origin_unlocked(
+ void *virt, enum uacpi_table_origin origin, uacpi_table *out_table
+);
+
+UACPI_PACKED(struct uacpi_rxsdt {
+ struct acpi_sdt_hdr hdr;
+ uacpi_u8 ptr_bytes[];
+})
+
+static void dump_table_header(
+ uacpi_phys_addr phys_addr, void *hdr
+)
+{
+ struct acpi_sdt_hdr *sdt = hdr;
+
+ if (uacpi_signatures_match(hdr, ACPI_FACS_SIGNATURE)) {
+ uacpi_info(
+ "FACS 0x%016"UACPI_PRIX64" %08X\n", UACPI_FMT64(phys_addr),
+ sdt->length
+ );
+ return;
+ }
+
+ if (!uacpi_memcmp(hdr, ACPI_RSDP_SIGNATURE, sizeof(ACPI_RSDP_SIGNATURE) - 1)) {
+ struct acpi_rsdp *rsdp = hdr;
+
+ uacpi_info(
+ "RSDP 0x%016"UACPI_PRIX64" %08X v%02X (%6.6s)\n",
+ UACPI_FMT64(phys_addr), rsdp->revision >= 2 ? rsdp->length : 20,
+ rsdp->revision, rsdp->oemid
+ );
+ return;
+ }
+
+ uacpi_info(
+ "%.4s 0x%016"UACPI_PRIX64" %08X v%02X (%6.6s %8.8s)\n",
+ sdt->signature, UACPI_FMT64(phys_addr), sdt->length, sdt->revision,
+ sdt->oemid, sdt->oem_table_id
+ );
+}
+
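+/*
+ * Walks the RSDT (4-byte entries) or XSDT (8-byte entries), verifying the
+ * header signature and checksum, and installs every non-NULL entry as a
+ * firmware-provided physical table. Entries overridden by the host's table
+ * installation handler are tolerated.
+ */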
+static uacpi_status initialize_from_rxsdt(uacpi_phys_addr rxsdt_addr,
+ uacpi_size entry_size)
+{
+ struct uacpi_rxsdt *rxsdt;
+ uacpi_size i, entry_bytes, map_len = sizeof(*rxsdt);
+ uacpi_phys_addr entry_addr;
+ uacpi_status ret;
+
+ rxsdt = uacpi_kernel_map(rxsdt_addr, map_len);
+ if (rxsdt == UACPI_NULL)
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ dump_table_header(rxsdt_addr, rxsdt);
+
+ ret = uacpi_check_table_signature(rxsdt,
+ entry_size == 8 ? ACPI_XSDT_SIGNATURE : ACPI_RSDT_SIGNATURE);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+
+ map_len = rxsdt->hdr.length;
+ uacpi_kernel_unmap(rxsdt, sizeof(*rxsdt));
+
+ if (uacpi_unlikely(map_len < (sizeof(*rxsdt) + entry_size)))
+ return UACPI_STATUS_INVALID_TABLE_LENGTH;
+
+ // Make sure length is aligned to entry size so we don't OOB
+ entry_bytes = map_len - sizeof(*rxsdt);
+ entry_bytes &= ~(entry_size - 1);
+
+ rxsdt = uacpi_kernel_map(rxsdt_addr, map_len);
+ if (uacpi_unlikely(rxsdt == UACPI_NULL))
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ ret = uacpi_verify_table_checksum(rxsdt, map_len);
+ if (uacpi_unlikely_error(ret))
+ goto error_out;
+
+ for (i = 0; i < entry_bytes; i += entry_size) {
+ uacpi_u64 entry_phys_addr_large = 0;
+ uacpi_memcpy(&entry_phys_addr_large, &rxsdt->ptr_bytes[i], entry_size);
+
+ if (!entry_phys_addr_large)
+ continue;
+
+ entry_addr = uacpi_truncate_phys_addr_with_warn(entry_phys_addr_large);
+ ret = uacpi_table_install_physical_with_origin(
+ entry_addr, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL, UACPI_NULL
+ );
+ if (uacpi_unlikely(ret != UACPI_STATUS_OK &&
+ ret != UACPI_STATUS_OVERRIDDEN))
+ goto error_out;
+ }
+
+ ret = UACPI_STATUS_OK;
+
+error_out:
+ uacpi_kernel_unmap(rxsdt, map_len);
+ return ret;
+}
+
+static uacpi_status initialize_from_rsdp(void)
+{
+ uacpi_status ret;
+ uacpi_phys_addr rsdp_phys;
+ struct acpi_rsdp *rsdp;
+ uacpi_phys_addr rxsdt;
+ uacpi_size rxsdt_entry_size;
+
+ g_uacpi_rt_ctx.is_rev1 = UACPI_TRUE;
+
+ ret = uacpi_kernel_get_rsdp(&rsdp_phys);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ rsdp = uacpi_kernel_map(rsdp_phys, sizeof(struct acpi_rsdp));
+ if (rsdp == UACPI_NULL)
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ dump_table_header(rsdp_phys, rsdp);
+
+ if (rsdp->revision > 1 && rsdp->xsdt_addr &&
+ !uacpi_check_flag(UACPI_FLAG_BAD_XSDT))
+ {
+ rxsdt = uacpi_truncate_phys_addr_with_warn(rsdp->xsdt_addr);
+ rxsdt_entry_size = 8;
+ } else {
+ rxsdt = (uacpi_phys_addr)rsdp->rsdt_addr;
+ rxsdt_entry_size = 4;
+ }
+
+ uacpi_kernel_unmap(rsdp, sizeof(struct acpi_rsdp));
+
+ if (!rxsdt) {
+ uacpi_error("both RSDT & XSDT tables are NULL!\n");
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ return initialize_from_rxsdt(rxsdt, rxsdt_entry_size);
+}
+
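+/*
+ * Lets the host access ACPI tables before its allocator and the rest of the
+ * uACPI subsystem are brought up, by lending uACPI a temporary buffer for the
+ * table array. Hypothetical host-side usage sketch (names and sizing are
+ * illustrative, not part of uACPI):
+ *
+ *     static uacpi_u64 early_tbl_buf[512];
+ *
+ *     uacpi_setup_early_table_access(early_tbl_buf, sizeof(early_tbl_buf));
+ *     // ...look up MADT/HPET/etc. during early boot...
+ *
+ * A later call to uacpi_initialize_tables() (below) migrates any entries
+ * living in the temporary buffer onto the heap.
+ */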
+uacpi_status uacpi_setup_early_table_access(
+ void *temporary_buffer, uacpi_size buffer_size
+)
+{
+ uacpi_status ret;
+
+#ifndef UACPI_BAREBONES_MODE
+ UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY);
+#endif
+ if (uacpi_unlikely(early_table_access))
+ return UACPI_STATUS_INIT_LEVEL_MISMATCH;
+
+ if (uacpi_unlikely(buffer_size < sizeof(struct uacpi_installed_table)))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ uacpi_logger_initialize();
+
+ tables.dynamic_storage = temporary_buffer;
+ tables.dynamic_capacity = buffer_size / sizeof(struct uacpi_installed_table);
+ early_table_access = UACPI_TRUE;
+
+ ret = initialize_from_rsdp();
+ if (uacpi_unlikely_error(ret))
+ uacpi_deinitialize_tables();
+
+ return ret;
+}
+
+#ifndef UACPI_BAREBONES_MODE
+static uacpi_iteration_decision warn_if_early_referenced(
+ void *user, struct uacpi_installed_table *tbl, uacpi_size idx
+)
+{
+ UACPI_UNUSED(user);
+
+ if (uacpi_unlikely(tbl->reference_count != 0)) {
+ uacpi_warn(
+ "table "UACPI_PRI_TBL_HDR" (%zu) still has %d early reference(s)!\n",
+ UACPI_FMT_TBL_HDR(&tbl->hdr), idx, tbl->reference_count
+ );
+ }
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+uacpi_status uacpi_initialize_tables(void)
+{
+ if (early_table_access) {
+ uacpi_size num_tables;
+
+ uacpi_for_each_table(0, warn_if_early_referenced, UACPI_NULL);
+
+ // Reallocate the user buffer into a normal heap array
+ num_tables = table_array_size(&tables);
+ if (num_tables > table_array_inline_capacity(&tables)) {
+ void *new_buf;
+
+ /*
+ * Allocate a new buffer with size equal to exactly the number of
+ * dynamic tables (that live in the user provided temporary buffer).
+ */
+ num_tables -= table_array_inline_capacity(&tables);
+ new_buf = uacpi_kernel_alloc(
+ sizeof(struct uacpi_installed_table) * num_tables
+ );
+ if (uacpi_unlikely(new_buf == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy(new_buf, tables.dynamic_storage,
+ sizeof(struct uacpi_installed_table) * num_tables);
+ tables.dynamic_storage = new_buf;
+ tables.dynamic_capacity = num_tables;
+ } else {
+ /*
+ * User-provided temporary buffer was not used at all, just remove
+ * any references to it.
+ */
+ tables.dynamic_storage = UACPI_NULL;
+ tables.dynamic_capacity = 0;
+ }
+
+ early_table_access = UACPI_FALSE;
+ } else {
+ uacpi_status ret;
+
+ ret = initialize_from_rsdp();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ if (!uacpi_is_hardware_reduced()) {
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+ uacpi_table tbl;
+
+ if (fadt->x_firmware_ctrl) {
+ uacpi_status ret;
+
+ ret = table_install_physical_with_origin_unlocked(
+ fadt->x_firmware_ctrl, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL,
+ ACPI_FACS_SIGNATURE, &tbl
+ );
+ if (uacpi_unlikely(ret != UACPI_STATUS_OK &&
+ ret != UACPI_STATUS_OVERRIDDEN))
+ return ret;
+
+ g_uacpi_rt_ctx.facs = tbl.ptr;
+ }
+ }
+
+ table_mutex = uacpi_kernel_create_mutex();
+ if (uacpi_unlikely(table_mutex == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ return UACPI_STATUS_OK;
+}
+#endif // !UACPI_BAREBONES_MODE
+
+void uacpi_deinitialize_tables(void)
+{
+ uacpi_size i;
+
+ for (i = 0; i < table_array_size(&tables); ++i) {
+ struct uacpi_installed_table *tbl = table_array_at(&tables, i);
+
+ switch (tbl->origin) {
+#ifndef UACPI_BAREBONES_MODE
+ case UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL:
+ uacpi_free(tbl->ptr, tbl->hdr.length);
+ break;
+#endif
+ case UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL:
+ case UACPI_TABLE_ORIGIN_HOST_PHYSICAL:
+ if (tbl->reference_count != 0)
+ uacpi_kernel_unmap(tbl->ptr, tbl->hdr.length);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (early_table_access) {
+ uacpi_memzero(&tables, sizeof(tables));
+ early_table_access = UACPI_FALSE;
+ } else {
+ table_array_clear(&tables);
+ }
+
+ installation_handler = UACPI_NULL;
+
+#ifndef UACPI_BAREBONES_MODE
+ if (table_mutex)
+ uacpi_kernel_free_mutex(table_mutex);
+
+ table_mutex = UACPI_NULL;
+#endif
+}
+
+uacpi_status uacpi_set_table_installation_handler(
+ uacpi_table_installation_handler handler
+)
+{
+ uacpi_status ret;
+
+ ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (installation_handler != UACPI_NULL && handler != UACPI_NULL)
+ goto out;
+
+ installation_handler = handler;
+
+out:
+ uacpi_release_native_mutex_may_be_null(table_mutex);
+ return ret;
+}
+
+static uacpi_status initialize_fadt(const void*);
+
+static uacpi_u8 table_checksum(void *table, uacpi_size size)
+{
+ uacpi_u8 *bytes = table;
+ uacpi_u8 csum = 0;
+ uacpi_size i;
+
+ for (i = 0; i < size; ++i)
+ csum += bytes[i];
+
+ return csum;
+}
+
+uacpi_status uacpi_verify_table_checksum(void *table, uacpi_size size)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ uacpi_u8 csum;
+
+ csum = table_checksum(table, size);
+
+ if (uacpi_unlikely(csum != 0)) {
+ enum uacpi_log_level lvl = UACPI_LOG_WARN;
+ struct acpi_sdt_hdr *hdr = table;
+
+ if (uacpi_check_flag(UACPI_FLAG_BAD_CSUM_FATAL)) {
+ ret = UACPI_STATUS_BAD_CHECKSUM;
+ lvl = UACPI_LOG_ERROR;
+ }
+
+ uacpi_log_lvl(
+ lvl, "invalid table "UACPI_PRI_TBL_HDR" checksum %d!\n",
+ UACPI_FMT_TBL_HDR(hdr), csum
+ );
+ }
+
+ return ret;
+}
+
+uacpi_bool uacpi_signatures_match(const void *const lhs, const void *const rhs)
+{
+ return uacpi_memcmp(lhs, rhs, sizeof(uacpi_object_name)) == 0;
+}
+
+uacpi_status uacpi_check_table_signature(void *table, const uacpi_char *expect)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (!uacpi_signatures_match(table, expect)) {
+ enum uacpi_log_level lvl = UACPI_LOG_WARN;
+ struct acpi_sdt_hdr *hdr = table;
+
+ if (uacpi_check_flag(UACPI_FLAG_BAD_TBL_SIGNATURE_FATAL)) {
+ ret = UACPI_STATUS_INVALID_SIGNATURE;
+ lvl = UACPI_LOG_ERROR;
+ }
+
+ uacpi_log_lvl(
+ lvl,
+ "invalid table "UACPI_PRI_TBL_HDR" signature (expected '%.4s')\n",
+ UACPI_FMT_TBL_HDR(hdr), expect
+ );
+ }
+
+ return ret;
+}
+
+static uacpi_status table_alloc(
+ struct uacpi_installed_table **out_tbl, uacpi_size *out_idx
+)
+{
+ struct uacpi_installed_table *tbl;
+
+ if (early_table_access &&
+ table_array_size(&tables) == table_array_capacity(&tables)) {
+ uacpi_warn("early table access buffer capacity exhausted!\n");
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ tbl = table_array_alloc(&tables);
+ if (uacpi_unlikely(tbl == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ *out_tbl = tbl;
+ *out_idx = table_array_size(&tables) - 1;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status get_external_table_header(
+ uacpi_phys_addr phys_addr, struct acpi_sdt_hdr *out_hdr
+)
+{
+ void *virt;
+
+ virt = uacpi_kernel_map(phys_addr, sizeof(*out_hdr));
+ if (uacpi_unlikely(virt == UACPI_NULL))
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ uacpi_memcpy(out_hdr, virt, sizeof(*out_hdr));
+
+ uacpi_kernel_unmap(virt, sizeof(*out_hdr));
+ return UACPI_STATUS_OK;
+}
+
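+/*
+ * Physical tables are mapped lazily: the first reference maps the table and,
+ * if not already done, verifies its checksum; the last reference unmaps it.
+ * A reference count that saturates at 0xFFFF leaves the table mapped
+ * permanently, since the real number of holders is no longer known.
+ */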
+static uacpi_status table_ref_unlocked(struct uacpi_installed_table *tbl)
+{
+ switch (tbl->reference_count) {
+ case 0: {
+ uacpi_status ret;
+
+ if (tbl->flags & UACPI_TABLE_INVALID)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (tbl->origin != UACPI_TABLE_ORIGIN_HOST_PHYSICAL &&
+ tbl->origin != UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL)
+ break;
+
+ tbl->ptr = uacpi_kernel_map(tbl->phys_addr, tbl->hdr.length);
+ if (uacpi_unlikely(tbl->ptr == UACPI_NULL))
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ if (!(tbl->flags & UACPI_TABLE_CSUM_VERIFIED)) {
+ ret = uacpi_verify_table_checksum(tbl->ptr, tbl->hdr.length);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_kernel_unmap(tbl->ptr, tbl->hdr.length);
+ tbl->flags |= UACPI_TABLE_INVALID;
+ tbl->ptr = UACPI_NULL;
+ return ret;
+ }
+
+ tbl->flags |= UACPI_TABLE_CSUM_VERIFIED;
+ }
+ break;
+ }
+ case 0xFFFF - 1:
+ uacpi_warn(
+ "too many references for "UACPI_PRI_TBL_HDR
+ ", mapping permanently\n", UACPI_FMT_TBL_HDR(&tbl->hdr)
+ );
+ break;
+ default:
+ break;
+ }
+
+ if (uacpi_likely(tbl->reference_count != 0xFFFF))
+ tbl->reference_count++;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status table_unref_unlocked(struct uacpi_installed_table *tbl)
+{
+ switch (tbl->reference_count) {
+ case 0:
+ uacpi_warn(
+ "tried to unref table "UACPI_PRI_TBL_HDR" with no references\n",
+ UACPI_FMT_TBL_HDR(&tbl->hdr)
+ );
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ case 1:
+ if (tbl->origin != UACPI_TABLE_ORIGIN_HOST_PHYSICAL &&
+ tbl->origin != UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL)
+ break;
+
+ uacpi_kernel_unmap(tbl->ptr, tbl->hdr.length);
+ tbl->ptr = UACPI_NULL;
+ break;
+ case 0xFFFF:
+ /*
+ * Consider the reference count (overflow) of 0xFFFF to be a permanently
+ * mapped table as we don't know the actual number of references.
+ */
+ return UACPI_STATUS_OK;
+ default:
+ break;
+ }
+
+ tbl->reference_count--;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status verify_and_install_table(
+ struct acpi_sdt_hdr *hdr, uacpi_phys_addr phys_addr, void *virt_addr,
+ enum uacpi_table_origin origin, uacpi_table *out_table
+)
+{
+ uacpi_status ret;
+ struct uacpi_installed_table *table;
+ uacpi_bool is_fadt;
+ uacpi_size idx;
+ uacpi_u8 flags = 0;
+
+ is_fadt = uacpi_signatures_match(hdr->signature, ACPI_FADT_SIGNATURE);
+
+ /*
+ * FACS is the only(?) table without a checksum because it has OSPM
+ * writable fields. Don't try to validate it here.
+ */
+ if (uacpi_signatures_match(hdr->signature, ACPI_FACS_SIGNATURE)) {
+ flags |= UACPI_TABLE_CSUM_VERIFIED;
+ } else if (uacpi_check_flag(UACPI_FLAG_PROACTIVE_TBL_CSUM) || is_fadt ||
+ out_table != UACPI_NULL) {
+ void *mapping = virt_addr;
+
+ // We may already have a valid mapping, reuse it if we do
+ if (mapping == UACPI_NULL)
+ mapping = uacpi_kernel_map(phys_addr, hdr->length);
+ if (uacpi_unlikely(mapping == UACPI_NULL))
+ return UACPI_STATUS_MAPPING_FAILED;
+
+ ret = uacpi_verify_table_checksum(mapping, hdr->length);
+ if (uacpi_likely_success(ret)) {
+ if (is_fadt)
+ ret = initialize_fadt(mapping);
+ flags |= UACPI_TABLE_CSUM_VERIFIED;
+ }
+
+ if (virt_addr == UACPI_NULL)
+ uacpi_kernel_unmap(mapping, hdr->length);
+
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ if (uacpi_signatures_match(hdr->signature, ACPI_DSDT_SIGNATURE))
+ g_uacpi_rt_ctx.is_rev1 = hdr->revision < 2;
+
+ ret = table_alloc(&table, &idx);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ dump_table_header(phys_addr, hdr);
+
+ uacpi_memcpy(&table->hdr, hdr, sizeof(*hdr));
+ table->reference_count = 0;
+ table->phys_addr = phys_addr;
+ table->ptr = virt_addr;
+ table->flags = flags;
+ table->origin = origin;
+
+ if (out_table == UACPI_NULL)
+ return UACPI_STATUS_OK;
+
+ table->reference_count++;
+ out_table->ptr = virt_addr;
+ out_table->index = idx;
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status handle_table_override(
+ uacpi_table_installation_disposition disposition, uacpi_u64 address,
+ uacpi_table *out_table
+)
+{
+ uacpi_status ret;
+
+ switch (disposition) {
+ case UACPI_TABLE_INSTALLATION_DISPOSITON_VIRTUAL_OVERRIDE:
+ ret = table_install_with_origin_unlocked(
+ UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)address),
+ UACPI_TABLE_ORIGIN_HOST_VIRTUAL,
+ out_table
+ );
+ return ret;
+ case UACPI_TABLE_INSTALLATION_DISPOSITON_PHYSICAL_OVERRIDE:
+ return table_install_physical_with_origin_unlocked(
+ (uacpi_phys_addr)address,
+ UACPI_TABLE_ORIGIN_HOST_PHYSICAL,
+ UACPI_NULL,
+ out_table
+ );
+ default:
+ uacpi_error("invalid table installation disposition %d\n", disposition);
+ return UACPI_STATUS_INTERNAL_ERROR;
+ }
+}
+
+static uacpi_status table_install_physical_with_origin_unlocked(
+ uacpi_phys_addr phys, enum uacpi_table_origin origin,
+ const uacpi_char *expected_signature, uacpi_table *out_table
+)
+{
+ struct acpi_sdt_hdr hdr;
+ void *virt = UACPI_NULL;
+ uacpi_status ret;
+
+ ret = get_external_table_header(phys, &hdr);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (uacpi_unlikely(hdr.length < sizeof(struct acpi_sdt_hdr))) {
+ uacpi_error("invalid table '%.4s' (0x%016"UACPI_PRIX64") size: %u\n",
+ hdr.signature, UACPI_FMT64(phys), hdr.length);
+ return UACPI_STATUS_INVALID_TABLE_LENGTH;
+ }
+
+ if (expected_signature != UACPI_NULL) {
+ ret = uacpi_check_table_signature(&hdr, expected_signature);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+ }
+
+ if (installation_handler != UACPI_NULL || out_table != UACPI_NULL) {
+ virt = uacpi_kernel_map(phys, hdr.length);
+ if (uacpi_unlikely(!virt))
+ return UACPI_STATUS_MAPPING_FAILED;
+ }
+
+ if (origin == UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL &&
+ installation_handler != UACPI_NULL) {
+ uacpi_u64 override;
+ uacpi_table_installation_disposition disposition;
+
+ disposition = installation_handler(virt, &override);
+
+ switch (disposition) {
+ case UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW:
+ break;
+ case UACPI_TABLE_INSTALLATION_DISPOSITON_DENY:
+ uacpi_info(
+ "table '%.4s' (0x%016"UACPI_PRIX64") installation denied "
+ "by host\n", hdr.signature, UACPI_FMT64(phys)
+ );
+ ret = UACPI_STATUS_DENIED;
+ goto out;
+
+ default:
+ uacpi_info(
+ "table '%.4s' (0x%016"UACPI_PRIX64") installation "
+ "overridden by host\n", hdr.signature, UACPI_FMT64(phys)
+ );
+
+ ret = handle_table_override(disposition, override, out_table);
+ if (uacpi_likely_success(ret))
+ ret = UACPI_STATUS_OVERRIDDEN;
+
+ goto out;
+ }
+ }
+
+ ret = verify_and_install_table(&hdr, phys, virt, origin, out_table);
+out:
+ // This is the only case in which we keep the mapping (it goes to the caller)
+ if (ret == UACPI_STATUS_OK && out_table != UACPI_NULL)
+ return ret;
+
+ if (virt != UACPI_NULL)
+ uacpi_kernel_unmap(virt, hdr.length);
+ return ret;
+}
+
+uacpi_status uacpi_table_install_physical_with_origin(
+ uacpi_phys_addr phys, enum uacpi_table_origin origin, uacpi_table *out_table
+)
+{
+ uacpi_status ret;
+
+ ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = table_install_physical_with_origin_unlocked(
+ phys, origin, UACPI_NULL, out_table
+ );
+ uacpi_release_native_mutex_may_be_null(table_mutex);
+
+ return ret;
+}
+
+static uacpi_status table_install_with_origin_unlocked(
+ void *virt, enum uacpi_table_origin origin, uacpi_table *out_table
+)
+{
+ struct acpi_sdt_hdr *hdr = virt;
+
+ if (uacpi_unlikely(hdr->length < sizeof(struct acpi_sdt_hdr))) {
+ uacpi_error("invalid table '%.4s' (%p) size: %u\n",
+ hdr->signature, virt, hdr->length);
+ return UACPI_STATUS_INVALID_TABLE_LENGTH;
+ }
+
+#ifndef UACPI_BAREBONES_MODE
+ if (origin == UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL &&
+ installation_handler != UACPI_NULL) {
+ uacpi_u64 override;
+ uacpi_table_installation_disposition disposition;
+
+ disposition = installation_handler(virt, &override);
+
+ switch (disposition) {
+ case UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW:
+ break;
+ case UACPI_TABLE_INSTALLATION_DISPOSITON_DENY:
+ uacpi_info(
+ "table "UACPI_PRI_TBL_HDR" installation denied by host\n",
+ UACPI_FMT_TBL_HDR(hdr)
+ );
+ return UACPI_STATUS_DENIED;
+
+ default: {
+ uacpi_status ret;
+ uacpi_info(
+ "table "UACPI_PRI_TBL_HDR" installation overridden by host\n",
+ UACPI_FMT_TBL_HDR(hdr)
+ );
+
+ ret = handle_table_override(disposition, override, out_table);
+ if (uacpi_likely_success(ret))
+ ret = UACPI_STATUS_OVERRIDDEN;
+
+ return ret;
+ }
+ }
+ }
+#endif
+
+ return verify_and_install_table(
+ hdr, 0, virt, origin, out_table
+ );
+}
+
+uacpi_status uacpi_table_install_with_origin(
+ void *virt, enum uacpi_table_origin origin, uacpi_table *out_table
+)
+{
+ uacpi_status ret;
+
+ ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = table_install_with_origin_unlocked(virt, origin, out_table);
+
+ uacpi_release_native_mutex_may_be_null(table_mutex);
+ return ret;
+}
+
+uacpi_status uacpi_table_install(void *virt, uacpi_table *out_table)
+{
+ ENSURE_TABLES_ONLINE();
+
+ return uacpi_table_install_with_origin(
+ virt, UACPI_TABLE_ORIGIN_HOST_VIRTUAL, out_table
+ );
+}
+
+uacpi_status uacpi_table_install_physical(
+ uacpi_phys_addr addr, uacpi_table *out_table
+)
+{
+ ENSURE_TABLES_ONLINE();
+
+ return uacpi_table_install_physical_with_origin(
+ addr, UACPI_TABLE_ORIGIN_HOST_PHYSICAL, out_table
+ );
+}
+
+uacpi_status uacpi_for_each_table(
+ uacpi_size base_idx, uacpi_table_iteration_callback cb, void *user
+)
+{
+ uacpi_status ret;
+ uacpi_size idx;
+ struct uacpi_installed_table *tbl;
+ uacpi_iteration_decision dec;
+
+ ENSURE_TABLES_ONLINE();
+
+ ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ for (idx = base_idx; idx < table_array_size(&tables); ++idx) {
+ tbl = table_array_at(&tables, idx);
+
+ if (tbl->flags & UACPI_TABLE_INVALID)
+ continue;
+
+ dec = cb(user, tbl, idx);
+ if (dec == UACPI_ITERATION_DECISION_BREAK)
+ break;
+ }
+
+ uacpi_release_native_mutex_may_be_null(table_mutex);
+ return ret;
+}
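+
+/*
+ * A minimal iteration callback with the shape uacpi_for_each_table()
+ * expects (the same shape do_search_tables() below uses); illustrative only:
+ *
+ *     static uacpi_iteration_decision count_tables(
+ *         void *user, struct uacpi_installed_table *tbl, uacpi_size idx
+ *     )
+ *     {
+ *         (void)tbl;
+ *         (void)idx;
+ *
+ *         (*(uacpi_size *)user)++;
+ *         return UACPI_ITERATION_DECISION_CONTINUE;
+ *     }
+ *
+ *     Usage: uacpi_size n = 0; uacpi_for_each_table(0, count_tables, &n);
+ */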
+
+enum search_type {
+ SEARCH_TYPE_BY_ID,
+ SEARCH_TYPE_MATCH,
+};
+
+struct table_search_ctx {
+ union {
+ const uacpi_table_identifiers *id;
+ uacpi_table_match_callback match_cb;
+ };
+
+ uacpi_table *out_table;
+ uacpi_u8 search_type;
+ uacpi_status status;
+};
+
+static uacpi_iteration_decision do_search_tables(
+ void *user, struct uacpi_installed_table *tbl, uacpi_size idx
+)
+{
+ struct table_search_ctx *ctx = user;
+ uacpi_table *out_table;
+ uacpi_status ret;
+
+ switch (ctx->search_type) {
+ case SEARCH_TYPE_BY_ID: {
+ const uacpi_table_identifiers *id = ctx->id;
+
+ if (!uacpi_signatures_match(&id->signature, tbl->hdr.signature))
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ if (id->oemid[0] != '\0' &&
+ uacpi_memcmp(id->oemid, tbl->hdr.oemid, sizeof(id->oemid)) != 0)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ if (id->oem_table_id[0] != '\0' &&
+ uacpi_memcmp(id->oem_table_id, tbl->hdr.oem_table_id,
+ sizeof(id->oem_table_id)) != 0)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ break;
+ }
+
+ case SEARCH_TYPE_MATCH:
+ if (!ctx->match_cb(tbl))
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ break;
+
+ default:
+ ctx->status = UACPI_STATUS_INVALID_ARGUMENT;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ ret = table_ref_unlocked(tbl);
+ if (uacpi_likely_success(ret)) {
+ out_table = ctx->out_table;
+ out_table->ptr = tbl->ptr;
+ out_table->index = idx;
+ ctx->status = ret;
+ return UACPI_ITERATION_DECISION_BREAK;
+ }
+
+ /*
+ * Don't abort nor propagate bad checksums, just pretend this table never
+ * existed and go on with the search.
+ */
+ if (ret == UACPI_STATUS_BAD_CHECKSUM)
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ ctx->status = ret;
+ return UACPI_ITERATION_DECISION_BREAK;
+}
+
+#ifndef UACPI_BAREBONES_MODE
+uacpi_status uacpi_table_match(
+ uacpi_size base_idx, uacpi_table_match_callback cb, uacpi_table *out_table
+)
+{
+ uacpi_status ret;
+ struct table_search_ctx ctx = { 0 };
+
+ ctx.match_cb = cb;
+ ctx.search_type = SEARCH_TYPE_MATCH;
+ ctx.out_table = out_table;
+ ctx.status = UACPI_STATUS_NOT_FOUND;
+
+ ret = uacpi_for_each_table(base_idx, do_search_tables, &ctx);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return ctx.status;
+}
+#endif
+
+static uacpi_status find_table(
+ uacpi_size base_idx, const uacpi_table_identifiers *id,
+ uacpi_table *out_table
+)
+{
+ uacpi_status ret;
+ struct table_search_ctx ctx = { 0 };
+
+ ctx.id = id;
+ ctx.out_table = out_table;
+ ctx.search_type = SEARCH_TYPE_BY_ID;
+ ctx.status = UACPI_STATUS_NOT_FOUND;
+
+ ret = uacpi_for_each_table(base_idx, do_search_tables, &ctx);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ return ctx.status;
+}
+
+uacpi_status uacpi_table_find_by_signature(
+ const uacpi_char *signature_string, struct uacpi_table *out_table
+)
+{
+ struct uacpi_table_identifiers id = { 0 };
+
+ id.signature.text[0] = signature_string[0];
+ id.signature.text[1] = signature_string[1];
+ id.signature.text[2] = signature_string[2];
+ id.signature.text[3] = signature_string[3];
+
+ ENSURE_TABLES_ONLINE();
+
+ return find_table(0, &id, out_table);
+}
+
+uacpi_status uacpi_table_find_next_with_same_signature(
+ uacpi_table *in_out_table
+)
+{
+ struct uacpi_table_identifiers id = { 0 };
+
+ ENSURE_TABLES_ONLINE();
+
+ if (uacpi_unlikely(in_out_table->ptr == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ uacpi_memcpy(&id.signature, in_out_table->hdr->signature,
+ sizeof(id.signature));
+ uacpi_table_unref(in_out_table);
+
+ return find_table(in_out_table->index + 1, &id, in_out_table);
+}
+
+uacpi_status uacpi_table_find(
+ const uacpi_table_identifiers *id, uacpi_table *out_table
+)
+{
+ ENSURE_TABLES_ONLINE();
+
+ return find_table(0, id, out_table);
+}
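+
+/*
+ * Typical lookup pattern built on the helpers above (a sketch, not part of
+ * this file): look a table up by signature, use the mapping while the
+ * reference is held, then drop it again.
+ *
+ *     uacpi_table tbl;
+ *
+ *     if (uacpi_table_find_by_signature("APIC", &tbl) == UACPI_STATUS_OK) {
+ *         // tbl.ptr/tbl.hdr stay valid until the matching unref
+ *         uacpi_table_unref(&tbl);
+ *     }
+ */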
+
+#define TABLE_CTL_SET_FLAGS (1 << 0)
+#define TABLE_CTL_CLEAR_FLAGS (1 << 1)
+#define TABLE_CTL_VALIDATE_SET_FLAGS (1 << 2)
+#define TABLE_CTL_VALIDATE_CLEAR_FLAGS (1 << 3)
+#define TABLE_CTL_GET (1 << 4)
+#define TABLE_CTL_PUT (1 << 5)
+
+struct table_ctl_request {
+ uacpi_u8 type;
+
+ uacpi_u8 expect_set;
+ uacpi_u8 expect_clear;
+ uacpi_u8 set;
+ uacpi_u8 clear;
+
+ void *out_tbl;
+};
+
+static uacpi_status table_ctl(uacpi_size idx, struct table_ctl_request *req)
+{
+ uacpi_status ret;
+ struct uacpi_installed_table *tbl;
+
+ ENSURE_TABLES_ONLINE();
+
+ ret = uacpi_acquire_native_mutex_may_be_null(table_mutex);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (uacpi_unlikely(table_array_size(&tables) <= idx)) {
+ uacpi_error(
+ "requested invalid table index %zu (%zu tables installed)\n",
+ idx, table_array_size(&tables)
+ );
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+
+    tbl = table_array_at(&tables, idx);
+    if (uacpi_unlikely(tbl->flags & UACPI_TABLE_INVALID)) {
+        // Bail out via the common path so that table_mutex is released
+        ret = UACPI_STATUS_INVALID_ARGUMENT;
+        goto out;
+    }
+
+ if (req->type & TABLE_CTL_VALIDATE_SET_FLAGS) {
+ uacpi_u8 mask = req->expect_set;
+
+ if (uacpi_unlikely((tbl->flags & mask) != mask)) {
+ uacpi_error(
+ "unexpected table '%.4s' flags %02X, expected %02X to be set\n",
+ tbl->hdr.signature, tbl->flags, mask
+ );
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out;
+ }
+ }
+
+ if (req->type & TABLE_CTL_VALIDATE_CLEAR_FLAGS) {
+ uacpi_u8 mask = req->expect_clear;
+
+ if (uacpi_unlikely((tbl->flags & mask) != 0)) {
+ uacpi_error(
+ "unexpected table '%.4s' flags %02X, expected %02X "
+ "to be clear\n", tbl->hdr.signature, tbl->flags, mask
+ );
+ ret = UACPI_STATUS_ALREADY_EXISTS;
+ goto out;
+ }
+ }
+
+ if (req->type & TABLE_CTL_GET) {
+ ret = table_ref_unlocked(tbl);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+
+ req->out_tbl = tbl->ptr;
+ }
+
+ if (req->type & TABLE_CTL_PUT) {
+ ret = table_unref_unlocked(tbl);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+ }
+
+ if (req->type & TABLE_CTL_SET_FLAGS)
+ tbl->flags |= req->set;
+ if (req->type & TABLE_CTL_CLEAR_FLAGS)
+ tbl->flags &= ~req->clear;
+
+out:
+ uacpi_release_native_mutex_may_be_null(table_mutex);
+ return ret;
+}
+
+#ifndef UACPI_BAREBONES_MODE
+uacpi_status uacpi_table_load_with_cause(
+ uacpi_size idx, enum uacpi_table_load_cause cause
+)
+{
+ uacpi_status ret;
+ struct table_ctl_request req = {
+ .type = TABLE_CTL_SET_FLAGS | TABLE_CTL_VALIDATE_CLEAR_FLAGS |
+ TABLE_CTL_GET,
+ .set = UACPI_TABLE_LOADED,
+ .expect_clear = UACPI_TABLE_LOADED,
+ };
+
+ ret = table_ctl(idx, &req);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_execute_table(req.out_tbl, cause);
+
+ req.type = TABLE_CTL_PUT;
+ table_ctl(idx, &req);
+ return ret;
+}
+
+uacpi_status uacpi_table_load(uacpi_size idx)
+{
+ return uacpi_table_load_with_cause(idx, UACPI_TABLE_LOAD_CAUSE_HOST);
+}
+
+void uacpi_table_mark_as_loaded(uacpi_size idx)
+{
+ struct table_ctl_request req = {
+ .type = TABLE_CTL_SET_FLAGS, .set = UACPI_TABLE_LOADED
+ };
+
+ table_ctl(idx, &req);
+}
+#endif // !UACPI_BAREBONES_MODE
+
+uacpi_status uacpi_table_ref(uacpi_table *tbl)
+{
+ struct table_ctl_request req = {
+ .type = TABLE_CTL_GET
+ };
+
+ return table_ctl(tbl->index, &req);
+}
+
+uacpi_status uacpi_table_unref(uacpi_table *tbl)
+{
+ struct table_ctl_request req = {
+ .type = TABLE_CTL_PUT
+ };
+
+ return table_ctl(tbl->index, &req);
+}
+
+uacpi_u16 fadt_version_sizes[] = {
+ 116, 132, 244, 244, 268, 276
+};
+
+static void fadt_ensure_correct_revision(struct acpi_fadt *fadt)
+{
+ uacpi_size current_rev, rev;
+
+ current_rev = fadt->hdr.revision;
+
+ for (rev = 0; rev < UACPI_ARRAY_SIZE(fadt_version_sizes); ++rev) {
+ if (fadt->hdr.length <= fadt_version_sizes[rev])
+ break;
+ }
+
+ if (rev == UACPI_ARRAY_SIZE(fadt_version_sizes)) {
+ uacpi_trace(
+ "FADT revision (%zu) is likely greater than the last "
+ "supported, reducing to %zu\n", current_rev, rev
+ );
+ fadt->hdr.revision = rev;
+ return;
+ }
+
+ rev++;
+
+ if (current_rev != rev && !(rev == 3 && current_rev == 4)) {
+ uacpi_warn(
+ "FADT length %u doesn't match expected for revision %zu, "
+ "assuming version %zu\n", fadt->hdr.length, current_rev,
+ rev
+ );
+ fadt->hdr.revision = rev;
+ }
+}
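+
+/*
+ * Worked example of the length-to-revision mapping above: fadt_version_sizes[]
+ * is indexed 0..5 for revisions 1..6, so a FADT that declares revision 3 but
+ * is only 132 bytes long stops at index 1 and is downgraded to revision 2,
+ * while the (rev == 3 && current_rev == 4) carve-out exists because revisions
+ * 3 and 4 share the same 244-byte length.
+ */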
+
+static void gas_init_system_io(
+ struct acpi_gas *gas, uacpi_u64 address, uacpi_u8 byte_size
+)
+{
+ gas->address = address;
+ gas->address_space_id = UACPI_ADDRESS_SPACE_SYSTEM_IO;
+ gas->register_bit_width = UACPI_MIN(255, byte_size * 8);
+ gas->register_bit_offset = 0;
+ gas->access_size = 0;
+}
+
+
+struct register_description {
+ uacpi_size offset, xoffset;
+ uacpi_size length_offset;
+};
+
+#define fadt_offset(field) uacpi_offsetof(struct acpi_fadt, field)
+
+/*
+ * We convert all the legacy registers into GAS format and write them into
+ * the x_* fields for convenience and faster access at runtime.
+ */
+static struct register_description fadt_registers[] = {
+ {
+ .offset = fadt_offset(pm1a_evt_blk),
+ .xoffset = fadt_offset(x_pm1a_evt_blk),
+ .length_offset = fadt_offset(pm1_evt_len),
+ },
+ {
+ .offset = fadt_offset(pm1b_evt_blk),
+ .xoffset = fadt_offset(x_pm1b_evt_blk),
+ .length_offset = fadt_offset(pm1_evt_len),
+ },
+ {
+ .offset = fadt_offset(pm1a_cnt_blk),
+ .xoffset = fadt_offset(x_pm1a_cnt_blk),
+ .length_offset = fadt_offset(pm1_cnt_len),
+ },
+ {
+ .offset = fadt_offset(pm1b_cnt_blk),
+ .xoffset = fadt_offset(x_pm1b_cnt_blk),
+ .length_offset = fadt_offset(pm1_cnt_len),
+ },
+ {
+ .offset = fadt_offset(pm2_cnt_blk),
+ .xoffset = fadt_offset(x_pm2_cnt_blk),
+ .length_offset = fadt_offset(pm2_cnt_len),
+ },
+ {
+ .offset = fadt_offset(pm_tmr_blk),
+ .xoffset = fadt_offset(x_pm_tmr_blk),
+ .length_offset = fadt_offset(pm_tmr_len),
+ },
+ {
+ .offset = fadt_offset(gpe0_blk),
+ .xoffset = fadt_offset(x_gpe0_blk),
+ .length_offset = fadt_offset(gpe0_blk_len),
+ },
+ {
+ .offset = fadt_offset(gpe1_blk),
+ .xoffset = fadt_offset(x_gpe1_blk),
+ .length_offset = fadt_offset(gpe1_blk_len),
+ },
+};
+
+static void *fadt_relative(uacpi_size offset)
+{
+ return ((uacpi_u8*)&g_uacpi_rt_ctx.fadt) + offset;
+}
+
+static void convert_registers_to_gas(void)
+{
+ uacpi_size i;
+ struct register_description *desc;
+ struct acpi_gas *gas;
+ uacpi_u32 legacy_addr;
+ uacpi_u8 length;
+
+ for (i = 0; i < UACPI_ARRAY_SIZE(fadt_registers); ++i) {
+ desc = &fadt_registers[i];
+
+ legacy_addr = *(uacpi_u32*)fadt_relative(desc->offset);
+ length = *(uacpi_u8*)fadt_relative(desc->length_offset);
+ gas = fadt_relative(desc->xoffset);
+
+ if (gas->address)
+ continue;
+
+ gas_init_system_io(gas, legacy_addr, length);
+ }
+}
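+
+/*
+ * Example of the conversion above (hypothetical firmware values): a legacy
+ * PM1a_EVT_BLK of 0x1800 with PM1_EVT_LEN of 4 yields an X_PM1A_EVT_BLK GAS
+ * describing a 32-bit wide SystemIO register block at port 0x1800. Non-zero
+ * x_* entries provided by the firmware are left untouched.
+ */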
+
+#ifndef UACPI_BAREBONES_MODE
+static void split_one_block(
+ struct acpi_gas *src, struct acpi_gas *dst0, struct acpi_gas *dst1
+)
+{
+ uacpi_size byte_length;
+
+ if (src->address == 0)
+ return;
+
+ byte_length = src->register_bit_width / 8;
+ byte_length /= 2;
+
+ gas_init_system_io(dst0, src->address, byte_length);
+ gas_init_system_io(dst1, src->address + byte_length, byte_length);
+}
+
+static void split_event_blocks(void)
+{
+ split_one_block(
+ &g_uacpi_rt_ctx.fadt.x_pm1a_evt_blk,
+ &g_uacpi_rt_ctx.pm1a_status_blk,
+ &g_uacpi_rt_ctx.pm1a_enable_blk
+ );
+ split_one_block(
+ &g_uacpi_rt_ctx.fadt.x_pm1b_evt_blk,
+ &g_uacpi_rt_ctx.pm1b_status_blk,
+ &g_uacpi_rt_ctx.pm1b_enable_blk
+ );
+}
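+
+/*
+ * Continuing the example above: a 32-bit x_pm1a_evt_blk at port 0x1800 is
+ * split into two 16-bit halves, pm1a_status_blk at 0x1800 and
+ * pm1a_enable_blk at 0x1802, matching the PM1 status/enable register layout.
+ */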
+#endif // !UACPI_BAREBONES_MODE
+
+static uacpi_status initialize_fadt(const void *virt)
+{
+ uacpi_status ret;
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+ const struct acpi_sdt_hdr *hdr = virt;
+
+ /*
+ * Here we (roughly) follow ACPICA initialization sequence to make sure we
+ * handle potential BIOS quirks with garbage inside FADT correctly.
+ */
+
+ uacpi_memcpy(fadt, hdr, UACPI_MIN(sizeof(*fadt), hdr->length));
+
+#if !defined(UACPI_REDUCED_HARDWARE) && !defined(UACPI_BAREBONES_MODE)
+ g_uacpi_rt_ctx.is_hardware_reduced = fadt->flags & ACPI_HW_REDUCED_ACPI;
+#endif
+
+ fadt_ensure_correct_revision(fadt);
+
+ /*
+ * These are reserved prior to version 3, so zero them out to work around
+ * BIOS implementations that might dirty these.
+ */
+ if (fadt->hdr.revision <= 2) {
+ fadt->preferred_pm_profile = 0;
+ fadt->pstate_cnt = 0;
+ fadt->cst_cnt = 0;
+ fadt->iapc_boot_arch = 0;
+ }
+
+ if (!fadt->x_dsdt)
+ fadt->x_dsdt = fadt->dsdt;
+
+ if (fadt->x_dsdt) {
+ ret = table_install_physical_with_origin_unlocked(
+ fadt->x_dsdt, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL,
+ ACPI_DSDT_SIGNATURE, UACPI_NULL
+ );
+ if (uacpi_unlikely(ret != UACPI_STATUS_OK &&
+ ret != UACPI_STATUS_OVERRIDDEN))
+ return ret;
+ }
+
+ /*
+ * Unconditionally use 32 bit FACS if it exists, as 64 bit FACS is known
+ * to cause issues on some firmware:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
+ *
+ * Note that we don't install it here as FACS needs permanent mapping, which
+ * we might not be able to obtain at this point in case of early table
+ * access.
+ */
+ if (fadt->firmware_ctrl)
+ fadt->x_firmware_ctrl = fadt->firmware_ctrl;
+
+ if (!uacpi_is_hardware_reduced()) {
+ convert_registers_to_gas();
+#ifndef UACPI_BAREBONES_MODE
+ split_event_blocks();
+#endif
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_table_fadt(struct acpi_fadt **out_fadt)
+{
+ ENSURE_TABLES_ONLINE();
+
+ *out_fadt = &g_uacpi_rt_ctx.fadt;
+ return UACPI_STATUS_OK;
+}
diff --git a/sys/dev/acpi/uacpi/types.c b/sys/dev/acpi/uacpi/types.c
new file mode 100644
index 0000000..840d3ef
--- /dev/null
+++ b/sys/dev/acpi/uacpi/types.c
@@ -0,0 +1,1489 @@
+#include <uacpi/types.h>
+#include <uacpi/internal/types.h>
+#include <uacpi/internal/stdlib.h>
+#include <uacpi/internal/shareable.h>
+#include <uacpi/internal/dynamic_array.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/tables.h>
+#include <uacpi/kernel_api.h>
+
+const uacpi_char *uacpi_address_space_to_string(
+ enum uacpi_address_space space
+)
+{
+ switch (space) {
+ case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY:
+ return "SystemMemory";
+ case UACPI_ADDRESS_SPACE_SYSTEM_IO:
+ return "SystemIO";
+ case UACPI_ADDRESS_SPACE_PCI_CONFIG:
+ return "PCI_Config";
+ case UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER:
+ return "EmbeddedControl";
+ case UACPI_ADDRESS_SPACE_SMBUS:
+ return "SMBus";
+ case UACPI_ADDRESS_SPACE_SYSTEM_CMOS:
+ return "SystemCMOS";
+ case UACPI_ADDRESS_SPACE_PCI_BAR_TARGET:
+ return "PciBarTarget";
+ case UACPI_ADDRESS_SPACE_IPMI:
+ return "IPMI";
+ case UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO:
+ return "GeneralPurposeIO";
+ case UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS:
+ return "GenericSerialBus";
+ case UACPI_ADDRESS_SPACE_PCC:
+ return "PCC";
+ case UACPI_ADDRESS_SPACE_PRM:
+ return "PlatformRtMechanism";
+ case UACPI_ADDRESS_SPACE_FFIXEDHW:
+ return "FFixedHW";
+ case UACPI_ADDRESS_SPACE_TABLE_DATA:
+ return "TableData";
+ default:
+ return "<vendor specific>";
+ }
+}
+
+#ifndef UACPI_BAREBONES_MODE
+
+const uacpi_char *uacpi_object_type_to_string(uacpi_object_type type)
+{
+ switch (type) {
+ case UACPI_OBJECT_UNINITIALIZED:
+ return "Uninitialized";
+ case UACPI_OBJECT_INTEGER:
+ return "Integer";
+ case UACPI_OBJECT_STRING:
+ return "String";
+ case UACPI_OBJECT_BUFFER:
+ return "Buffer";
+ case UACPI_OBJECT_PACKAGE:
+ return "Package";
+ case UACPI_OBJECT_FIELD_UNIT:
+ return "Field Unit";
+ case UACPI_OBJECT_DEVICE:
+ return "Device";
+ case UACPI_OBJECT_EVENT:
+ return "Event";
+ case UACPI_OBJECT_REFERENCE:
+ return "Reference";
+ case UACPI_OBJECT_BUFFER_INDEX:
+ return "Buffer Index";
+ case UACPI_OBJECT_METHOD:
+ return "Method";
+ case UACPI_OBJECT_MUTEX:
+ return "Mutex";
+ case UACPI_OBJECT_OPERATION_REGION:
+ return "Operation Region";
+ case UACPI_OBJECT_POWER_RESOURCE:
+ return "Power Resource";
+ case UACPI_OBJECT_PROCESSOR:
+ return "Processor";
+ case UACPI_OBJECT_THERMAL_ZONE:
+ return "Thermal Zone";
+ case UACPI_OBJECT_BUFFER_FIELD:
+ return "Buffer Field";
+ case UACPI_OBJECT_DEBUG:
+ return "Debug";
+ default:
+ return "<Invalid type>";
+ }
+}
+
+static uacpi_bool buffer_alloc(uacpi_object *obj, uacpi_size initial_size)
+{
+ uacpi_buffer *buf;
+
+ buf = uacpi_kernel_alloc_zeroed(sizeof(uacpi_buffer));
+ if (uacpi_unlikely(buf == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(buf);
+
+ if (initial_size) {
+ buf->data = uacpi_kernel_alloc(initial_size);
+ if (uacpi_unlikely(buf->data == UACPI_NULL)) {
+ uacpi_free(buf, sizeof(*buf));
+ return UACPI_FALSE;
+ }
+
+ buf->size = initial_size;
+ }
+
+ obj->buffer = buf;
+ return UACPI_TRUE;
+}
+
+static uacpi_bool empty_buffer_or_string_alloc(uacpi_object *object)
+{
+ return buffer_alloc(object, 0);
+}
+
+uacpi_bool uacpi_package_fill(
+ uacpi_package *pkg, uacpi_size num_elements,
+ enum uacpi_prealloc_objects prealloc_objects
+)
+{
+ uacpi_size i;
+
+ if (uacpi_unlikely(num_elements == 0))
+ return UACPI_TRUE;
+
+ pkg->objects = uacpi_kernel_alloc_zeroed(
+ num_elements * sizeof(uacpi_handle)
+ );
+ if (uacpi_unlikely(pkg->objects == UACPI_NULL))
+ return UACPI_FALSE;
+
+ pkg->count = num_elements;
+
+ if (prealloc_objects == UACPI_PREALLOC_OBJECTS_YES) {
+ for (i = 0; i < num_elements; ++i) {
+ pkg->objects[i] = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+
+ if (uacpi_unlikely(pkg->objects[i] == UACPI_NULL))
+ return UACPI_FALSE;
+ }
+ }
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool package_alloc(
+ uacpi_object *obj, uacpi_size initial_size,
+ enum uacpi_prealloc_objects prealloc
+)
+{
+ uacpi_package *pkg;
+
+ pkg = uacpi_kernel_alloc_zeroed(sizeof(uacpi_package));
+ if (uacpi_unlikely(pkg == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(pkg);
+
+ if (uacpi_unlikely(!uacpi_package_fill(pkg, initial_size, prealloc))) {
+ uacpi_free(pkg, sizeof(*pkg));
+ return UACPI_FALSE;
+ }
+
+ obj->package = pkg;
+ return UACPI_TRUE;
+}
+
+static uacpi_bool empty_package_alloc(uacpi_object *object)
+{
+ return package_alloc(object, 0, UACPI_PREALLOC_OBJECTS_NO);
+}
+
+uacpi_mutex *uacpi_create_mutex(void)
+{
+ uacpi_mutex *mutex;
+
+ mutex = uacpi_kernel_alloc_zeroed(sizeof(uacpi_mutex));
+ if (uacpi_unlikely(mutex == UACPI_NULL))
+ return UACPI_NULL;
+
+ mutex->owner = UACPI_THREAD_ID_NONE;
+
+ mutex->handle = uacpi_kernel_create_mutex();
+ if (mutex->handle == UACPI_NULL) {
+ uacpi_free(mutex, sizeof(*mutex));
+ return UACPI_NULL;
+ }
+
+ uacpi_shareable_init(mutex);
+ return mutex;
+}
+
+static uacpi_bool mutex_alloc(uacpi_object *obj)
+{
+ obj->mutex = uacpi_create_mutex();
+ return obj->mutex != UACPI_NULL;
+}
+
+static uacpi_bool event_alloc(uacpi_object *obj)
+{
+ uacpi_event *event;
+
+ event = uacpi_kernel_alloc_zeroed(sizeof(uacpi_event));
+ if (uacpi_unlikely(event == UACPI_NULL))
+ return UACPI_FALSE;
+
+ event->handle = uacpi_kernel_create_event();
+ if (event->handle == UACPI_NULL) {
+ uacpi_free(event, sizeof(*event));
+ return UACPI_FALSE;
+ }
+
+ uacpi_shareable_init(event);
+ obj->event = event;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool method_alloc(uacpi_object *obj)
+{
+ uacpi_control_method *method;
+
+ method = uacpi_kernel_alloc_zeroed(sizeof(*method));
+ if (uacpi_unlikely(method == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(method);
+ obj->method = method;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool op_region_alloc(uacpi_object *obj)
+{
+ uacpi_operation_region *op_region;
+
+ op_region = uacpi_kernel_alloc_zeroed(sizeof(*op_region));
+ if (uacpi_unlikely(op_region == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(op_region);
+ obj->op_region = op_region;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool field_unit_alloc(uacpi_object *obj)
+{
+ uacpi_field_unit *field_unit;
+
+ field_unit = uacpi_kernel_alloc_zeroed(sizeof(*field_unit));
+ if (uacpi_unlikely(field_unit == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(field_unit);
+ obj->field_unit = field_unit;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool processor_alloc(uacpi_object *obj)
+{
+ uacpi_processor *processor;
+
+ processor = uacpi_kernel_alloc_zeroed(sizeof(*processor));
+ if (uacpi_unlikely(processor == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(processor);
+ obj->processor = processor;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool device_alloc(uacpi_object *obj)
+{
+ uacpi_device *device;
+
+ device = uacpi_kernel_alloc_zeroed(sizeof(*device));
+ if (uacpi_unlikely(device == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(device);
+ obj->device = device;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_bool thermal_zone_alloc(uacpi_object *obj)
+{
+ uacpi_thermal_zone *thermal_zone;
+
+ thermal_zone = uacpi_kernel_alloc_zeroed(sizeof(*thermal_zone));
+ if (uacpi_unlikely(thermal_zone == UACPI_NULL))
+ return UACPI_FALSE;
+
+ uacpi_shareable_init(thermal_zone);
+ obj->thermal_zone = thermal_zone;
+
+ return UACPI_TRUE;
+}
+
+typedef uacpi_bool (*object_ctor)(uacpi_object *obj);
+
+static object_ctor object_constructor_table[UACPI_OBJECT_MAX_TYPE_VALUE + 1] = {
+ [UACPI_OBJECT_STRING] = empty_buffer_or_string_alloc,
+ [UACPI_OBJECT_BUFFER] = empty_buffer_or_string_alloc,
+ [UACPI_OBJECT_PACKAGE] = empty_package_alloc,
+ [UACPI_OBJECT_FIELD_UNIT] = field_unit_alloc,
+ [UACPI_OBJECT_MUTEX] = mutex_alloc,
+ [UACPI_OBJECT_EVENT] = event_alloc,
+ [UACPI_OBJECT_OPERATION_REGION] = op_region_alloc,
+ [UACPI_OBJECT_METHOD] = method_alloc,
+ [UACPI_OBJECT_PROCESSOR] = processor_alloc,
+ [UACPI_OBJECT_DEVICE] = device_alloc,
+ [UACPI_OBJECT_THERMAL_ZONE] = thermal_zone_alloc,
+};
+
+uacpi_object *uacpi_create_object(uacpi_object_type type)
+{
+ uacpi_object *ret;
+ object_ctor ctor;
+
+ ret = uacpi_kernel_alloc_zeroed(sizeof(*ret));
+ if (uacpi_unlikely(ret == UACPI_NULL))
+ return ret;
+
+ uacpi_shareable_init(ret);
+ ret->type = type;
+
+ ctor = object_constructor_table[type];
+ if (ctor == UACPI_NULL)
+ return ret;
+
+ if (uacpi_unlikely(!ctor(ret))) {
+ uacpi_free(ret, sizeof(*ret));
+ return UACPI_NULL;
+ }
+
+ return ret;
+}
+
+static void free_buffer(uacpi_handle handle)
+{
+ uacpi_buffer *buf = handle;
+
+ if (buf->data != UACPI_NULL)
+ /*
+ * If buffer has a size of 0 but a valid data pointer it's probably an
+ * "empty" buffer allocated by the interpreter in make_null_buffer
+ * and its real size is actually 1.
+ */
+ uacpi_free(buf->data, UACPI_MAX(buf->size, 1));
+
+ uacpi_free(buf, sizeof(*buf));
+}
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(free_queue, uacpi_package*, 4)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(free_queue, uacpi_package*, static)
+
+static uacpi_bool free_queue_push(struct free_queue *queue, uacpi_package *pkg)
+{
+ uacpi_package **slot;
+
+ slot = free_queue_alloc(queue);
+ if (uacpi_unlikely(slot == UACPI_NULL))
+ return UACPI_FALSE;
+
+ *slot = pkg;
+ return UACPI_TRUE;
+}
+
+static void free_object(uacpi_object *obj);
+
+// No references allowed here, only plain objects
+static void free_plain_no_recurse(uacpi_object *obj, struct free_queue *queue)
+{
+ switch (obj->type) {
+ case UACPI_OBJECT_PACKAGE:
+ if (uacpi_shareable_unref(obj->package) > 1)
+ break;
+
+ if (uacpi_unlikely(!free_queue_push(queue,
+ obj->package))) {
+ uacpi_warn(
+ "unable to free nested package @%p: not enough memory\n",
+ obj->package
+ );
+ }
+
+ // Don't call free_object here as that will recurse
+ uacpi_free(obj, sizeof(*obj));
+ break;
+ default:
+ /*
+ * This call is guaranteed to not recurse further as we handle
+ * recursive cases elsewhere explicitly.
+ */
+ free_object(obj);
+ }
+}
+
+static void unref_plain_no_recurse(uacpi_object *obj, struct free_queue *queue)
+{
+ if (uacpi_shareable_unref(obj) > 1)
+ return;
+
+ free_plain_no_recurse(obj, queue);
+}
+
+static void unref_chain_no_recurse(uacpi_object *obj, struct free_queue *queue)
+{
+ uacpi_object *next_obj = UACPI_NULL;
+
+ while (obj) {
+ if (obj->type == UACPI_OBJECT_REFERENCE)
+ next_obj = obj->inner_object;
+
+ if (uacpi_shareable_unref(obj) > 1)
+ goto do_next;
+
+ if (obj->type == UACPI_OBJECT_REFERENCE) {
+ uacpi_free(obj, sizeof(*obj));
+ } else {
+ free_plain_no_recurse(obj, queue);
+ }
+
+ do_next:
+ obj = next_obj;
+ next_obj = UACPI_NULL;
+ }
+}
+
+static void unref_object_no_recurse(uacpi_object *obj, struct free_queue *queue)
+{
+ if (obj->type == UACPI_OBJECT_REFERENCE) {
+ unref_chain_no_recurse(obj, queue);
+ return;
+ }
+
+ unref_plain_no_recurse(obj, queue);
+}
+
+static void free_package(uacpi_handle handle)
+{
+ struct free_queue queue = { 0 };
+ uacpi_package *pkg = handle;
+ uacpi_object *obj;
+ uacpi_size i;
+
+ free_queue_push(&queue, pkg);
+
+ while (free_queue_size(&queue) != 0) {
+ pkg = *free_queue_last(&queue);
+ free_queue_pop(&queue);
+
+ /*
+ * 1. Unref/free every object in the package. Note that this might add
+ * even more packages into the free queue.
+ */
+ for (i = 0; i < pkg->count; ++i) {
+ obj = pkg->objects[i];
+ unref_object_no_recurse(obj, &queue);
+ }
+
+ // 2. Release the object array
+ uacpi_free(pkg->objects, sizeof(*pkg->objects) * pkg->count);
+
+ // 3. Release the package itself
+ uacpi_free(pkg, sizeof(*pkg));
+ }
+
+ free_queue_clear(&queue);
+}
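+
+/*
+ * Sketch of how the queue above unwinds a nested package such as
+ * [A, [B, [C]]] without recursion: freeing the outer package pushes the
+ * middle package onto the queue, the next loop iteration pops it and pushes
+ * the innermost one, and so on until the queue drains.
+ */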
+
+static void free_mutex(uacpi_handle handle)
+{
+ uacpi_mutex *mutex = handle;
+
+ uacpi_kernel_free_mutex(mutex->handle);
+ uacpi_free(mutex, sizeof(*mutex));
+}
+
+void uacpi_mutex_unref(uacpi_mutex *mutex)
+{
+ if (mutex == UACPI_NULL)
+ return;
+
+ uacpi_shareable_unref_and_delete_if_last(mutex, free_mutex);
+}
+
+static void free_event(uacpi_handle handle)
+{
+ uacpi_event *event = handle;
+
+ uacpi_kernel_free_event(event->handle);
+ uacpi_free(event, sizeof(*event));
+}
+
+static void free_address_space_handler(uacpi_handle handle)
+{
+ uacpi_address_space_handler *handler = handle;
+ uacpi_free(handler, sizeof(*handler));
+}
+
+static void free_address_space_handlers(
+ uacpi_address_space_handler *handler
+)
+{
+ uacpi_address_space_handler *next_handler;
+
+ while (handler) {
+ next_handler = handler->next;
+ uacpi_shareable_unref_and_delete_if_last(
+ handler, free_address_space_handler
+ );
+ handler = next_handler;
+ }
+}
+
+static void free_device_notify_handlers(uacpi_device_notify_handler *handler)
+{
+ uacpi_device_notify_handler *next_handler;
+
+ while (handler) {
+ next_handler = handler->next;
+ uacpi_free(handler, sizeof(*handler));
+ handler = next_handler;
+ }
+}
+
+static void free_handlers(uacpi_handle handle)
+{
+ uacpi_handlers *handlers = handle;
+
+ free_address_space_handlers(handlers->address_space_head);
+ free_device_notify_handlers(handlers->notify_head);
+}
+
+void uacpi_address_space_handler_unref(uacpi_address_space_handler *handler)
+{
+ uacpi_shareable_unref_and_delete_if_last(
+ handler, free_address_space_handler
+ );
+}
+
+static void free_op_region(uacpi_handle handle)
+{
+ uacpi_operation_region *op_region = handle;
+
+ if (uacpi_unlikely(op_region->handler != UACPI_NULL)) {
+ uacpi_warn(
+ "BUG: attempting to free an opregion@%p with a handler attached\n",
+ op_region
+ );
+ }
+
+ switch (op_region->space) {
+ case UACPI_ADDRESS_SPACE_PCC:
+ uacpi_free(op_region->internal_buffer, op_region->length);
+ break;
+ case UACPI_ADDRESS_SPACE_TABLE_DATA: {
+ struct uacpi_table table = { 0 };
+
+ table.index = op_region->table_idx;
+ uacpi_table_unref(
+ &table
+ );
+ break;
+ }
+ default:
+ break;
+ }
+
+ uacpi_free(op_region, sizeof(*op_region));
+}
+
+static void free_device(uacpi_handle handle)
+{
+ uacpi_device *device = handle;
+ free_handlers(device);
+ uacpi_free(device, sizeof(*device));
+}
+
+static void free_processor(uacpi_handle handle)
+{
+ uacpi_processor *processor = handle;
+ free_handlers(processor);
+ uacpi_free(processor, sizeof(*processor));
+}
+
+static void free_thermal_zone(uacpi_handle handle)
+{
+ uacpi_thermal_zone *thermal_zone = handle;
+ free_handlers(thermal_zone);
+ uacpi_free(thermal_zone, sizeof(*thermal_zone));
+}
+
+static void free_field_unit(uacpi_handle handle)
+{
+ uacpi_field_unit *field_unit = handle;
+
+ if (field_unit->connection)
+ uacpi_object_unref(field_unit->connection);
+
+ switch (field_unit->kind) {
+ case UACPI_FIELD_UNIT_KIND_NORMAL:
+ uacpi_namespace_node_unref(field_unit->region);
+ break;
+ case UACPI_FIELD_UNIT_KIND_BANK:
+ uacpi_namespace_node_unref(field_unit->bank_region);
+ uacpi_shareable_unref_and_delete_if_last(
+ field_unit->bank_selection, free_field_unit
+ );
+ break;
+ case UACPI_FIELD_UNIT_KIND_INDEX:
+ uacpi_shareable_unref_and_delete_if_last(
+ field_unit->index, free_field_unit
+ );
+ uacpi_shareable_unref_and_delete_if_last(
+ field_unit->data, free_field_unit
+ );
+ break;
+ default:
+ break;
+ }
+
+ uacpi_free(field_unit, sizeof(*field_unit));
+}
+
+static void free_method(uacpi_handle handle)
+{
+ uacpi_control_method *method = handle;
+
+ uacpi_shareable_unref_and_delete_if_last(
+ method->mutex, free_mutex
+ );
+
+ if (!method->native_call && method->owns_code)
+ uacpi_free(method->code, method->size);
+ uacpi_free(method, sizeof(*method));
+}
+
+void uacpi_method_unref(uacpi_control_method *method)
+{
+ uacpi_shareable_unref_and_delete_if_last(method, free_method);
+}
+
+static void free_object_storage(uacpi_object *obj)
+{
+ switch (obj->type) {
+ case UACPI_OBJECT_STRING:
+ case UACPI_OBJECT_BUFFER:
+ uacpi_shareable_unref_and_delete_if_last(obj->buffer, free_buffer);
+ break;
+ case UACPI_OBJECT_BUFFER_FIELD:
+ uacpi_shareable_unref_and_delete_if_last(obj->buffer_field.backing,
+ free_buffer);
+ break;
+ case UACPI_OBJECT_BUFFER_INDEX:
+ uacpi_shareable_unref_and_delete_if_last(obj->buffer_index.buffer,
+ free_buffer);
+ break;
+ case UACPI_OBJECT_METHOD:
+ uacpi_method_unref(obj->method);
+ break;
+ case UACPI_OBJECT_PACKAGE:
+ uacpi_shareable_unref_and_delete_if_last(obj->package,
+ free_package);
+ break;
+ case UACPI_OBJECT_FIELD_UNIT:
+ uacpi_shareable_unref_and_delete_if_last(obj->field_unit,
+ free_field_unit);
+ break;
+ case UACPI_OBJECT_MUTEX:
+ uacpi_mutex_unref(obj->mutex);
+ break;
+ case UACPI_OBJECT_EVENT:
+ uacpi_shareable_unref_and_delete_if_last(obj->event,
+ free_event);
+ break;
+ case UACPI_OBJECT_OPERATION_REGION:
+ uacpi_shareable_unref_and_delete_if_last(obj->op_region,
+ free_op_region);
+ break;
+ case UACPI_OBJECT_PROCESSOR:
+ uacpi_shareable_unref_and_delete_if_last(obj->processor,
+ free_processor);
+ break;
+ case UACPI_OBJECT_DEVICE:
+ uacpi_shareable_unref_and_delete_if_last(obj->device,
+ free_device);
+ break;
+ case UACPI_OBJECT_THERMAL_ZONE:
+ uacpi_shareable_unref_and_delete_if_last(obj->thermal_zone,
+ free_thermal_zone);
+ break;
+ default:
+ break;
+ }
+}
+
+static void free_object(uacpi_object *obj)
+{
+ free_object_storage(obj);
+ uacpi_free(obj, sizeof(*obj));
+}
+
+static void make_chain_bugged(uacpi_object *obj)
+{
+ uacpi_warn("object refcount bug, marking chain @%p as bugged\n", obj);
+
+ while (obj) {
+ uacpi_make_shareable_bugged(obj);
+
+ if (obj->type == UACPI_OBJECT_REFERENCE)
+ obj = obj->inner_object;
+ else
+ obj = UACPI_NULL;
+ }
+}
+
+void uacpi_object_ref(uacpi_object *obj)
+{
+ while (obj) {
+ uacpi_shareable_ref(obj);
+
+ if (obj->type == UACPI_OBJECT_REFERENCE)
+ obj = obj->inner_object;
+ else
+ obj = UACPI_NULL;
+ }
+}
+
+static void free_chain(uacpi_object *obj)
+{
+ uacpi_object *next_obj = UACPI_NULL;
+
+ while (obj) {
+ if (obj->type == UACPI_OBJECT_REFERENCE)
+ next_obj = obj->inner_object;
+
+ if (uacpi_shareable_refcount(obj) == 0)
+ free_object(obj);
+
+ obj = next_obj;
+ next_obj = UACPI_NULL;
+ }
+}
+
+void uacpi_object_unref(uacpi_object *obj)
+{
+ uacpi_object *this_obj = obj;
+
+ if (!obj)
+ return;
+
+ while (obj) {
+ if (uacpi_unlikely(uacpi_bugged_shareable(obj)))
+ return;
+
+ uacpi_shareable_unref(obj);
+
+ if (obj->type == UACPI_OBJECT_REFERENCE) {
+ obj = obj->inner_object;
+ } else {
+ obj = UACPI_NULL;
+ }
+ }
+
+ if (uacpi_shareable_refcount(this_obj) == 0)
+ free_chain(this_obj);
+}
+
+static uacpi_status buffer_alloc_and_store(
+ uacpi_object *obj, uacpi_size buf_size,
+ const void *src, uacpi_size src_size
+)
+{
+ if (uacpi_unlikely(!buffer_alloc(obj, buf_size)))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ uacpi_memcpy_zerout(obj->buffer->data, src, buf_size, src_size);
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status assign_buffer(uacpi_object *dst, uacpi_object *src,
+ enum uacpi_assign_behavior behavior)
+{
+ if (behavior == UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY) {
+ dst->buffer = src->buffer;
+ uacpi_shareable_ref(dst->buffer);
+ return UACPI_STATUS_OK;
+ }
+
+ return buffer_alloc_and_store(dst, src->buffer->size,
+ src->buffer->data, src->buffer->size);
+}
+
+struct pkg_copy_req {
+ uacpi_object *dst;
+ uacpi_package *src;
+};
+
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE(pkg_copy_reqs, struct pkg_copy_req, 2)
+DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
+ pkg_copy_reqs, struct pkg_copy_req, static
+)
+
+static uacpi_bool pkg_copy_reqs_push(
+ struct pkg_copy_reqs *reqs,
+ uacpi_object *dst, uacpi_package *pkg
+)
+{
+ struct pkg_copy_req *req;
+
+ req = pkg_copy_reqs_alloc(reqs);
+ if (uacpi_unlikely(req == UACPI_NULL))
+ return UACPI_FALSE;
+
+ req->dst = dst;
+ req->src = pkg;
+
+ return UACPI_TRUE;
+}
+
+static uacpi_status deep_copy_package_no_recurse(
+ uacpi_object *dst, uacpi_package *src,
+ struct pkg_copy_reqs *reqs
+)
+{
+ uacpi_size i;
+ uacpi_package *dst_package;
+
+ if (uacpi_unlikely(!package_alloc(dst, src->count,
+ UACPI_PREALLOC_OBJECTS_YES)))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ dst->type = UACPI_OBJECT_PACKAGE;
+ dst_package = dst->package;
+
+ for (i = 0; i < src->count; ++i) {
+ uacpi_status st;
+ uacpi_object *src_obj = src->objects[i];
+ uacpi_object *dst_obj = dst_package->objects[i];
+
+ // Don't copy the internal package index reference
+ if (src_obj->type == UACPI_OBJECT_REFERENCE &&
+ src_obj->flags == UACPI_REFERENCE_KIND_PKG_INDEX)
+ src_obj = src_obj->inner_object;
+
+ if (src_obj->type == UACPI_OBJECT_PACKAGE) {
+ uacpi_bool ret;
+
+ ret = pkg_copy_reqs_push(reqs, dst_obj, src_obj->package);
+ if (uacpi_unlikely(!ret))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ continue;
+ }
+
+ st = uacpi_object_assign(dst_obj, src_obj,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
+ if (uacpi_unlikely_error(st))
+ return st;
+ }
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status deep_copy_package(uacpi_object *dst, uacpi_object *src)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ struct pkg_copy_reqs reqs = { 0 };
+
+ pkg_copy_reqs_push(&reqs, dst, src->package);
+
+ while (pkg_copy_reqs_size(&reqs) != 0) {
+ struct pkg_copy_req req;
+
+ req = *pkg_copy_reqs_last(&reqs);
+ pkg_copy_reqs_pop(&reqs);
+
+ ret = deep_copy_package_no_recurse(req.dst, req.src, &reqs);
+ if (uacpi_unlikely_error(ret))
+ break;
+ }
+
+ pkg_copy_reqs_clear(&reqs);
+ return ret;
+}
+
+static uacpi_status assign_mutex(uacpi_object *dst, uacpi_object *src,
+ enum uacpi_assign_behavior behavior)
+{
+ if (behavior == UACPI_ASSIGN_BEHAVIOR_DEEP_COPY) {
+ if (uacpi_likely(mutex_alloc(dst))) {
+ dst->mutex->sync_level = src->mutex->sync_level;
+ return UACPI_STATUS_OK;
+ }
+
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ dst->mutex = src->mutex;
+ uacpi_shareable_ref(dst->mutex);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status assign_event(uacpi_object *dst, uacpi_object *src,
+ enum uacpi_assign_behavior behavior)
+{
+ if (behavior == UACPI_ASSIGN_BEHAVIOR_DEEP_COPY) {
+ if (uacpi_likely(event_alloc(dst)))
+ return UACPI_STATUS_OK;
+
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+
+ dst->event = src->event;
+ uacpi_shareable_ref(dst->event);
+
+ return UACPI_STATUS_OK;
+}
+
+static uacpi_status assign_package(uacpi_object *dst, uacpi_object *src,
+ enum uacpi_assign_behavior behavior)
+{
+ if (behavior == UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY) {
+ dst->package = src->package;
+ uacpi_shareable_ref(dst->package);
+ return UACPI_STATUS_OK;
+ }
+
+ return deep_copy_package(dst, src);
+}
+
+void uacpi_object_attach_child(uacpi_object *parent, uacpi_object *child)
+{
+ uacpi_u32 refs_to_add;
+
+ parent->inner_object = child;
+
+ if (uacpi_unlikely(uacpi_bugged_shareable(parent))) {
+ make_chain_bugged(child);
+ return;
+ }
+
+ refs_to_add = uacpi_shareable_refcount(parent);
+ while (refs_to_add--)
+ uacpi_object_ref(child);
+}
+
+void uacpi_object_detach_child(uacpi_object *parent)
+{
+ uacpi_u32 refs_to_remove;
+ uacpi_object *child;
+
+ child = parent->inner_object;
+ parent->inner_object = UACPI_NULL;
+
+ if (uacpi_unlikely(uacpi_bugged_shareable(parent)))
+ return;
+
+ refs_to_remove = uacpi_shareable_refcount(parent);
+ while (refs_to_remove--)
+ uacpi_object_unref(child);
+}
+
+uacpi_object_type uacpi_object_get_type(uacpi_object *obj)
+{
+ return obj->type;
+}
+
+uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object *obj)
+{
+ return (1u << obj->type);
+}
+
+uacpi_bool uacpi_object_is(uacpi_object *obj, uacpi_object_type type)
+{
+ return obj->type == type;
+}
+
+uacpi_bool uacpi_object_is_one_of(
+ uacpi_object *obj, uacpi_object_type_bits type_mask
+)
+{
+ return (uacpi_object_get_type_bit(obj) & type_mask) != 0;
+}
+
+#define TYPE_CHECK_USER_OBJ_RET(obj, type_bits, ret) \
+ do { \
+ if (uacpi_unlikely(obj == UACPI_NULL || \
+ !uacpi_object_is_one_of(obj, type_bits))) \
+ return ret; \
+ } while (0)
+
+#define TYPE_CHECK_USER_OBJ(obj, type_bits) \
+ TYPE_CHECK_USER_OBJ_RET(obj, type_bits, UACPI_STATUS_INVALID_ARGUMENT)
+
+#define ENSURE_VALID_USER_OBJ_RET(obj, ret) \
+ do { \
+ if (uacpi_unlikely(obj == UACPI_NULL)) \
+ return ret; \
+ } while (0)
+
+#define ENSURE_VALID_USER_OBJ(obj) \
+ ENSURE_VALID_USER_OBJ_RET(obj, UACPI_STATUS_INVALID_ARGUMENT)
+
+uacpi_status uacpi_object_get_integer(uacpi_object *obj, uacpi_u64 *out)
+{
+ TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_INTEGER_BIT);
+
+ *out = obj->integer;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_object_assign_integer(uacpi_object *obj, uacpi_u64 value)
+{
+ uacpi_object object = { 0 };
+
+ ENSURE_VALID_USER_OBJ(obj);
+
+ object.type = UACPI_OBJECT_INTEGER;
+ object.integer = value;
+
+ return uacpi_object_assign(obj, &object, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
+}
+
+void uacpi_buffer_to_view(uacpi_buffer *buf, uacpi_data_view *out_view)
+{
+ out_view->bytes = buf->byte_data;
+ out_view->length = buf->size;
+}
+
+static uacpi_status uacpi_object_do_get_string_or_buffer(
+ uacpi_object *obj, uacpi_data_view *out, uacpi_u32 mask
+)
+{
+ TYPE_CHECK_USER_OBJ(obj, mask);
+
+ uacpi_buffer_to_view(obj->buffer, out);
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_object_get_string_or_buffer(
+ uacpi_object *obj, uacpi_data_view *out
+)
+{
+ return uacpi_object_do_get_string_or_buffer(
+ obj, out, UACPI_OBJECT_STRING_BIT | UACPI_OBJECT_BUFFER_BIT
+ );
+}
+
+uacpi_status uacpi_object_get_string(uacpi_object *obj, uacpi_data_view *out)
+{
+ return uacpi_object_do_get_string_or_buffer(
+ obj, out, UACPI_OBJECT_STRING_BIT
+ );
+}
+
+uacpi_status uacpi_object_get_buffer(uacpi_object *obj, uacpi_data_view *out)
+{
+ return uacpi_object_do_get_string_or_buffer(
+ obj, out, UACPI_OBJECT_BUFFER_BIT
+ );
+}
+
+uacpi_bool uacpi_object_is_aml_namepath(uacpi_object *obj)
+{
+ TYPE_CHECK_USER_OBJ_RET(obj, UACPI_OBJECT_STRING_BIT, UACPI_FALSE);
+ return obj->flags == UACPI_STRING_KIND_PATH;
+}
+
+uacpi_status uacpi_object_resolve_as_aml_namepath(
+ uacpi_object *obj, uacpi_namespace_node *scope,
+ uacpi_namespace_node **out_node
+)
+{
+ uacpi_status ret;
+ uacpi_namespace_node *node;
+
+ if (!uacpi_object_is_aml_namepath(obj))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_namespace_node_resolve_from_aml_namepath(
+ scope, obj->buffer->text, &node
+ );
+ if (uacpi_likely_success(ret))
+ *out_node = node;
+ return ret;
+}
+
+static uacpi_status uacpi_object_do_assign_buffer(
+ uacpi_object *obj, uacpi_data_view in, uacpi_object_type type
+)
+{
+ uacpi_status ret;
+ uacpi_object tmp_obj = { 0 };
+ uacpi_size dst_buf_size = in.length;
+
+ tmp_obj.type = type;
+
+ ENSURE_VALID_USER_OBJ(obj);
+
+ if (type == UACPI_OBJECT_STRING && (in.length == 0 ||
+ in.const_bytes[in.length - 1] != 0x00))
+ dst_buf_size++;
+
+ ret = buffer_alloc_and_store(
+ &tmp_obj, dst_buf_size, in.const_bytes, in.length
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ ret = uacpi_object_assign(
+ obj, &tmp_obj, UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY
+ );
+ uacpi_shareable_unref_and_delete_if_last(tmp_obj.buffer, free_buffer);
+
+ return ret;
+}
+
+uacpi_status uacpi_object_assign_string(uacpi_object *obj, uacpi_data_view in)
+{
+ return uacpi_object_do_assign_buffer(obj, in, UACPI_OBJECT_STRING);
+}
+
+uacpi_status uacpi_object_assign_buffer(uacpi_object *obj, uacpi_data_view in)
+{
+ return uacpi_object_do_assign_buffer(obj, in, UACPI_OBJECT_BUFFER);
+}
+
+uacpi_object *uacpi_object_create_uninitialized(void)
+{
+ return uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+}
+
+uacpi_status uacpi_object_create_integer_safe(
+ uacpi_u64 value, uacpi_overflow_behavior behavior, uacpi_object **out_obj
+)
+{
+ uacpi_status ret;
+ uacpi_u8 bitness;
+ uacpi_object *obj;
+
+ ret = uacpi_get_aml_bitness(&bitness);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ switch (behavior) {
+ case UACPI_OVERFLOW_TRUNCATE:
+ case UACPI_OVERFLOW_DISALLOW:
+ if (bitness == 32 && value > 0xFFFFFFFF) {
+ if (behavior == UACPI_OVERFLOW_DISALLOW)
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ value &= 0xFFFFFFFF;
+ }
+ UACPI_FALLTHROUGH;
+ case UACPI_OVERFLOW_ALLOW:
+ obj = uacpi_object_create_integer(value);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ *out_obj = obj;
+ return ret;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+}
+
+uacpi_object *uacpi_object_create_integer(uacpi_u64 value)
+{
+ uacpi_object *obj;
+
+ obj = uacpi_create_object(UACPI_OBJECT_INTEGER);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return obj;
+
+ obj->integer = value;
+ return obj;
+}
+
+static uacpi_object *uacpi_object_do_create_string_or_buffer(
+ uacpi_data_view view, uacpi_object_type type
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+
+ obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_NULL;
+
+ ret = uacpi_object_do_assign_buffer(obj, view, type);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_object_unref(obj);
+ return UACPI_NULL;
+ }
+
+ return obj;
+}
+
+uacpi_object *uacpi_object_create_string(uacpi_data_view view)
+{
+ return uacpi_object_do_create_string_or_buffer(view, UACPI_OBJECT_STRING);
+}
+
+uacpi_object *uacpi_object_create_buffer(uacpi_data_view view)
+{
+ return uacpi_object_do_create_string_or_buffer(view, UACPI_OBJECT_BUFFER);
+}
+
+uacpi_object *uacpi_object_create_cstring(const uacpi_char *str)
+{
+ uacpi_data_view data_view = { 0 };
+
+ data_view.const_text = str;
+ data_view.length = uacpi_strlen(str) + 1;
+ return uacpi_object_create_string(data_view);
+}
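+
+/*
+ * Illustrative round trip through the helpers above (not part of this file):
+ *
+ *     uacpi_data_view view;
+ *     uacpi_object *obj = uacpi_object_create_cstring("PCI0");
+ *
+ *     if (obj != UACPI_NULL) {
+ *         uacpi_object_get_string(obj, &view);  // view.length includes '\0'
+ *         uacpi_object_unref(obj);
+ *     }
+ */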
+
+uacpi_status uacpi_object_get_package(
+ uacpi_object *obj, uacpi_object_array *out
+)
+{
+ TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_PACKAGE_BIT);
+
+ out->objects = obj->package->objects;
+ out->count = obj->package->count;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_object *uacpi_object_create_reference(uacpi_object *child)
+{
+ uacpi_object *obj;
+
+ ENSURE_VALID_USER_OBJ_RET(child, UACPI_NULL);
+
+ obj = uacpi_create_object(UACPI_OBJECT_REFERENCE);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_NULL;
+
+ uacpi_object_attach_child(obj, child);
+ obj->flags = UACPI_REFERENCE_KIND_ARG;
+
+ return obj;
+}
+
+uacpi_status uacpi_object_assign_reference(
+ uacpi_object *obj, uacpi_object *child
+)
+{
+ uacpi_status ret;
+ uacpi_object object = { 0 };
+
+ ENSURE_VALID_USER_OBJ(obj);
+ ENSURE_VALID_USER_OBJ(child);
+
+ // First clear out the object
+ object.type = UACPI_OBJECT_UNINITIALIZED;
+ ret = uacpi_object_assign(
+ obj, &object,
+ UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ obj->type = UACPI_OBJECT_REFERENCE;
+ uacpi_object_attach_child(obj, child);
+ obj->flags = UACPI_REFERENCE_KIND_ARG;
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_object_get_dereferenced(
+ uacpi_object *obj, uacpi_object **out
+)
+{
+ TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_REFERENCE_BIT);
+
+ *out = obj->inner_object;
+ uacpi_shareable_ref(*out);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_object_get_processor_info(
+ uacpi_object *obj, uacpi_processor_info *out
+)
+{
+ TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_PROCESSOR_BIT);
+
+ out->id = obj->processor->id;
+ out->block_address = obj->processor->block_address;
+ out->block_length = obj->processor->block_length;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_object_get_power_resource_info(
+ uacpi_object *obj, uacpi_power_resource_info *out
+)
+{
+ TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_POWER_RESOURCE_BIT);
+
+ out->system_level = obj->power_resource.system_level;
+ out->resource_order = obj->power_resource.resource_order;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_object_assign_package(
+ uacpi_object *obj, uacpi_object_array in
+)
+{
+ uacpi_status ret;
+ uacpi_size i;
+ uacpi_object tmp_obj = {
+ .type = UACPI_OBJECT_PACKAGE,
+ };
+
+ ENSURE_VALID_USER_OBJ(obj);
+
+ if (uacpi_unlikely(!package_alloc(&tmp_obj, in.count,
+ UACPI_PREALLOC_OBJECTS_NO)))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ obj->type = UACPI_OBJECT_PACKAGE;
+
+ for (i = 0; i < in.count; ++i) {
+ tmp_obj.package->objects[i] = in.objects[i];
+ uacpi_object_ref(tmp_obj.package->objects[i]);
+ }
+
+ ret = uacpi_object_assign(obj, &tmp_obj, UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY);
+ uacpi_shareable_unref_and_delete_if_last(tmp_obj.package, free_package);
+
+ return ret;
+}
+
+uacpi_object *uacpi_object_create_package(uacpi_object_array in)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+
+ obj = uacpi_object_create_uninitialized();
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return obj;
+
+ ret = uacpi_object_assign_package(obj, in);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_object_unref(obj);
+ return UACPI_NULL;
+ }
+
+ return obj;
+}
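+
+/*
+ * Building a two-element package with the helpers above (illustrative only,
+ * error handling omitted):
+ *
+ *     uacpi_object *elems[2];
+ *     uacpi_object_array arr = { .objects = elems, .count = 2 };
+ *     uacpi_object *pkg;
+ *
+ *     elems[0] = uacpi_object_create_integer(1);
+ *     elems[1] = uacpi_object_create_integer(2);
+ *     pkg = uacpi_object_create_package(arr);
+ *
+ *     // the package takes its own references, so the locals can be dropped
+ *     uacpi_object_unref(elems[0]);
+ *     uacpi_object_unref(elems[1]);
+ *
+ *     // ...use pkg, then release it with uacpi_object_unref(pkg)
+ */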
+
+uacpi_status uacpi_object_assign(uacpi_object *dst, uacpi_object *src,
+ enum uacpi_assign_behavior behavior)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ if (src == dst)
+ return ret;
+
+ switch (dst->type) {
+ case UACPI_OBJECT_REFERENCE:
+ uacpi_object_detach_child(dst);
+ break;
+ case UACPI_OBJECT_STRING:
+ case UACPI_OBJECT_BUFFER:
+ case UACPI_OBJECT_METHOD:
+ case UACPI_OBJECT_PACKAGE:
+ case UACPI_OBJECT_MUTEX:
+ case UACPI_OBJECT_EVENT:
+ case UACPI_OBJECT_PROCESSOR:
+ case UACPI_OBJECT_DEVICE:
+ case UACPI_OBJECT_THERMAL_ZONE:
+ free_object_storage(dst);
+ break;
+ default:
+ break;
+ }
+
+ switch (src->type) {
+ case UACPI_OBJECT_UNINITIALIZED:
+ case UACPI_OBJECT_DEBUG:
+ break;
+ case UACPI_OBJECT_BUFFER:
+ case UACPI_OBJECT_STRING:
+ dst->flags = src->flags;
+ ret = assign_buffer(dst, src, behavior);
+ break;
+ case UACPI_OBJECT_BUFFER_FIELD:
+ dst->buffer_field = src->buffer_field;
+ uacpi_shareable_ref(dst->buffer_field.backing);
+ break;
+ case UACPI_OBJECT_BUFFER_INDEX:
+ dst->buffer_index = src->buffer_index;
+ uacpi_shareable_ref(dst->buffer_index.buffer);
+ break;
+ case UACPI_OBJECT_INTEGER:
+ dst->integer = src->integer;
+ break;
+ case UACPI_OBJECT_METHOD:
+ dst->method = src->method;
+ uacpi_shareable_ref(dst->method);
+ break;
+ case UACPI_OBJECT_MUTEX:
+ ret = assign_mutex(dst, src, behavior);
+ break;
+ case UACPI_OBJECT_EVENT:
+ ret = assign_event(dst, src, behavior);
+ break;
+ case UACPI_OBJECT_OPERATION_REGION:
+ dst->op_region = src->op_region;
+ uacpi_shareable_ref(dst->op_region);
+ break;
+ case UACPI_OBJECT_PACKAGE:
+ ret = assign_package(dst, src, behavior);
+ break;
+ case UACPI_OBJECT_FIELD_UNIT:
+ dst->field_unit = src->field_unit;
+ uacpi_shareable_ref(dst->field_unit);
+ break;
+ case UACPI_OBJECT_REFERENCE:
+ uacpi_object_attach_child(dst, src->inner_object);
+ break;
+ case UACPI_OBJECT_PROCESSOR:
+ dst->processor = src->processor;
+ uacpi_shareable_ref(dst->processor);
+ break;
+ case UACPI_OBJECT_DEVICE:
+ dst->device = src->device;
+ uacpi_shareable_ref(dst->device);
+ break;
+ case UACPI_OBJECT_THERMAL_ZONE:
+ dst->thermal_zone = src->thermal_zone;
+ uacpi_shareable_ref(dst->thermal_zone);
+ break;
+ default:
+ ret = UACPI_STATUS_UNIMPLEMENTED;
+ }
+
+ if (ret == UACPI_STATUS_OK)
+ dst->type = src->type;
+
+ return ret;
+}
+
+struct uacpi_object *uacpi_create_internal_reference(
+ enum uacpi_reference_kind kind, uacpi_object *child
+)
+{
+ uacpi_object *ret;
+
+ ret = uacpi_create_object(UACPI_OBJECT_REFERENCE);
+ if (uacpi_unlikely(ret == UACPI_NULL))
+ return ret;
+
+ ret->flags = kind;
+ uacpi_object_attach_child(ret, child);
+ return ret;
+}
+
+uacpi_object *uacpi_unwrap_internal_reference(uacpi_object *object)
+{
+ for (;;) {
+ if (object->type != UACPI_OBJECT_REFERENCE ||
+ (object->flags == UACPI_REFERENCE_KIND_REFOF ||
+ object->flags == UACPI_REFERENCE_KIND_PKG_INDEX))
+ return object;
+
+ object = object->inner_object;
+ }
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/uacpi.c b/sys/dev/acpi/uacpi/uacpi.c
new file mode 100644
index 0000000..c6c569f
--- /dev/null
+++ b/sys/dev/acpi/uacpi/uacpi.c
@@ -0,0 +1,998 @@
+#include <uacpi/uacpi.h>
+#include <uacpi/acpi.h>
+
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/context.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/tables.h>
+#include <uacpi/internal/interpreter.h>
+#include <uacpi/internal/namespace.h>
+#include <uacpi/internal/opregion.h>
+#include <uacpi/internal/registers.h>
+#include <uacpi/internal/event.h>
+#include <uacpi/internal/notify.h>
+#include <uacpi/internal/osi.h>
+#include <uacpi/internal/registers.h>
+
+struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 };
+
+void uacpi_context_set_log_level(uacpi_log_level lvl)
+{
+ if (lvl == 0)
+ lvl = UACPI_DEFAULT_LOG_LEVEL;
+
+ g_uacpi_rt_ctx.log_level = lvl;
+}
+
+void uacpi_logger_initialize(void)
+{
+ static uacpi_bool version_printed = UACPI_FALSE;
+
+ if (g_uacpi_rt_ctx.log_level == 0)
+ uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL);
+
+ if (!version_printed) {
+ version_printed = UACPI_TRUE;
+ uacpi_info(
+ "starting uACPI, version %d.%d.%d\n",
+ UACPI_MAJOR, UACPI_MINOR, UACPI_PATCH
+ );
+ }
+}
+
+void uacpi_context_set_proactive_table_checksum(uacpi_bool setting)
+{
+ if (setting)
+ g_uacpi_rt_ctx.flags |= UACPI_FLAG_PROACTIVE_TBL_CSUM;
+ else
+ g_uacpi_rt_ctx.flags &= ~UACPI_FLAG_PROACTIVE_TBL_CSUM;
+}
+
+const uacpi_char *uacpi_status_to_string(uacpi_status st)
+{
+ switch (st) {
+ case UACPI_STATUS_OK:
+ return "no error";
+ case UACPI_STATUS_MAPPING_FAILED:
+ return "failed to map memory";
+ case UACPI_STATUS_OUT_OF_MEMORY:
+ return "out of memory";
+ case UACPI_STATUS_BAD_CHECKSUM:
+ return "bad table checksum";
+ case UACPI_STATUS_INVALID_SIGNATURE:
+ return "invalid table signature";
+ case UACPI_STATUS_INVALID_TABLE_LENGTH:
+ return "invalid table length";
+ case UACPI_STATUS_NOT_FOUND:
+ return "not found";
+ case UACPI_STATUS_INVALID_ARGUMENT:
+ return "invalid argument";
+ case UACPI_STATUS_UNIMPLEMENTED:
+ return "unimplemented";
+ case UACPI_STATUS_ALREADY_EXISTS:
+ return "already exists";
+ case UACPI_STATUS_INTERNAL_ERROR:
+ return "internal error";
+ case UACPI_STATUS_TYPE_MISMATCH:
+ return "object type mismatch";
+ case UACPI_STATUS_INIT_LEVEL_MISMATCH:
+ return "init level too low/high for this action";
+ case UACPI_STATUS_NAMESPACE_NODE_DANGLING:
+ return "attempting to use a dangling namespace node";
+ case UACPI_STATUS_NO_HANDLER:
+ return "no handler found";
+ case UACPI_STATUS_NO_RESOURCE_END_TAG:
+ return "resource template without an end tag";
+ case UACPI_STATUS_COMPILED_OUT:
+ return "this functionality has been compiled out of this build";
+ case UACPI_STATUS_HARDWARE_TIMEOUT:
+ return "timed out waiting for hardware response";
+ case UACPI_STATUS_TIMEOUT:
+ return "wait timed out";
+ case UACPI_STATUS_OVERRIDDEN:
+ return "the requested action has been overridden";
+ case UACPI_STATUS_DENIED:
+ return "the requested action has been denied";
+
+ case UACPI_STATUS_AML_UNDEFINED_REFERENCE:
+ return "AML referenced an undefined object";
+ case UACPI_STATUS_AML_INVALID_NAMESTRING:
+ return "invalid AML name string";
+ case UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS:
+ return "object already exists";
+ case UACPI_STATUS_AML_INVALID_OPCODE:
+ return "invalid AML opcode";
+ case UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE:
+ return "incompatible AML object type";
+ case UACPI_STATUS_AML_BAD_ENCODING:
+ return "bad AML instruction encoding";
+ case UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX:
+ return "out of bounds AML index";
+ case UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH:
+ return "AML attempted to acquire a mutex with a lower sync level";
+ case UACPI_STATUS_AML_INVALID_RESOURCE:
+ return "invalid resource template encoding or type";
+ case UACPI_STATUS_AML_LOOP_TIMEOUT:
+ return "hanging AML while loop";
+ case UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT:
+ return "reached maximum AML call stack depth";
+ default:
+ return "<invalid status>";
+ }
+}
+
+void uacpi_state_reset(void)
+{
+#ifndef UACPI_BAREBONES_MODE
+ uacpi_deinitialize_namespace();
+ uacpi_deinitialize_interfaces();
+ uacpi_deinitialize_events();
+ uacpi_deinitialize_notify();
+ uacpi_deinitialize_opregion();
+#endif
+
+ uacpi_deinitialize_tables();
+
+#ifndef UACPI_BAREBONES_MODE
+
+#ifndef UACPI_REDUCED_HARDWARE
+ if (g_uacpi_rt_ctx.was_in_legacy_mode)
+ uacpi_leave_acpi_mode();
+#endif
+
+ uacpi_deinitialize_registers();
+
+#ifndef UACPI_REDUCED_HARDWARE
+ if (g_uacpi_rt_ctx.global_lock_event)
+ uacpi_kernel_free_event(g_uacpi_rt_ctx.global_lock_event);
+ if (g_uacpi_rt_ctx.global_lock_spinlock)
+ uacpi_kernel_free_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
+#endif
+
+#endif // !UACPI_BAREBONES_MODE
+
+ uacpi_memzero(&g_uacpi_rt_ctx, sizeof(g_uacpi_rt_ctx));
+
+#if defined(UACPI_KERNEL_INITIALIZATION) && !defined(UACPI_BAREBONES_MODE)
+ uacpi_kernel_deinitialize();
+#endif
+}
+
+#ifndef UACPI_BAREBONES_MODE
+
+void uacpi_context_set_loop_timeout(uacpi_u32 seconds)
+{
+ if (seconds == 0)
+ seconds = UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS;
+
+ g_uacpi_rt_ctx.loop_timeout_seconds = seconds;
+}
+
+void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth)
+{
+ if (depth == 0)
+ depth = UACPI_DEFAULT_MAX_CALL_STACK_DEPTH;
+
+ g_uacpi_rt_ctx.max_call_stack_depth = depth;
+}
+
+uacpi_u32 uacpi_context_get_loop_timeout(void)
+{
+ return g_uacpi_rt_ctx.loop_timeout_seconds;
+}
+
+#ifndef UACPI_REDUCED_HARDWARE
+enum hw_mode {
+ HW_MODE_ACPI = 0,
+ HW_MODE_LEGACY = 1,
+};
+
+static enum hw_mode read_mode(void)
+{
+ uacpi_status ret;
+ uacpi_u64 raw_value;
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+
+ if (!fadt->smi_cmd)
+ return HW_MODE_ACPI;
+
+ ret = uacpi_read_register_field(UACPI_REGISTER_FIELD_SCI_EN, &raw_value);
+ if (uacpi_unlikely_error(ret))
+ return HW_MODE_LEGACY;
+
+ return raw_value ? HW_MODE_ACPI : HW_MODE_LEGACY;
+}
+
+static uacpi_status set_mode(enum hw_mode mode)
+{
+ uacpi_status ret;
+ uacpi_u64 raw_value, stalled_time = 0;
+ struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
+
+ if (uacpi_unlikely(!fadt->smi_cmd)) {
+ uacpi_error("SMI_CMD is not implemented by the firmware\n");
+ return UACPI_STATUS_NOT_FOUND;
+ }
+
+ if (uacpi_unlikely(!fadt->acpi_enable && !fadt->acpi_disable)) {
+ uacpi_error("mode transition is not implemented by the hardware\n");
+ return UACPI_STATUS_NOT_FOUND;
+ }
+
+ switch (mode) {
+ case HW_MODE_ACPI:
+ raw_value = fadt->acpi_enable;
+ break;
+ case HW_MODE_LEGACY:
+ raw_value = fadt->acpi_disable;
+ break;
+ default:
+ return UACPI_STATUS_INVALID_ARGUMENT;
+ }
+
+ ret = uacpi_write_register(UACPI_REGISTER_SMI_CMD, raw_value);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ // Allow up to 5 seconds for the hardware to enter the desired mode
+ while (stalled_time < (5 * 1000 * 1000)) {
+ if (read_mode() == mode)
+ return UACPI_STATUS_OK;
+
+ uacpi_kernel_stall(100);
+ stalled_time += 100;
+ }
+
+ uacpi_error("hardware time out while changing modes\n");
+ return UACPI_STATUS_HARDWARE_TIMEOUT;
+}
+
+static uacpi_status enter_mode(enum hw_mode mode, uacpi_bool *did_change)
+{
+ uacpi_status ret;
+ const uacpi_char *mode_str;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ if (uacpi_is_hardware_reduced())
+ return UACPI_STATUS_OK;
+
+ mode_str = mode == HW_MODE_LEGACY ? "legacy" : "acpi";
+
+ if (read_mode() == mode) {
+ uacpi_trace("%s mode already enabled\n", mode_str);
+ return UACPI_STATUS_OK;
+ }
+
+ ret = set_mode(mode);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_warn(
+ "unable to enter %s mode: %s\n",
+ mode_str, uacpi_status_to_string(ret)
+ );
+ return ret;
+ }
+
+ uacpi_trace("entered %s mode\n", mode_str);
+ if (did_change != UACPI_NULL)
+ *did_change = UACPI_TRUE;
+
+ return ret;
+}
+
+uacpi_status uacpi_enter_acpi_mode(void)
+{
+ return enter_mode(HW_MODE_ACPI, UACPI_NULL);
+}
+
+uacpi_status uacpi_leave_acpi_mode(void)
+{
+ return enter_mode(HW_MODE_LEGACY, UACPI_NULL);
+}
+
+static void enter_acpi_mode_initial(void)
+{
+ enter_mode(HW_MODE_ACPI, &g_uacpi_rt_ctx.was_in_legacy_mode);
+}
+#else
+static void enter_acpi_mode_initial(void) { }
+#endif
+
+uacpi_init_level uacpi_get_current_init_level(void)
+{
+ return g_uacpi_rt_ctx.init_level;
+}
+
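+/*
+ * Bring the subsystem from INIT_LEVEL_EARLY to SUBSYSTEM_INITIALIZED:
+ * tables, registers, early event handling, operation regions, interfaces,
+ * the namespace and notify machinery are set up here, and the platform is
+ * switched to ACPI mode unless UACPI_FLAG_NO_ACPI_MODE was given. Any
+ * failure tears everything down again via uacpi_state_reset().
+ */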
+uacpi_status uacpi_initialize(uacpi_u64 flags)
+{
+ uacpi_status ret;
+
+ UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY);
+
+#ifdef UACPI_KERNEL_INITIALIZATION
+ ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_EARLY);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+#endif
+
+ g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED;
+ g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
+ g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
+ g_uacpi_rt_ctx.s0_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
+ g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
+ g_uacpi_rt_ctx.flags = flags;
+
+ uacpi_logger_initialize();
+
+ if (g_uacpi_rt_ctx.loop_timeout_seconds == 0)
+ uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS);
+ if (g_uacpi_rt_ctx.max_call_stack_depth == 0)
+ uacpi_context_set_max_call_stack_depth(UACPI_DEFAULT_MAX_CALL_STACK_DEPTH);
+
+ ret = uacpi_initialize_tables();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ ret = uacpi_initialize_registers();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ ret = uacpi_initialize_events_early();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ ret = uacpi_initialize_opregion();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ ret = uacpi_initialize_interfaces();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ ret = uacpi_initialize_namespace();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ ret = uacpi_initialize_notify();
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+
+ uacpi_install_default_address_space_handlers();
+
+ if (!uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE))
+ enter_acpi_mode_initial();
+
+ return UACPI_STATUS_OK;
+
+out_fatal_error:
+ uacpi_state_reset();
+ return ret;
+}
+
+struct table_load_stats {
+ uacpi_u32 load_counter;
+ uacpi_u32 failure_counter;
+};
+
+static void trace_table_load_failure(
+ struct acpi_sdt_hdr *tbl, uacpi_log_level lvl, uacpi_status ret
+)
+{
+ uacpi_log_lvl(
+ lvl,
+ "failed to load "UACPI_PRI_TBL_HDR": %s\n",
+ UACPI_FMT_TBL_HDR(tbl), uacpi_status_to_string(ret)
+ );
+}
+
+static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl)
+{
+ if (tbl->flags & UACPI_TABLE_LOADED)
+ return UACPI_FALSE;
+
+ return uacpi_signatures_match(tbl->hdr.signature, ACPI_SSDT_SIGNATURE) ||
+ uacpi_signatures_match(tbl->hdr.signature, ACPI_PSDT_SIGNATURE);
+}
+
+static uacpi_u64 elapsed_ms(uacpi_u64 begin_ns, uacpi_u64 end_ns)
+{
+ return (end_ns - begin_ns) / (1000ull * 1000ull);
+}
+
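+/*
+ * Sanity-check the timestamps taken around table loading so that obviously
+ * broken time sources (stubbed out, zero resolution, or drifting backwards)
+ * are flagged instead of producing nonsense statistics.
+ */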
+static uacpi_bool warn_on_bad_timesource(uacpi_u64 begin_ts, uacpi_u64 end_ts)
+{
+ const uacpi_char *reason;
+
+ if (uacpi_unlikely(begin_ts == 0 && end_ts == 0)) {
+ reason = "uacpi_kernel_get_nanoseconds_since_boot() appears to be a stub";
+ goto out_bad_timesource;
+ }
+
+ if (uacpi_unlikely(begin_ts == end_ts)) {
+ reason = "poor time source precision detected";
+ goto out_bad_timesource;
+ }
+
+ if (uacpi_unlikely(end_ts < begin_ts)) {
+ reason = "time source backwards drift detected";
+ goto out_bad_timesource;
+ }
+
+ return UACPI_FALSE;
+
+out_bad_timesource:
+ uacpi_warn("%s, this may cause problems\n", reason);
+ return UACPI_TRUE;
+}
+
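+/*
+ * Load the DSDT followed by every SSDT/PSDT that hasn't been loaded yet,
+ * keeping per-table load/failure counters. Individual table load failures
+ * are logged and counted rather than aborting; a missing DSDT or a failure
+ * to initialize events is fatal.
+ */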
+uacpi_status uacpi_namespace_load(void)
+{
+ struct uacpi_table tbl;
+ uacpi_status ret;
+ uacpi_u64 begin_ts, end_ts;
+ struct table_load_stats st = { 0 };
+ uacpi_size cur_index;
+
+ UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+#ifdef UACPI_KERNEL_INITIALIZATION
+ ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+ if (uacpi_unlikely_error(ret))
+ goto out_fatal_error;
+#endif
+
+ begin_ts = uacpi_kernel_get_nanoseconds_since_boot();
+
+ ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret));
+ goto out_fatal_error;
+ }
+
+ ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
+ if (uacpi_unlikely_error(ret)) {
+ trace_table_load_failure(tbl.hdr, UACPI_LOG_ERROR, ret);
+ st.failure_counter++;
+ }
+ st.load_counter++;
+ uacpi_table_unref(&tbl);
+
+ for (cur_index = 0;; cur_index = tbl.index + 1) {
+ ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl);
+ if (ret != UACPI_STATUS_OK) {
+ if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND))
+ goto out_fatal_error;
+
+ break;
+ }
+
+ ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
+ if (uacpi_unlikely_error(ret)) {
+ trace_table_load_failure(tbl.hdr, UACPI_LOG_WARN, ret);
+ st.failure_counter++;
+ }
+ st.load_counter++;
+ uacpi_table_unref(&tbl);
+ }
+
+ end_ts = uacpi_kernel_get_nanoseconds_since_boot();
+ g_uacpi_rt_ctx.bad_timesource = warn_on_bad_timesource(begin_ts, end_ts);
+
+ if (uacpi_unlikely(st.failure_counter != 0 || g_uacpi_rt_ctx.bad_timesource)) {
+ uacpi_info(
+ "loaded %u AML blob%s (%u error%s)\n",
+ st.load_counter, st.load_counter > 1 ? "s" : "", st.failure_counter,
+ st.failure_counter == 1 ? "" : "s"
+ );
+ } else {
+ uacpi_u64 ops = g_uacpi_rt_ctx.opcodes_executed;
+ uacpi_u64 ops_per_sec = ops * UACPI_NANOSECONDS_PER_SEC;
+
+ ops_per_sec /= end_ts - begin_ts;
+
+ uacpi_info(
+ "successfully loaded %u AML blob%s, %"UACPI_PRIu64" ops in "
+ "%"UACPI_PRIu64"ms (avg %"UACPI_PRIu64"/s)\n",
+ st.load_counter, st.load_counter > 1 ? "s" : "",
+ UACPI_FMT64(ops), UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
+ UACPI_FMT64(ops_per_sec)
+ );
+ }
+
+ ret = uacpi_initialize_events();
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("event initialization failed: %s\n",
+ uacpi_status_to_string(ret));
+ goto out_fatal_error;
+ }
+
+ g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_LOADED;
+ return UACPI_STATUS_OK;
+
+out_fatal_error:
+ uacpi_state_reset();
+ return ret;
+}
+
+struct ns_init_context {
+ uacpi_size ini_executed;
+ uacpi_size ini_errors;
+ uacpi_size sta_executed;
+ uacpi_size sta_errors;
+ uacpi_size devices;
+ uacpi_size thermal_zones;
+};
+
+static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node)
+{
+ uacpi_status ret;
+
+ ret = uacpi_eval(node, "_INI", UACPI_NULL, UACPI_NULL);
+ if (ret == UACPI_STATUS_NOT_FOUND)
+ return;
+
+ ctx->ini_executed++;
+ if (uacpi_unlikely_error(ret))
+ ctx->ini_errors++;
+}
+
+static uacpi_status sta_eval(
+ struct ns_init_context *ctx, uacpi_namespace_node *node,
+ uacpi_u32 *value
+)
+{
+ uacpi_status ret;
+
+ ret = uacpi_eval_sta(node, value);
+ if (*value == 0xFFFFFFFF)
+ return ret;
+
+ ctx->sta_executed++;
+ if (uacpi_unlikely_error(ret))
+ ctx->sta_errors++;
+
+ return ret;
+}
+
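+/*
+ * Per-node callback used by uacpi_namespace_initialize(): counts
+ * devices/processors and thermal zones, evaluates _STA, and runs _INI only
+ * for nodes reported as present. Subtrees of devices that are absent but
+ * functioning are still walked, per the ACPI 6.5 _STA semantics quoted below.
+ */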
+static uacpi_iteration_decision do_sta_ini(
+ void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
+)
+{
+ struct ns_init_context *ctx = opaque;
+ uacpi_status ret;
+ uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
+ uacpi_u32 sta_ret;
+
+ UACPI_UNUSED(depth);
+
+ // We don't care about aliases
+ if (uacpi_namespace_node_is_alias(node))
+ return UACPI_ITERATION_DECISION_NEXT_PEER;
+
+ ret = uacpi_namespace_node_type(node, &type);
+ switch (type) {
+ case UACPI_OBJECT_DEVICE:
+ case UACPI_OBJECT_PROCESSOR:
+ ctx->devices++;
+ break;
+ case UACPI_OBJECT_THERMAL_ZONE:
+ ctx->thermal_zones++;
+ break;
+ default:
+ if (node != uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_TZ))
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ ret = sta_eval(ctx, node, &sta_ret);
+ if (uacpi_unlikely_error(ret))
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) {
+ if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING))
+ return UACPI_ITERATION_DECISION_NEXT_PEER;
+
+ /*
+ * ACPI 6.5 specification:
+ * _STA may return bit 0 clear (not present) with bit [3] set (device
+ * is functional). This case is used to indicate a valid device for
+ * which no device driver should be loaded (for example, a bridge
+ * device.) Children of this device may be present and valid. OSPM
+ * should continue enumeration below a device whose _STA returns this
+ * bit combination.
+ */
+ return UACPI_ITERATION_DECISION_CONTINUE;
+ }
+
+ ini_eval(ctx, node);
+
+ return UACPI_ITERATION_DECISION_CONTINUE;
+}
+
+uacpi_status uacpi_namespace_initialize(void)
+{
+ struct ns_init_context ctx = { 0 };
+ uacpi_namespace_node *root;
+ uacpi_u64 begin_ts, end_ts;
+ uacpi_address_space_handlers *handlers;
+ uacpi_address_space_handler *handler;
+ uacpi_status ret = UACPI_STATUS_OK;
+
+ UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+#ifdef UACPI_KERNEL_INITIALIZATION
+ ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+ if (uacpi_unlikely_error(ret))
+ goto out;
+#endif
+
+ /*
+ * The initialization order here is identical to ACPICA's, since the ACPI
+ * specification doesn't really spell out any detailed steps for how to
+ * do it.
+ */
+
+ root = uacpi_namespace_root();
+
+ begin_ts = uacpi_kernel_get_nanoseconds_since_boot();
+
+ // Step 1 - Execute \_INI
+ ini_eval(&ctx, root);
+
+ // Step 2 - Execute \_SB._INI
+ ini_eval(
+ &ctx, uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SB)
+ );
+
+ /*
+ * Step 3 - Run _REG methods for all globally installed
+ * address space handlers.
+ */
+ handlers = uacpi_node_get_address_space_handlers(root);
+ if (handlers) {
+ handler = handlers->head;
+
+ while (handler) {
+ if (uacpi_address_space_handler_is_default(handler))
+ uacpi_reg_all_opregions(root, handler->space);
+
+ handler = handler->next;
+ }
+ }
+
+ // Step 4 - Run all other _STA and _INI methods
+ uacpi_namespace_for_each_child(
+ root, do_sta_ini, UACPI_NULL,
+ UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, &ctx
+ );
+
+ end_ts = uacpi_kernel_get_nanoseconds_since_boot();
+
+ if (uacpi_likely(!g_uacpi_rt_ctx.bad_timesource)) {
+ uacpi_info(
+ "namespace initialization done in %"UACPI_PRIu64"ms: "
+ "%zu devices, %zu thermal zones\n",
+ UACPI_FMT64(elapsed_ms(begin_ts, end_ts)),
+ ctx.devices, ctx.thermal_zones
+ );
+ } else {
+ uacpi_info(
+ "namespace initialization done: %zu devices, %zu thermal zones\n",
+ ctx.devices, ctx.thermal_zones
+ );
+ }
+
+ uacpi_trace(
+ "_STA calls: %zu (%zu errors), _INI calls: %zu (%zu errors)\n",
+ ctx.sta_executed, ctx.sta_errors, ctx.ini_executed,
+ ctx.ini_errors
+ );
+
+ g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED;
+#ifdef UACPI_KERNEL_INITIALIZATION
+ ret = uacpi_kernel_initialize(UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED);
+out:
+ if (uacpi_unlikely_error(ret))
+ uacpi_state_reset();
+#endif
+ return ret;
+}
+
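+/*
+ * Core evaluation primitive: resolves 'path' relative to 'parent' under the
+ * namespace read lock. Non-method objects are returned to the caller as a
+ * deep copy; methods are executed after upgrading to the write lock.
+ */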
+uacpi_status uacpi_eval(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object **out_obj
+)
+{
+ struct uacpi_namespace_node *node;
+ uacpi_control_method *method;
+ uacpi_object *obj;
+ uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (uacpi_unlikely(parent == UACPI_NULL && path == UACPI_NULL))
+ return ret;
+
+ ret = uacpi_namespace_read_lock();
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (path != UACPI_NULL) {
+ ret = uacpi_namespace_node_resolve(
+ parent, path, UACPI_SHOULD_LOCK_NO,
+ UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_YES,
+ &node
+ );
+ if (uacpi_unlikely_error(ret))
+ goto out_read_unlock;
+ } else {
+ node = parent;
+ }
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (uacpi_unlikely(obj == UACPI_NULL)) {
+ ret = UACPI_STATUS_INVALID_ARGUMENT;
+ goto out_read_unlock;
+ }
+
+ if (obj->type != UACPI_OBJECT_METHOD) {
+ uacpi_object *new_obj;
+
+ if (uacpi_unlikely(out_obj == UACPI_NULL))
+ goto out_read_unlock;
+
+ new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
+ if (uacpi_unlikely(new_obj == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out_read_unlock;
+ }
+
+ ret = uacpi_object_assign(
+ new_obj, obj, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_object_unref(new_obj);
+ goto out_read_unlock;
+ }
+ *out_obj = new_obj;
+
+ out_read_unlock:
+ uacpi_namespace_read_unlock();
+ return ret;
+ }
+
+ method = obj->method;
+ uacpi_shareable_ref(method);
+ uacpi_namespace_read_unlock();
+
+ // Upgrade to a write-lock since we're about to run a method
+ ret = uacpi_namespace_write_lock();
+ if (uacpi_unlikely_error(ret))
+ goto out_no_write_lock;
+
+ ret = uacpi_execute_control_method(node, method, args, out_obj);
+ uacpi_namespace_write_unlock();
+
+out_no_write_lock:
+ uacpi_method_unref(method);
+ return ret;
+}
+
+uacpi_status uacpi_eval_simple(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
+)
+{
+ return uacpi_eval(parent, path, UACPI_NULL, ret);
+}
+
+uacpi_status uacpi_execute(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args
+)
+{
+ return uacpi_eval(parent, path, args, UACPI_NULL);
+}
+
+uacpi_status uacpi_execute_simple(
+ uacpi_namespace_node *parent, const uacpi_char *path
+)
+{
+ return uacpi_eval(parent, path, UACPI_NULL, UACPI_NULL);
+}
+
+#define TRACE_BAD_RET(path_fmt, type, ...) \
+ uacpi_warn( \
+ "unexpected '%s' object returned by method "path_fmt \
+ ", expected type mask: %08X\n", uacpi_object_type_to_string(type), \
+ __VA_ARGS__ \
+ )
+
+#define TRACE_NO_RET(path_fmt, ...) \
+ uacpi_warn( \
+ "no value returned from method "path_fmt", expected type mask: " \
+ "%08X\n", __VA_ARGS__ \
+ )
+
+static void trace_invalid_return_type(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ uacpi_object_type_bits expected_mask, uacpi_object_type actual_type
+)
+{
+ const uacpi_char *abs_path;
+ uacpi_bool dynamic_abs_path = UACPI_FALSE;
+
+ if (parent == UACPI_NULL || (path != UACPI_NULL && path[0] == '\\')) {
+ abs_path = path;
+ } else {
+ abs_path = uacpi_namespace_node_generate_absolute_path(parent);
+ dynamic_abs_path = UACPI_TRUE;
+ }
+
+ if (dynamic_abs_path && path != UACPI_NULL) {
+ if (actual_type == UACPI_OBJECT_UNINITIALIZED)
+ TRACE_NO_RET("%s.%s", abs_path, path, expected_mask);
+ else
+ TRACE_BAD_RET("%s.%s", actual_type, abs_path, path, expected_mask);
+ } else {
+ if (actual_type == UACPI_OBJECT_UNINITIALIZED) {
+ TRACE_NO_RET("%s", abs_path, expected_mask);
+ } else {
+ TRACE_BAD_RET("%s", actual_type, abs_path, expected_mask);
+ }
+ }
+
+ if (dynamic_abs_path)
+ uacpi_free_dynamic_string(abs_path);
+}
+
+uacpi_status uacpi_eval_typed(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object_type_bits ret_mask,
+ uacpi_object **out_obj
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_object_type returned_type = UACPI_OBJECT_UNINITIALIZED;
+
+ if (uacpi_unlikely(out_obj == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_eval(parent, path, args, &obj);
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ if (obj != UACPI_NULL)
+ returned_type = obj->type;
+
+ if (ret_mask && (ret_mask & (1 << returned_type)) == 0) {
+ trace_invalid_return_type(parent, path, ret_mask, returned_type);
+ uacpi_object_unref(obj);
+ return UACPI_STATUS_TYPE_MISMATCH;
+ }
+
+ *out_obj = obj;
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_eval_simple_typed(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ uacpi_object_type_bits ret_mask, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(parent, path, UACPI_NULL, ret_mask, ret);
+}
+
+uacpi_status uacpi_eval_integer(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_u64 *out_value
+)
+{
+ uacpi_object *int_obj;
+ uacpi_status ret;
+
+ ret = uacpi_eval_typed(
+ parent, path, args, UACPI_OBJECT_INTEGER_BIT, &int_obj
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ *out_value = int_obj->integer;
+ uacpi_object_unref(int_obj);
+
+ return UACPI_STATUS_OK;
+}
+
+uacpi_status uacpi_eval_simple_integer(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value
+)
+{
+ return uacpi_eval_integer(parent, path, UACPI_NULL, out_value);
+}
+
+uacpi_status uacpi_eval_buffer_or_string(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, args,
+ UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT,
+ ret
+ );
+}
+
+uacpi_status uacpi_eval_simple_buffer_or_string(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, UACPI_NULL,
+ UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT,
+ ret
+ );
+}
+
+uacpi_status uacpi_eval_string(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, args, UACPI_OBJECT_STRING_BIT, ret
+ );
+}
+
+uacpi_status uacpi_eval_simple_string(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, UACPI_NULL, UACPI_OBJECT_STRING_BIT, ret
+ );
+}
+
+uacpi_status uacpi_eval_buffer(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, args, UACPI_OBJECT_BUFFER_BIT, ret
+ );
+}
+
+uacpi_status uacpi_eval_simple_buffer(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, UACPI_NULL, UACPI_OBJECT_BUFFER_BIT, ret
+ );
+}
+
+uacpi_status uacpi_eval_package(
+ uacpi_namespace_node *parent, const uacpi_char *path,
+ const uacpi_object_array *args, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, args, UACPI_OBJECT_PACKAGE_BIT, ret
+ );
+}
+
+uacpi_status uacpi_eval_simple_package(
+ uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret
+)
+{
+ return uacpi_eval_typed(
+ parent, path, UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, ret
+ );
+}
+
+uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness)
+{
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED);
+
+ *out_bitness = g_uacpi_rt_ctx.is_rev1 ? 32 : 64;
+ return UACPI_STATUS_OK;
+}
+
+#endif // !UACPI_BAREBONES_MODE
diff --git a/sys/dev/acpi/uacpi/utilities.c b/sys/dev/acpi/uacpi/utilities.c
new file mode 100644
index 0000000..c7ca20a
--- /dev/null
+++ b/sys/dev/acpi/uacpi/utilities.c
@@ -0,0 +1,1156 @@
+#include <uacpi/types.h>
+#include <uacpi/status.h>
+#include <uacpi/uacpi.h>
+
+#include <uacpi/internal/context.h>
+#include <uacpi/internal/utilities.h>
+#include <uacpi/internal/log.h>
+#include <uacpi/internal/namespace.h>
+
+enum char_type {
+ CHAR_TYPE_CONTROL = 1 << 0,
+ CHAR_TYPE_SPACE = 1 << 1,
+ CHAR_TYPE_BLANK = 1 << 2,
+ CHAR_TYPE_PUNCTUATION = 1 << 3,
+ CHAR_TYPE_LOWER = 1 << 4,
+ CHAR_TYPE_UPPER = 1 << 5,
+ CHAR_TYPE_DIGIT = 1 << 6,
+ CHAR_TYPE_HEX_DIGIT = 1 << 7,
+ CHAR_TYPE_ALPHA = CHAR_TYPE_LOWER | CHAR_TYPE_UPPER,
+ CHAR_TYPE_ALHEX = CHAR_TYPE_ALPHA | CHAR_TYPE_HEX_DIGIT,
+ CHAR_TYPE_ALNUM = CHAR_TYPE_ALPHA | CHAR_TYPE_DIGIT,
+};
+
+static const uacpi_u8 ascii_map[256] = {
+ CHAR_TYPE_CONTROL, // 0
+ CHAR_TYPE_CONTROL, // 1
+ CHAR_TYPE_CONTROL, // 2
+ CHAR_TYPE_CONTROL, // 3
+ CHAR_TYPE_CONTROL, // 4
+ CHAR_TYPE_CONTROL, // 5
+ CHAR_TYPE_CONTROL, // 6
+ CHAR_TYPE_CONTROL, // 7
+ CHAR_TYPE_CONTROL, // -> 8 control codes
+
+ CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE | CHAR_TYPE_BLANK, // 9 tab
+
+ CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // 10
+ CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // 11
+ CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // 12
+ CHAR_TYPE_CONTROL | CHAR_TYPE_SPACE, // -> 13 whitespaces
+
+ CHAR_TYPE_CONTROL, // 14
+ CHAR_TYPE_CONTROL, // 15
+ CHAR_TYPE_CONTROL, // 16
+ CHAR_TYPE_CONTROL, // 17
+ CHAR_TYPE_CONTROL, // 18
+ CHAR_TYPE_CONTROL, // 19
+ CHAR_TYPE_CONTROL, // 20
+ CHAR_TYPE_CONTROL, // 21
+ CHAR_TYPE_CONTROL, // 22
+ CHAR_TYPE_CONTROL, // 23
+ CHAR_TYPE_CONTROL, // 24
+ CHAR_TYPE_CONTROL, // 25
+ CHAR_TYPE_CONTROL, // 26
+ CHAR_TYPE_CONTROL, // 27
+ CHAR_TYPE_CONTROL, // 28
+ CHAR_TYPE_CONTROL, // 29
+ CHAR_TYPE_CONTROL, // 30
+ CHAR_TYPE_CONTROL, // -> 31 control codes
+
+ CHAR_TYPE_SPACE | CHAR_TYPE_BLANK, // 32 space
+
+ CHAR_TYPE_PUNCTUATION, // 33
+ CHAR_TYPE_PUNCTUATION, // 34
+ CHAR_TYPE_PUNCTUATION, // 35
+ CHAR_TYPE_PUNCTUATION, // 36
+ CHAR_TYPE_PUNCTUATION, // 37
+ CHAR_TYPE_PUNCTUATION, // 38
+ CHAR_TYPE_PUNCTUATION, // 39
+ CHAR_TYPE_PUNCTUATION, // 40
+ CHAR_TYPE_PUNCTUATION, // 41
+ CHAR_TYPE_PUNCTUATION, // 42
+ CHAR_TYPE_PUNCTUATION, // 43
+ CHAR_TYPE_PUNCTUATION, // 44
+ CHAR_TYPE_PUNCTUATION, // 45
+ CHAR_TYPE_PUNCTUATION, // 46
+ CHAR_TYPE_PUNCTUATION, // -> 47 punctuation
+
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 48
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 49
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 50
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 51
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 52
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 53
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 54
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 55
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // 56
+ CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT, // -> 57 digits
+
+ CHAR_TYPE_PUNCTUATION, // 58
+ CHAR_TYPE_PUNCTUATION, // 59
+ CHAR_TYPE_PUNCTUATION, // 60
+ CHAR_TYPE_PUNCTUATION, // 61
+ CHAR_TYPE_PUNCTUATION, // 62
+ CHAR_TYPE_PUNCTUATION, // 63
+ CHAR_TYPE_PUNCTUATION, // -> 64 punctuation
+
+ CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 65
+ CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 66
+ CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 67
+ CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 68
+ CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // 69
+ CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT, // -> 70 ABCDEF
+
+ CHAR_TYPE_UPPER, // 71
+ CHAR_TYPE_UPPER, // 72
+ CHAR_TYPE_UPPER, // 73
+ CHAR_TYPE_UPPER, // 74
+ CHAR_TYPE_UPPER, // 75
+ CHAR_TYPE_UPPER, // 76
+ CHAR_TYPE_UPPER, // 77
+ CHAR_TYPE_UPPER, // 78
+ CHAR_TYPE_UPPER, // 79
+ CHAR_TYPE_UPPER, // 80
+ CHAR_TYPE_UPPER, // 81
+ CHAR_TYPE_UPPER, // 82
+ CHAR_TYPE_UPPER, // 83
+ CHAR_TYPE_UPPER, // 84
+ CHAR_TYPE_UPPER, // 85
+ CHAR_TYPE_UPPER, // 86
+ CHAR_TYPE_UPPER, // 87
+ CHAR_TYPE_UPPER, // 88
+ CHAR_TYPE_UPPER, // 89
+ CHAR_TYPE_UPPER, // -> 90 the rest of UPPERCASE alphabet
+
+ CHAR_TYPE_PUNCTUATION, // 91
+ CHAR_TYPE_PUNCTUATION, // 92
+ CHAR_TYPE_PUNCTUATION, // 93
+ CHAR_TYPE_PUNCTUATION, // 94
+ CHAR_TYPE_PUNCTUATION, // 95
+ CHAR_TYPE_PUNCTUATION, // -> 96 punctuation
+
+ CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 97
+ CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 98
+ CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 99
+ CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 100
+ CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // 101
+ CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT, // -> 102 abcdef
+
+ CHAR_TYPE_LOWER, // 103
+ CHAR_TYPE_LOWER, // 104
+ CHAR_TYPE_LOWER, // 105
+ CHAR_TYPE_LOWER, // 106
+ CHAR_TYPE_LOWER, // 107
+ CHAR_TYPE_LOWER, // 108
+ CHAR_TYPE_LOWER, // 109
+ CHAR_TYPE_LOWER, // 110
+ CHAR_TYPE_LOWER, // 111
+ CHAR_TYPE_LOWER, // 112
+ CHAR_TYPE_LOWER, // 113
+ CHAR_TYPE_LOWER, // 114
+ CHAR_TYPE_LOWER, // 115
+ CHAR_TYPE_LOWER, // 116
+ CHAR_TYPE_LOWER, // 117
+ CHAR_TYPE_LOWER, // 118
+ CHAR_TYPE_LOWER, // 119
+ CHAR_TYPE_LOWER, // 120
+ CHAR_TYPE_LOWER, // 121
+ CHAR_TYPE_LOWER, // -> 122 the rest of UPPERCASE alphabet
+
+ CHAR_TYPE_PUNCTUATION, // 123
+ CHAR_TYPE_PUNCTUATION, // 124
+ CHAR_TYPE_PUNCTUATION, // 125
+ CHAR_TYPE_PUNCTUATION, // -> 126 punctuation
+
+ CHAR_TYPE_CONTROL // 127 DEL
+};
+
+static uacpi_bool is_char(uacpi_char c, enum char_type type)
+{
+ return (ascii_map[(uacpi_u8)c] & type) == type;
+}
+
+static uacpi_char to_lower(uacpi_char c)
+{
+ if (is_char(c, CHAR_TYPE_UPPER))
+ return c + ('a' - 'A');
+
+ return c;
+}
+
+static uacpi_bool peek_one(
+ const uacpi_char **str, const uacpi_size *size, uacpi_char *out_char
+)
+{
+ if (*size == 0)
+ return UACPI_FALSE;
+
+ *out_char = **str;
+ return UACPI_TRUE;
+}
+
+static uacpi_bool consume_one(
+ const uacpi_char **str, uacpi_size *size, uacpi_char *out_char
+)
+{
+ if (!peek_one(str, size, out_char))
+ return UACPI_FALSE;
+
+ *str += 1;
+ *size -= 1;
+ return UACPI_TRUE;
+}
+
+static uacpi_bool consume_if(
+ const uacpi_char **str, uacpi_size *size, enum char_type type
+)
+{
+ uacpi_char c;
+
+ if (!peek_one(str, size, &c) || !is_char(c, type))
+ return UACPI_FALSE;
+
+ *str += 1;
+ *size -= 1;
+ return UACPI_TRUE;
+}
+
+static uacpi_bool consume_if_equals(
+ const uacpi_char **str, uacpi_size *size, uacpi_char c
+)
+{
+ uacpi_char c1;
+
+ if (!peek_one(str, size, &c1) || to_lower(c1) != c)
+ return UACPI_FALSE;
+
+ *str += 1;
+ *size -= 1;
+ return UACPI_TRUE;
+}
+
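+/*
+ * Parse an optionally signed integer from 'str'. With UACPI_BASE_AUTO a
+ * leading "0x" selects hex and a bare leading zero selects octal, otherwise
+ * decimal is assumed. On overflow parsing stops with the value clamped to
+ * 0xFFFFFFFFFFFFFFFF (before the optional sign is applied).
+ */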
+uacpi_status uacpi_string_to_integer(
+ const uacpi_char *str, uacpi_size max_chars, enum uacpi_base base,
+ uacpi_u64 *out_value
+)
+{
+ uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT;
+ uacpi_bool negative = UACPI_FALSE;
+ uacpi_u64 next, value = 0;
+ uacpi_char c = '\0';
+
+ while (consume_if(&str, &max_chars, CHAR_TYPE_SPACE));
+
+ if (consume_if_equals(&str, &max_chars, '-'))
+ negative = UACPI_TRUE;
+ else
+ consume_if_equals(&str, &max_chars, '+');
+
+ if (base == UACPI_BASE_AUTO) {
+ base = UACPI_BASE_DEC;
+
+ if (consume_if_equals(&str, &max_chars, '0')) {
+ base = UACPI_BASE_OCT;
+ if (consume_if_equals(&str, &max_chars, 'x'))
+ base = UACPI_BASE_HEX;
+ }
+ }
+
+ while (consume_one(&str, &max_chars, &c)) {
+ switch (ascii_map[(uacpi_u8)c] & (CHAR_TYPE_DIGIT | CHAR_TYPE_ALHEX)) {
+ case CHAR_TYPE_DIGIT | CHAR_TYPE_HEX_DIGIT:
+ next = c - '0';
+ if (base == UACPI_BASE_OCT && next > 7)
+ goto out;
+ break;
+ case CHAR_TYPE_LOWER | CHAR_TYPE_HEX_DIGIT:
+ case CHAR_TYPE_UPPER | CHAR_TYPE_HEX_DIGIT:
+ if (base != UACPI_BASE_HEX)
+ goto out;
+ next = 10 + (to_lower(c) - 'a');
+ break;
+ default:
+ goto out;
+ }
+
+ next = (value * base) + next;
+ if ((next / base) != value) {
+ value = 0xFFFFFFFFFFFFFFFF;
+ goto out;
+ }
+
+ value = next;
+ }
+
+out:
+ if (negative)
+ value = -((uacpi_i64)value);
+
+ *out_value = value;
+ if (max_chars == 0 || c == '\0')
+ ret = UACPI_STATUS_OK;
+
+ return ret;
+}
+
+#ifndef UACPI_BAREBONES_MODE
+
+static inline uacpi_bool is_valid_name_byte(uacpi_u8 c)
+{
+ // ‘_’ := 0x5F
+ if (c == 0x5F)
+ return UACPI_TRUE;
+
+ /*
+ * LeadNameChar := ‘A’-‘Z’ | ‘_’
+ * DigitChar := ‘0’ - ‘9’
+ * NameChar := DigitChar | LeadNameChar
+ * ‘A’-‘Z’ := 0x41 - 0x5A
+ * ‘0’-‘9’ := 0x30 - 0x39
+ */
+ return (ascii_map[c] & (CHAR_TYPE_DIGIT | CHAR_TYPE_UPPER)) != 0;
+}
+
+uacpi_bool uacpi_is_valid_nameseg(uacpi_u8 *nameseg)
+{
+ return is_valid_name_byte(nameseg[0]) &&
+ is_valid_name_byte(nameseg[1]) &&
+ is_valid_name_byte(nameseg[2]) &&
+ is_valid_name_byte(nameseg[3]);
+}
+
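+/*
+ * Decode a compressed 32-bit EISA ID into its 7-character textual form,
+ * e.g. 0x0303D041 becomes "PNP0303". 'out_string' must have room for at
+ * least 8 bytes including the null terminator.
+ */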
+void uacpi_eisa_id_to_string(uacpi_u32 id, uacpi_char *out_string)
+{
+ static uacpi_char hex_to_ascii[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F'
+ };
+
+ /*
+ * For whatever reason the bits are encoded upper to lower here; swap
+ * them around so that we don't have to do ridiculous bit shifts
+ * everywhere.
+ */
+ union {
+ uacpi_u8 bytes[4];
+ uacpi_u32 dword;
+ } orig, swapped;
+
+ orig.dword = id;
+ swapped.bytes[0] = orig.bytes[3];
+ swapped.bytes[1] = orig.bytes[2];
+ swapped.bytes[2] = orig.bytes[1];
+ swapped.bytes[3] = orig.bytes[0];
+
+ /*
+ * Bit 16 - 20: 3rd character (- 0x40) of mfg code
+ * Bit 21 - 25: 2nd character (- 0x40) of mfg code
+ * Bit 26 - 30: 1st character (- 0x40) of mfg code
+ */
+ out_string[0] = (uacpi_char)(0x40 + ((swapped.dword >> 26) & 0x1F));
+ out_string[1] = (uacpi_char)(0x40 + ((swapped.dword >> 21) & 0x1F));
+ out_string[2] = (uacpi_char)(0x40 + ((swapped.dword >> 16) & 0x1F));
+
+ /*
+ * Bit 0 - 3 : 4th hex digit of product number
+ * Bit 4 - 7 : 3rd hex digit of product number
+ * Bit 8 - 11: 2nd hex digit of product number
+ * Bit 12 - 15: 1st hex digit of product number
+ */
+ out_string[3] = hex_to_ascii[(swapped.dword >> 12) & 0x0F];
+ out_string[4] = hex_to_ascii[(swapped.dword >> 8 ) & 0x0F];
+ out_string[5] = hex_to_ascii[(swapped.dword >> 4 ) & 0x0F];
+ out_string[6] = hex_to_ascii[(swapped.dword >> 0 ) & 0x0F];
+
+ out_string[7] = '\0';
+}
+
+#define PNP_ID_LENGTH 8
+
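+/*
+ * Evaluate _HID for 'node'. Firmware may return either a string or a
+ * compressed EISA integer; integers are decoded to their textual form.
+ * The returned id is heap-allocated and must be released with
+ * uacpi_free_id_string().
+ */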
+uacpi_status uacpi_eval_hid(uacpi_namespace_node *node, uacpi_id_string **out_id)
+{
+ uacpi_status ret;
+ uacpi_object *hid_ret;
+ uacpi_id_string *id = UACPI_NULL;
+ uacpi_u32 size;
+
+ ret = uacpi_eval_typed(
+ node, "_HID", UACPI_NULL,
+ UACPI_OBJECT_INTEGER_BIT | UACPI_OBJECT_STRING_BIT,
+ &hid_ret
+ );
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
+ size = sizeof(uacpi_id_string);
+
+ switch (hid_ret->type) {
+ case UACPI_OBJECT_STRING: {
+ uacpi_buffer *buf = hid_ret->buffer;
+
+ size += buf->size;
+ if (uacpi_unlikely(buf->size == 0 || size < buf->size)) {
+ uacpi_object_name name = uacpi_namespace_node_name(node);
+
+ uacpi_error(
+ "%.4s._HID: empty/invalid EISA ID string (%zu bytes)\n",
+ name.text, buf->size
+ );
+ ret = UACPI_STATUS_AML_BAD_ENCODING;
+ break;
+ }
+
+ id = uacpi_kernel_alloc(size);
+ if (uacpi_unlikely(id == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ break;
+ }
+ id->size = buf->size;
+ id->value = UACPI_PTR_ADD(id, sizeof(uacpi_id_string));
+
+ uacpi_memcpy(id->value, buf->text, buf->size);
+ id->value[buf->size - 1] = '\0';
+ break;
+ }
+
+ case UACPI_OBJECT_INTEGER:
+ size += PNP_ID_LENGTH;
+
+ id = uacpi_kernel_alloc(size);
+ if (uacpi_unlikely(id == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ break;
+ }
+ id->size = PNP_ID_LENGTH;
+ id->value = UACPI_PTR_ADD(id, sizeof(uacpi_id_string));
+
+ uacpi_eisa_id_to_string(hid_ret->integer, id->value);
+ break;
+ }
+
+ uacpi_object_unref(hid_ret);
+ if (uacpi_likely_success(ret))
+ *out_id = id;
+ return ret;
+}
+
+void uacpi_free_id_string(uacpi_id_string *id)
+{
+ if (id == UACPI_NULL)
+ return;
+
+ uacpi_free(id, sizeof(uacpi_id_string) + id->size);
+}
+
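+/*
+ * Evaluate _CID, which may be a single integer/string or a package of
+ * them. Every ID is normalized to a string and packed into a single
+ * allocation; release the result with uacpi_free_pnp_id_list().
+ */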
+uacpi_status uacpi_eval_cid(
+ uacpi_namespace_node *node, uacpi_pnp_id_list **out_list
+)
+{
+ uacpi_status ret;
+ uacpi_object *object, *cid_ret;
+ uacpi_object **objects;
+ uacpi_size num_ids, i;
+ uacpi_u32 size;
+ uacpi_id_string *id;
+ uacpi_char *id_buffer;
+ uacpi_pnp_id_list *list;
+
+ ret = uacpi_eval_typed(
+ node, "_CID", UACPI_NULL,
+ UACPI_OBJECT_INTEGER_BIT | UACPI_OBJECT_STRING_BIT |
+ UACPI_OBJECT_PACKAGE_BIT,
+ &cid_ret
+ );
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
+ switch (cid_ret->type) {
+ case UACPI_OBJECT_PACKAGE:
+ objects = cid_ret->package->objects;
+ num_ids = cid_ret->package->count;
+ break;
+ default:
+ objects = &cid_ret;
+ num_ids = 1;
+ break;
+ }
+
+ size = sizeof(uacpi_pnp_id_list);
+ size += num_ids * sizeof(uacpi_id_string);
+
+ for (i = 0; i < num_ids; ++i) {
+ object = objects[i];
+
+ switch (object->type) {
+ case UACPI_OBJECT_STRING: {
+ uacpi_size buf_size = object->buffer->size;
+
+ if (uacpi_unlikely(buf_size == 0)) {
+ uacpi_object_name name = uacpi_namespace_node_name(node);
+
+ uacpi_error(
+ "%.4s._CID: empty EISA ID string (sub-object %zu)\n",
+ name.text, i
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+
+ size += buf_size;
+ if (uacpi_unlikely(size < buf_size)) {
+ uacpi_object_name name = uacpi_namespace_node_name(node);
+
+ uacpi_error(
+ "%.4s._CID: buffer size overflow (+ %zu)\n",
+ name.text, buf_size
+ );
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ break;
+ }
+
+ case UACPI_OBJECT_INTEGER:
+ size += PNP_ID_LENGTH;
+ break;
+ default: {
+ uacpi_object_name name = uacpi_namespace_node_name(node);
+
+ uacpi_error(
+ "%.4s._CID: invalid package sub-object %zu type: %s\n",
+ name.text, i,
+ uacpi_object_type_to_string(object->type)
+ );
+ return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
+ }
+ }
+ }
+
+ list = uacpi_kernel_alloc(size);
+ if (uacpi_unlikely(list == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ list->num_ids = num_ids;
+ list->size = size - sizeof(uacpi_pnp_id_list);
+
+ id_buffer = UACPI_PTR_ADD(list, sizeof(uacpi_pnp_id_list));
+ id_buffer += num_ids * sizeof(uacpi_id_string);
+
+ for (i = 0; i < num_ids; ++i) {
+ object = objects[i];
+ id = &list->ids[i];
+
+ switch (object->type) {
+ case UACPI_OBJECT_STRING: {
+ uacpi_buffer *buf = object->buffer;
+
+ id->size = buf->size;
+ id->value = id_buffer;
+
+ uacpi_memcpy(id->value, buf->text, id->size);
+ id->value[id->size - 1] = '\0';
+ break;
+ }
+
+ case UACPI_OBJECT_INTEGER:
+ id->size = PNP_ID_LENGTH;
+ id->value = id_buffer;
+ uacpi_eisa_id_to_string(object->integer, id_buffer);
+ break;
+ }
+
+ id_buffer += id->size;
+ }
+
+ uacpi_object_unref(cid_ret);
+ *out_list = list;
+ return ret;
+}
+
+void uacpi_free_pnp_id_list(uacpi_pnp_id_list *list)
+{
+ if (list == UACPI_NULL)
+ return;
+
+ uacpi_free(list, sizeof(uacpi_pnp_id_list) + list->size);
+}
+
+uacpi_status uacpi_eval_sta(uacpi_namespace_node *node, uacpi_u32 *flags)
+{
+ uacpi_status ret;
+ uacpi_u64 value = 0;
+
+ ret = uacpi_eval_integer(node, "_STA", UACPI_NULL, &value);
+
+ /*
+ * ACPI 6.5 specification:
+ * If a device object (including the processor object) does not have
+ * an _STA object, then OSPM assumes that all of the above bits are
+ * set (i.e., the device is present, enabled, shown in the UI,
+ * and functioning).
+ */
+ if (ret == UACPI_STATUS_NOT_FOUND) {
+ value = 0xFFFFFFFF;
+ ret = UACPI_STATUS_OK;
+ }
+
+ *flags = value;
+ return ret;
+}
+
+uacpi_status uacpi_eval_adr(uacpi_namespace_node *node, uacpi_u64 *out)
+{
+ return uacpi_eval_integer(node, "_ADR", UACPI_NULL, out);
+}
+
+#define CLS_REPR_SIZE 7
+
+static uacpi_u8 extract_package_byte_or_zero(uacpi_package *pkg, uacpi_size i)
+{
+ uacpi_object *obj;
+
+ if (uacpi_unlikely(pkg->count <= i))
+ return 0;
+
+ obj = pkg->objects[i];
+ if (uacpi_unlikely(obj->type != UACPI_OBJECT_INTEGER))
+ return 0;
+
+ return obj->integer;
+}
+
+uacpi_status uacpi_eval_cls(
+ uacpi_namespace_node *node, uacpi_id_string **out_id
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_package *pkg;
+ uacpi_u8 class_codes[3];
+ uacpi_id_string *id_string;
+
+ ret = uacpi_eval_typed(
+ node, "_CLS", UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, &obj
+ );
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
+ pkg = obj->package;
+ class_codes[0] = extract_package_byte_or_zero(pkg, 0);
+ class_codes[1] = extract_package_byte_or_zero(pkg, 1);
+ class_codes[2] = extract_package_byte_or_zero(pkg, 2);
+
+ id_string = uacpi_kernel_alloc(sizeof(uacpi_id_string) + CLS_REPR_SIZE);
+ if (uacpi_unlikely(id_string == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ id_string->size = CLS_REPR_SIZE;
+ id_string->value = UACPI_PTR_ADD(id_string, sizeof(uacpi_id_string));
+
+ uacpi_snprintf(
+ id_string->value, CLS_REPR_SIZE, "%02X%02X%02X",
+ class_codes[0], class_codes[1], class_codes[2]
+ );
+
+out:
+ if (uacpi_likely_success(ret))
+ *out_id = id_string;
+
+ uacpi_object_unref(obj);
+ return ret;
+}
+
+uacpi_status uacpi_eval_uid(
+ uacpi_namespace_node *node, uacpi_id_string **out_uid
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj;
+ uacpi_id_string *id_string;
+ uacpi_u32 size;
+
+ ret = uacpi_eval_typed(
+ node, "_UID", UACPI_NULL,
+ UACPI_OBJECT_INTEGER_BIT | UACPI_OBJECT_STRING_BIT,
+ &obj
+ );
+ if (ret != UACPI_STATUS_OK)
+ return ret;
+
+ if (obj->type == UACPI_OBJECT_STRING) {
+ size = obj->buffer->size;
+ if (uacpi_unlikely(size == 0 || size > 0xE0000000)) {
+ uacpi_object_name name = uacpi_namespace_node_name(node);
+
+ uacpi_error(
+ "invalid %.4s._UID string size: %u\n",
+ name.text, size
+ );
+ ret = UACPI_STATUS_AML_BAD_ENCODING;
+ goto out;
+ }
+ } else {
+ size = uacpi_snprintf(
+ UACPI_NULL, 0, "%"UACPI_PRIu64, UACPI_FMT64(obj->integer)
+ ) + 1;
+ }
+
+ id_string = uacpi_kernel_alloc(sizeof(uacpi_id_string) + size);
+ if (uacpi_unlikely(id_string == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ id_string->value = UACPI_PTR_ADD(id_string, sizeof(uacpi_id_string));
+ id_string->size = size;
+
+ if (obj->type == UACPI_OBJECT_STRING) {
+ uacpi_memcpy(id_string->value, obj->buffer->text, size);
+ id_string->value[size - 1] = '\0';
+ } else {
+ uacpi_snprintf(
+ id_string->value, id_string->size, "%"UACPI_PRIu64,
+ UACPI_FMT64(obj->integer)
+ );
+ }
+
+out:
+ if (uacpi_likely_success(ret))
+ *out_uid = id_string;
+
+ uacpi_object_unref(obj);
+ return ret;
+}
+
+static uacpi_bool matches_any(
+ uacpi_id_string *id, const uacpi_char *const *ids
+)
+{
+ uacpi_size i;
+
+ for (i = 0; ids[i]; ++i) {
+ if (uacpi_strcmp(id->value, ids[i]) == 0)
+ return UACPI_TRUE;
+ }
+
+ return UACPI_FALSE;
+}
+
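+/*
+ * Evaluate a run of _SxD or _SxW methods by bumping the sleep state digit
+ * in 'template' on every iteration. Methods that are missing or fail to
+ * evaluate yield 0xFF in 'out_values'; the call succeeds if at least one
+ * method evaluated successfully.
+ */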
+static uacpi_status uacpi_eval_dstate_method_template(
+ uacpi_namespace_node *parent, uacpi_char *template, uacpi_u8 num_methods,
+ uacpi_u8 *out_values
+)
+{
+ uacpi_u8 i;
+ uacpi_status ret = UACPI_STATUS_NOT_FOUND, eval_ret;
+ uacpi_object *obj;
+
+ // We expect either _SxD or _SxW, so increment template[2]
+ for (i = 0; i < num_methods; ++i, template[2]++) {
+ eval_ret = uacpi_eval_typed(
+ parent, template, UACPI_NULL, UACPI_OBJECT_INTEGER_BIT, &obj
+ );
+ if (eval_ret == UACPI_STATUS_OK) {
+ ret = UACPI_STATUS_OK;
+ out_values[i] = obj->integer;
+ uacpi_object_unref(obj);
+ continue;
+ }
+
+ out_values[i] = 0xFF;
+ if (uacpi_unlikely(eval_ret != UACPI_STATUS_NOT_FOUND)) {
+ const char *path;
+
+ path = uacpi_namespace_node_generate_absolute_path(parent);
+ uacpi_warn(
+ "failed to evaluate %s.%s: %s\n",
+ path, template, uacpi_status_to_string(eval_ret)
+ );
+ uacpi_free_dynamic_string(path);
+ }
+ }
+
+ return ret;
+}
+
+#define NODE_INFO_EVAL_ADD_ID(name) \
+ if (uacpi_eval_##name(node, &name) == UACPI_STATUS_OK) { \
+ size += name->size; \
+ if (uacpi_unlikely(size < name->size)) { \
+ ret = UACPI_STATUS_AML_BAD_ENCODING; \
+ goto out; \
+ } \
+ }
+
+#define NODE_INFO_COPY_ID(name, flag) \
+ if (name != UACPI_NULL) { \
+ flags |= UACPI_NS_NODE_INFO_HAS_##flag; \
+ info->name.value = cursor; \
+ info->name.size = name->size; \
+ uacpi_memcpy(cursor, name->value, name->size); \
+ cursor += name->size; \
+ } else { \
+ uacpi_memzero(&info->name, sizeof(*name)); \
+ } \
+
+uacpi_status uacpi_get_namespace_node_info(
+ uacpi_namespace_node *node, uacpi_namespace_node_info **out_info
+)
+{
+ uacpi_status ret = UACPI_STATUS_OK;
+ uacpi_u32 size = sizeof(uacpi_namespace_node_info);
+ uacpi_object *obj;
+ uacpi_namespace_node_info *info;
+ uacpi_id_string *hid = UACPI_NULL, *uid = UACPI_NULL, *cls = UACPI_NULL;
+ uacpi_pnp_id_list *cid = UACPI_NULL;
+ uacpi_char *cursor;
+ uacpi_u64 adr = 0;
+ uacpi_u8 flags = 0;
+ uacpi_u8 sxd[4], sxw[5];
+
+ obj = uacpi_namespace_node_get_object(node);
+ if (uacpi_unlikely(obj == UACPI_NULL))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ if (obj->type == UACPI_OBJECT_DEVICE ||
+ obj->type == UACPI_OBJECT_PROCESSOR) {
+ char dstate_method_template[5] = { '_', 'S', '1', 'D', '\0' };
+
+ NODE_INFO_EVAL_ADD_ID(hid)
+ NODE_INFO_EVAL_ADD_ID(uid)
+ NODE_INFO_EVAL_ADD_ID(cls)
+ NODE_INFO_EVAL_ADD_ID(cid)
+
+ if (uacpi_eval_adr(node, &adr) == UACPI_STATUS_OK)
+ flags |= UACPI_NS_NODE_INFO_HAS_ADR;
+
+ if (uacpi_eval_dstate_method_template(
+ node, dstate_method_template, sizeof(sxd), sxd
+ ) == UACPI_STATUS_OK)
+ flags |= UACPI_NS_NODE_INFO_HAS_SXD;
+
+ dstate_method_template[2] = '0';
+ dstate_method_template[3] = 'W';
+
+ if (uacpi_eval_dstate_method_template(
+ node, dstate_method_template, sizeof(sxw), sxw
+ ) == UACPI_STATUS_OK)
+ flags |= UACPI_NS_NODE_INFO_HAS_SXW;
+ }
+
+ info = uacpi_kernel_alloc(size);
+ if (uacpi_unlikely(info == UACPI_NULL)) {
+ ret = UACPI_STATUS_OUT_OF_MEMORY;
+ goto out;
+ }
+ info->size = size;
+ cursor = UACPI_PTR_ADD(info, sizeof(uacpi_namespace_node_info));
+ info->name = uacpi_namespace_node_name(node);
+ info->type = obj->type;
+ info->num_params = info->type == UACPI_OBJECT_METHOD ? obj->method->args : 0;
+
+ info->adr = adr;
+ if (flags & UACPI_NS_NODE_INFO_HAS_SXD)
+ uacpi_memcpy(info->sxd, sxd, sizeof(sxd));
+ else
+ uacpi_memzero(info->sxd, sizeof(info->sxd));
+
+ if (flags & UACPI_NS_NODE_INFO_HAS_SXW)
+ uacpi_memcpy(info->sxw, sxw, sizeof(sxw));
+ else
+ uacpi_memzero(info->sxw, sizeof(info->sxw));
+
+ if (cid != UACPI_NULL) {
+ uacpi_u32 i;
+
+ uacpi_memcpy(&info->cid, cid, cid->size + sizeof(*cid));
+ cursor += cid->num_ids * sizeof(uacpi_id_string);
+
+ for (i = 0; i < cid->num_ids; ++i) {
+ info->cid.ids[i].value = cursor;
+ cursor += info->cid.ids[i].size;
+ }
+
+ flags |= UACPI_NS_NODE_INFO_HAS_CID;
+ } else {
+ uacpi_memzero(&info->cid, sizeof(*cid));
+ }
+
+ NODE_INFO_COPY_ID(hid, HID)
+ NODE_INFO_COPY_ID(uid, UID)
+ NODE_INFO_COPY_ID(cls, CLS)
+
+out:
+ if (uacpi_likely_success(ret)) {
+ info->flags = flags;
+ *out_info = info;
+ }
+
+ uacpi_free_id_string(hid);
+ uacpi_free_id_string(uid);
+ uacpi_free_id_string(cls);
+ uacpi_free_pnp_id_list(cid);
+ return ret;
+}
+
+void uacpi_free_namespace_node_info(uacpi_namespace_node_info *info)
+{
+ if (info == UACPI_NULL)
+ return;
+
+ uacpi_free(info, info->size);
+}
+
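+/*
+ * Check whether the device's _HID or any of its _CID entries matches one
+ * of the strings in the NULL-terminated 'ids' array.
+ */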
+uacpi_bool uacpi_device_matches_pnp_id(
+ uacpi_namespace_node *node, const uacpi_char *const *ids
+)
+{
+ uacpi_status st;
+ uacpi_bool ret = UACPI_FALSE;
+ uacpi_id_string *id = UACPI_NULL;
+ uacpi_pnp_id_list *id_list = UACPI_NULL;
+
+ st = uacpi_eval_hid(node, &id);
+ if (st == UACPI_STATUS_OK && matches_any(id, ids)) {
+ ret = UACPI_TRUE;
+ goto out;
+ }
+
+ st = uacpi_eval_cid(node, &id_list);
+ if (st == UACPI_STATUS_OK) {
+ uacpi_size i;
+
+ for (i = 0; i < id_list->num_ids; ++i) {
+ if (matches_any(&id_list->ids[i], ids)) {
+ ret = UACPI_TRUE;
+ goto out;
+ }
+ }
+ }
+
+out:
+ uacpi_free_id_string(id);
+ uacpi_free_pnp_id_list(id_list);
+ return ret;
+}
+
+struct device_find_ctx {
+ const uacpi_char *const *target_hids;
+ void *user;
+ uacpi_iteration_callback cb;
+};
+
+static uacpi_iteration_decision find_one_device(
+ void *opaque, uacpi_namespace_node *node, uacpi_u32 depth
+)
+{
+ struct device_find_ctx *ctx = opaque;
+ uacpi_status ret;
+ uacpi_u32 flags;
+
+ if (!uacpi_device_matches_pnp_id(node, ctx->target_hids))
+ return UACPI_ITERATION_DECISION_CONTINUE;
+
+ ret = uacpi_eval_sta(node, &flags);
+ if (uacpi_unlikely_error(ret))
+ return UACPI_ITERATION_DECISION_NEXT_PEER;
+
+ if (!(flags & ACPI_STA_RESULT_DEVICE_PRESENT) &&
+ !(flags & ACPI_STA_RESULT_DEVICE_FUNCTIONING))
+ return UACPI_ITERATION_DECISION_NEXT_PEER;
+
+ return ctx->cb(ctx->user, node, depth);
+}
+
+
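+/*
+ * Walk the subtree under 'parent' and invoke 'cb' for every device whose
+ * _HID/_CID matches one of 'hids' and whose _STA reports it as present or
+ * at least functioning.
+ */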
+uacpi_status uacpi_find_devices_at(
+ uacpi_namespace_node *parent, const uacpi_char *const *hids,
+ uacpi_iteration_callback cb, void *user
+)
+{
+ struct device_find_ctx ctx = { 0 };
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ ctx.target_hids = hids;
+ ctx.user = user;
+ ctx.cb = cb;
+
+ return uacpi_namespace_for_each_child(
+ parent, find_one_device, UACPI_NULL, UACPI_OBJECT_DEVICE_BIT,
+ UACPI_MAX_DEPTH_ANY, &ctx
+ );
+}
+
+uacpi_status uacpi_find_devices(
+ const uacpi_char *hid, uacpi_iteration_callback cb, void *user
+)
+{
+ const uacpi_char *hids[2] = {
+ UACPI_NULL, UACPI_NULL
+ };
+
+ hids[0] = hid;
+
+ return uacpi_find_devices_at(uacpi_namespace_root(), hids, cb, user);
+}
+
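+/*
+ * Tell the firmware which interrupt model the OS is using by evaluating
+ * \_PIC with the given model; a missing _PIC method is treated as success.
+ */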
+uacpi_status uacpi_set_interrupt_model(uacpi_interrupt_model model)
+{
+ uacpi_status ret;
+ uacpi_object *arg;
+ uacpi_object_array args;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ arg = uacpi_create_object(UACPI_OBJECT_INTEGER);
+ if (uacpi_unlikely(arg == UACPI_NULL))
+ return UACPI_STATUS_OUT_OF_MEMORY;
+
+ arg->integer = model;
+ args.objects = &arg;
+ args.count = 1;
+
+ ret = uacpi_eval(uacpi_namespace_root(), "_PIC", &args, UACPI_NULL);
+ uacpi_object_unref(arg);
+
+ if (ret == UACPI_STATUS_NOT_FOUND)
+ ret = UACPI_STATUS_OK;
+
+ return ret;
+}
+
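+/*
+ * Evaluate _PRT under 'parent' and convert every 4-element sub-package
+ * (address, pin, source, source index) into a uacpi_pci_routing_table_entry,
+ * resolving string sources to namespace nodes. The table must be released
+ * with uacpi_free_pci_routing_table().
+ */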
+uacpi_status uacpi_get_pci_routing_table(
+ uacpi_namespace_node *parent, uacpi_pci_routing_table **out_table
+)
+{
+ uacpi_status ret;
+ uacpi_object *obj, *entry_obj, *elem_obj;
+ uacpi_package *table_pkg, *entry_pkg;
+ uacpi_pci_routing_table_entry *entry;
+ uacpi_pci_routing_table *table;
+ uacpi_size size, i;
+
+ UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED);
+
+ obj = uacpi_namespace_node_get_object(parent);
+ if (uacpi_unlikely(obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE))
+ return UACPI_STATUS_INVALID_ARGUMENT;
+
+ ret = uacpi_eval_typed(
+ parent, "_PRT", UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, &obj
+ );
+ if (uacpi_unlikely_error(ret))
+ return ret;
+
+ table_pkg = obj->package;
+ if (uacpi_unlikely(table_pkg->count == 0 || table_pkg->count > 1024)) {
+ uacpi_error("invalid number of _PRT entries: %zu\n", table_pkg->count);
+ uacpi_object_unref(obj);
+ return UACPI_STATUS_AML_BAD_ENCODING;
+ }
+
+ size = table_pkg->count * sizeof(uacpi_pci_routing_table_entry);
+ table = uacpi_kernel_alloc(sizeof(uacpi_pci_routing_table) + size);
+ if (uacpi_unlikely(table == UACPI_NULL)) {
+ uacpi_object_unref(obj);
+ return UACPI_STATUS_OUT_OF_MEMORY;
+ }
+ table->num_entries = table_pkg->count;
+
+ for (i = 0; i < table_pkg->count; ++i) {
+ entry_obj = table_pkg->objects[i];
+
+ if (uacpi_unlikely(entry_obj->type != UACPI_OBJECT_PACKAGE)) {
+ uacpi_error("_PRT sub-object %zu is not a package: %s\n",
+ i, uacpi_object_type_to_string(entry_obj->type));
+ goto out_bad_encoding;
+ }
+
+ entry_pkg = entry_obj->package;
+ if (uacpi_unlikely(entry_pkg->count != 4)) {
+ uacpi_error("invalid _PRT sub-package entry count %zu\n",
+ entry_pkg->count);
+ goto out_bad_encoding;
+ }
+
+ entry = &table->entries[i];
+
+ elem_obj = entry_pkg->objects[0];
+ if (uacpi_unlikely(elem_obj->type != UACPI_OBJECT_INTEGER)) {
+ uacpi_error("invalid _PRT sub-package %zu address type: %s\n",
+ i, uacpi_object_type_to_string(elem_obj->type));
+ goto out_bad_encoding;
+ }
+ entry->address = elem_obj->integer;
+
+ elem_obj = entry_pkg->objects[1];
+ if (uacpi_unlikely(elem_obj->type != UACPI_OBJECT_INTEGER)) {
+ uacpi_error("invalid _PRT sub-package %zu pin type: %s\n",
+ i, uacpi_object_type_to_string(elem_obj->type));
+ goto out_bad_encoding;
+ }
+ entry->pin = elem_obj->integer;
+
+ elem_obj = entry_pkg->objects[2];
+ switch (elem_obj->type) {
+ case UACPI_OBJECT_STRING:
+ ret = uacpi_object_resolve_as_aml_namepath(
+ elem_obj, parent, &entry->source
+ );
+ if (uacpi_unlikely_error(ret)) {
+ uacpi_error("unable to lookup _PRT source %s: %s\n",
+ elem_obj->buffer->text, uacpi_status_to_string(ret));
+ goto out_bad_encoding;
+ }
+ break;
+ case UACPI_OBJECT_INTEGER:
+ entry->source = UACPI_NULL;
+ break;
+ default:
+ uacpi_error("invalid _PRT sub-package %zu source type: %s\n",
+ i, uacpi_object_type_to_string(elem_obj->type));
+ goto out_bad_encoding;
+ }
+
+ elem_obj = entry_pkg->objects[3];
+ if (uacpi_unlikely(elem_obj->type != UACPI_OBJECT_INTEGER)) {
+ uacpi_error("invalid _PRT sub-package %zu source index type: %s\n",
+ i, uacpi_object_type_to_string(elem_obj->type));
+ goto out_bad_encoding;
+ }
+ entry->index = elem_obj->integer;
+ }
+
+ uacpi_object_unref(obj);
+ *out_table = table;
+ return UACPI_STATUS_OK;
+
+out_bad_encoding:
+ uacpi_object_unref(obj);
+ uacpi_free_pci_routing_table(table);
+ return UACPI_STATUS_AML_BAD_ENCODING;
+}
+
+void uacpi_free_pci_routing_table(uacpi_pci_routing_table *table)
+{
+ if (table == UACPI_NULL)
+ return;
+
+ uacpi_free(
+ table,
+ sizeof(uacpi_pci_routing_table) +
+ table->num_entries * sizeof(uacpi_pci_routing_table_entry)
+ );
+}
+
+void uacpi_free_dynamic_string(const uacpi_char *str)
+{
+ if (str == UACPI_NULL)
+ return;
+
+ uacpi_free((void*)str, uacpi_strlen(str) + 1);
+}
+
+#endif // !UACPI_BAREBONES_MODE