author    Ian Moffett <ian@osmora.org>  2025-10-19 19:35:54 -0400
committer Ian Moffett <ian@osmora.org>  2025-10-19 19:36:13 -0400
commit    1c5cc6601bf73443a27510df636998521c85fb26 (patch)
tree      8355a06f60a632b50fde341394ff162462b8a300
parent    b7e43af6a43efd6d80f81bb613efacfe326d18b6 (diff)
kern/amd64: cpu: Add SIMD support
Signed-off-by: Ian Moffett <ian@osmora.org>
-rw-r--r--  src/sys/arch/amd64/cpu/cpu_conf.c  3
-rw-r--r--  src/sys/arch/amd64/simd.S  81
2 files changed, 84 insertions(+), 0 deletions(-)
diff --git a/src/sys/arch/amd64/cpu/cpu_conf.c b/src/sys/arch/amd64/cpu/cpu_conf.c
index da2d747..8a3b4e6 100644
--- a/src/sys/arch/amd64/cpu/cpu_conf.c
+++ b/src/sys/arch/amd64/cpu/cpu_conf.c
@@ -44,7 +44,9 @@
extern void syscall_isr(void);
extern void core_halt_isr(void);
+
void core_halt_handler(void);
+int simd_init(void);
void
core_halt_handler(void)
@@ -193,6 +195,7 @@ cpu_conf(struct pcore *pcore)
pcore->self = pcore;
wrmsr(IA32_GS_BASE, (uintptr_t)pcore);
+ simd_init();
init_vectors();
idt_load();
cpu_identify(mdcore);
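
Note that the hunk above calls simd_init() purely for its side effects. Since the routine (added below) reports feature support through its return value, a caller could also record the outcome. A minimal sketch; the simd_level variable and its use are hypothetical and not part of this commit:

	/* Hypothetical: capture simd_init()'s result in cpu_conf().
	 * 0 = SSE and AVX enabled, 1 = SSE only, -1 = no SSE at all.
	 */
	int simd_level = simd_init();
	if (simd_level < 0) {
		/* No SSE: avoid FXSAVE/FXRSTOR based state switching */
	}
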
diff --git a/src/sys/arch/amd64/simd.S b/src/sys/arch/amd64/simd.S
new file mode 100644
index 0000000..f0673f9
--- /dev/null
+++ b/src/sys/arch/amd64/simd.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2025 Ian Marco Moffett and L5 engineers
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+ .text
+ .globl simd_init
+simd_init:
+	/*
+	 * Enable SIMD. If both SSE and AVX are
+	 * supported, a value of zero is returned.
+	 * If SSE is supported but AVX is not, a
+	 * value of one is returned. If neither is
+	 * supported, this routine returns -1.
+	 */
+
+	// Do we support SSE? (CPUID.1:EDX bit 25)
+	push %rbx		// CPUID clobbers RBX, which is callee-saved
+	mov $1, %eax
+	cpuid
+	bt $25, %edx
+	jnc .sse_not_sup
+
+	mov %cr0, %rax		// Old CR0 -> RAX
+	and $0xFFFB, %ax	// Clear CR0.EM (no x87 emulation)
+	or $0x02, %ax		// Set CR0.MP (co-processor monitoring)
+	mov %rax, %cr0		// Update CR0 with new flags
+
+	mov %cr4, %rax		// Old CR4 -> RAX
+	or $0x200, %ax		// Set CR4.OSFXSR (enable FXSAVE/FXRSTOR)
+	or $0x400, %ax		// Set CR4.OSXMMEXCPT (SIMD FP exceptions)
+	mov %rax, %cr4		// Update CR4 with new flags
+
+	mov $1, %eax		// CPUID leaf 1
+	cpuid			// ECX bit 27 is OSXSAVE, bit 28 is AVX
+	and $0x18000000, %ecx	// Keep only ECX.OSXSAVE and ECX.AVX
+	cmp $0x18000000, %ecx	// Are both supported? (TEST/JNC cannot
+	jne .avx_not_sup	// work here: TEST always clears CF)
+
+	// Enable AVX
+	mov %cr4, %rax		// Old CR4 -> RAX
+	or $0x40000, %eax	// Set CR4.OSXSAVE (bit 18)
+	mov %rax, %cr4		// XGETBV/XSETBV would #UD without it
+	xor %rcx, %rcx		// Select XCR0
+	xgetbv			// Load extended control register
+	or $0x07, %eax		// Set x87 + SSE + AVX bits
+	xsetbv			// Store new flags
+	xor %rax, %rax		// Everything is good
+	pop %rbx		// Restore callee-saved RBX
+	retq			// Return back to caller
+.sse_not_sup:
+	mov $-1, %rax		// Neither SSE nor AVX
+	pop %rbx		// Restore callee-saved RBX
+	retq
+.avx_not_sup:
+	mov $1, %rax		// SSE only
+	pop %rbx		// Restore callee-saved RBX
+	retq
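
For reference, the architecture-defined bits that simd_init touches can be written out as C constants. This is a sketch for readers following along; the bit positions come from the Intel SDM / AMD APM, while the macro names themselves are illustrative and not part of this commit:

	/* CR0 */
	#define CR0_MP         (1 << 1)   /* monitor co-processor */
	#define CR0_EM         (1 << 2)   /* x87 emulation; must be clear for SSE */
	/* CR4 */
	#define CR4_OSFXSR     (1 << 9)   /* OS supports FXSAVE/FXRSTOR */
	#define CR4_OSXMMEXCPT (1 << 10)  /* OS handles #XM (SIMD FP) exceptions */
	#define CR4_OSXSAVE    (1 << 18)  /* enables XGETBV/XSETBV and XSAVE */
	/* XCR0, selected by ECX = 0 for xgetbv/xsetbv */
	#define XCR0_X87       (1 << 0)   /* must always be set */
	#define XCR0_SSE       (1 << 1)
	#define XCR0_AVX       (1 << 2)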