Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/init_main.c      3
-rw-r--r--  sys/kern/kern_accnt.c   108
-rw-r--r--  sys/kern/kern_descrip.c   4
-rw-r--r--  sys/kern/kern_exit.c     13
-rw-r--r--  sys/kern/kern_sched.c   121
-rw-r--r--  sys/kern/kern_spawn.c    79
-rw-r--r--  sys/kern/kern_syscall.c   1
7 files changed, 255 insertions(+), 74 deletions(-)
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 6b3e09b..5e351a8 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -52,6 +52,7 @@
#endif /* _INSTALL_MEDIA */
struct proc g_proc0;
+struct proc *g_init;
static void
copyright(void)
@@ -114,7 +115,7 @@ main(void)
memset(&g_proc0, 0, sizeof(g_proc0));
/* Startup pid 1 */
- spawn(&g_proc0, start_init, NULL, 0, NULL);
+ spawn(&g_proc0, start_init, NULL, 0, &g_init);
md_inton();
/* Load all early drivers */
diff --git a/sys/kern/kern_accnt.c b/sys/kern/kern_accnt.c
new file mode 100644
index 0000000..cd15863
--- /dev/null
+++ b/sys/kern/kern_accnt.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System Accounting
+ */
+
+#include <sys/sched.h>
+#include <sys/schedvar.h>
+#include <sys/proc.h>
+#include <fs/ctlfs.h>
+#include <machine/cpu.h>
+#include <string.h>
+
+/* Called within kern_sched.c */
+void sched_accnt_init(void);
+
+static struct ctlops sched_stat_ctl;
+volatile size_t g_nthreads;
+
+static int
+ctl_stat_read(struct ctlfs_dev *cdp, struct sio_txn *sio)
+{
+ struct sched_stat stat;
+
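+ /* Never copy out more than one struct sched_stat */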
+ if (sio->len > sizeof(stat)) {
+ sio->len = sizeof(stat);
+ }
+
+ sched_stat(&stat);
+ memcpy(sio->buf, &stat, sio->len);
+ return sio->len;
+}
+
+/*
+ * Get scheduler accounting information
+ *
+ * @statp: Info gets copied here
+ */
+void
+sched_stat(struct sched_stat *statp)
+{
+ struct sched_cpu *cpustat;
+
+ statp->nproc = atomic_load_64(&g_nthreads);
+ statp->ncpu = cpu_count();
+ statp->quantum_usec = DEFAULT_TIMESLICE_USEC;
+
+ /*
+ * Set up the per-CPU info/statistics
+ */
+ for (int i = 0; i < CPU_MAX; ++i) {
+ cpustat = cpu_get_stat(i);
+ if (cpustat == NULL) {
+ break;
+ }
+
+ statp->cpus[i] = *cpustat;
+ }
+}
+
+void
+sched_accnt_init(void)
+{
+ char devname[] = "sched";
+ struct ctlfs_dev ctl;
+
+ /*
+ * Register some accounting information in
+ * '/ctl/sched/stat'
+ */
+ ctl.mode = 0444;
+ ctlfs_create_node(devname, &ctl);
+ ctl.devname = devname;
+ ctl.ops = &sched_stat_ctl;
+ ctlfs_create_entry("stat", &ctl);
+}
+
+static struct ctlops sched_stat_ctl = {
+ .read = ctl_stat_read,
+ .write = NULL
+};
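
For reference, a userspace consumer of the new '/ctl/sched/stat' node might look roughly like the sketch below. It assumes struct sched_stat (with the nproc, ncpu and quantum_usec fields filled in above) is visible to userland and that the usual open(2)/read(2) calls exist; the exact field types are not part of this diff, so the casts are defensive.

    #include <sys/sched.h>   /* assumed to export struct sched_stat */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct sched_stat stat;
        int fd;

        fd = open("/ctl/sched/stat", O_RDONLY);
        if (fd < 0) {
            return 1;
        }

        /* ctl_stat_read() clamps the transfer to sizeof(stat) */
        if (read(fd, &stat, sizeof(stat)) < 0) {
            close(fd);
            return 1;
        }

        printf("nproc=%zu ncpu=%u quantum=%u usec\n",
            (size_t)stat.nproc, (unsigned)stat.ncpu,
            (unsigned)stat.quantum_usec);
        close(fd);
        return 0;
    }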
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index b5ff144..57beaf6 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -154,6 +154,10 @@ fd_rw(unsigned int fd, void *buf, size_t count, uint8_t write)
struct sio_txn sio;
scret_t retval = 0;
+ if (fd >= PROC_MAX_FILEDES) {
+ return -EBADF;
+ }
+
if (count > SSIZE_MAX) {
retval = -EINVAL;
goto done;
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 2c9e2e4..9377eed 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -30,6 +30,7 @@
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/syslog.h>
+#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/filedesc.h>
#include <sys/vnode.h>
@@ -44,6 +45,9 @@
#define pr_trace(fmt, ...) kprintf("exit: " fmt, ##__VA_ARGS__)
#define pr_error(...) pr_trace(__VA_ARGS__)
+extern volatile size_t g_nthreads;
+extern struct proc *g_init;
+
static void
unload_td(struct proc *td)
{
@@ -147,14 +151,17 @@ exit1(struct proc *td, int flags)
curtd = this_td();
curpid = curtd->pid;
+
td->flags |= PROC_EXITING;
parent = td->parent;
- /* If we have any children, kill them too */
+ /* We have one less process in the system! */
+ atomic_dec_64(&g_nthreads);
+
+ /* Reassign children to init */
if (td->nleaves > 0) {
TAILQ_FOREACH(procp, &td->leafq, leaf_link) {
- if (!ISSET(procp->flags, PROC_EXITING))
- exit1(procp, flags);
+ procp->parent = g_init;
}
}
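
The hunk above replaces the old recursive kill: children of an exiting process are no longer torn down with it, they are handed to init (PID 1) and reaped later through waitpid(). The standalone model below shows just that list walk using the portable <sys/queue.h> macros; the struct is illustrative and, mirroring the hunk, only the parent pointer is updated.

    #include <sys/queue.h>
    #include <stdio.h>

    struct proc {
        int pid;
        struct proc *parent;
        TAILQ_HEAD(, proc) leafq;      /* children ("leaves") */
        TAILQ_ENTRY(proc) leaf_link;
    };

    int
    main(void)
    {
        struct proc init = { .pid = 1 };
        struct proc dying = { .pid = 7, .parent = &init };
        struct proc orphan = { .pid = 9, .parent = &dying };
        struct proc *procp;

        TAILQ_INIT(&dying.leafq);
        TAILQ_INSERT_TAIL(&dying.leafq, &orphan, leaf_link);

        /* exit1(): reassign every child of the dying process */
        TAILQ_FOREACH(procp, &dying.leafq, leaf_link) {
            procp->parent = &init;
        }

        printf("pid %d reparented to pid %d\n",
            orphan.pid, orphan.parent->pid);
        return 0;
    }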
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 24806db..23a1ebb 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -45,7 +45,8 @@
#define pr_trace(fmt, ...) kprintf("ksched: " fmt, ##__VA_ARGS__)
-void sched_switch(struct trapframe *tf);
+void md_sched_switch(struct trapframe *tf);
+void sched_accnt_init(void);
static sched_policy_t policy = SCHED_POLICY_MLFQ;
@@ -64,7 +65,7 @@ __cacheline_aligned static struct spinlock tdq_lock = {0};
/*
* Perform timer oneshot
*/
-static inline void
+void
sched_oneshot(bool now)
{
struct timer timer;
@@ -78,30 +79,36 @@ sched_oneshot(bool now)
}
/*
- * Save thread state and enqueue it back into one
- * of the ready queues.
+ * Returns true if a processor is associated
+ * with a specific thread
+ *
+ * @ci: CPU that wants to take 'td'
+ * @td: Thread to check against
*/
-static void
-sched_save_td(struct proc *td, struct trapframe *tf)
+static bool
+cpu_is_assoc(struct cpu_info *ci, struct proc *td)
{
/*
- * Save trapframe to process structure only
- * if PROC_EXEC is not set.
+ * If we are not pinned, any processor is
+ * associated.
*/
- if (!ISSET(td->flags, PROC_EXEC)) {
- memcpy(&td->tf, tf, sizeof(td->tf));
+ if (!ISSET(td->flags, PROC_PINNED)) {
+ return true;
}
- sched_enqueue_td(td);
+ return ci->id == td->affinity;
}
-static struct proc *
+struct proc *
sched_dequeue_td(void)
{
struct sched_queue *queue;
struct proc *td = NULL;
+ struct cpu_info *ci;
+ uint32_t ncpu = 0;
spinlock_acquire(&tdq_lock);
+ ci = this_cpu();
for (size_t i = 0; i < SCHED_NQUEUE; ++i) {
queue = &qlist[i];
@@ -121,6 +128,19 @@ sched_dequeue_td(void)
}
}
+ /*
+ * If we are on a multicore system and this isn't
+ * our process, don't take it. Some threads might
+ * be pinned to a specific processor.
+ */
+ ncpu = cpu_count();
+ while (!cpu_is_assoc(ci, td) && ncpu > 1) {
+ td = TAILQ_NEXT(td, link);
+ if (td == NULL) {
+ break;
+ }
+ }
+
if (td == NULL) {
continue;
}
@@ -197,52 +217,22 @@ td_pri_update(struct proc *td)
}
}
-void
-sched_switch_to(struct trapframe *tf, struct proc *td)
-{
- struct cpu_info *ci;
- struct pcb *pcbp;
-
- ci = this_cpu();
-
- if (tf != NULL) {
- memcpy(tf, &td->tf, sizeof(*tf));
- }
-
- ci->curtd = td;
- pcbp = &td->pcb;
- pmap_switch_vas(pcbp->addrsp);
-}
-
/*
- * Perform a context switch.
+ * MI work to be done during a context
+ * switch. Called by md_sched_switch()
*/
void
-sched_switch(struct trapframe *tf)
+mi_sched_switch(struct proc *from)
{
- struct proc *next_td, *td;
- struct cpu_info *ci;
-
- ci = this_cpu();
- td = ci->curtd;
- cons_detach();
-
- if (td != NULL) {
- if (td->pid == 0)
+ if (from != NULL) {
+ if (from->pid == 0)
return;
- dispatch_signals(td);
- td_pri_update(td);
- sched_save_td(td, tf);
- }
-
- if ((next_td = sched_dequeue_td()) == NULL) {
- sched_oneshot(false);
- return;
+ dispatch_signals(from);
+ td_pri_update(from);
}
- sched_switch_to(tf, next_td);
- sched_oneshot(false);
+ cons_detach();
}
/*
@@ -296,6 +286,35 @@ sched_detach(struct proc *td)
spinlock_release(&tdq_lock);
}
+/*
+ * Pin a process to a specific processor
+ *
+ * @td: Process to pin
+ * @cpu: Logical processor ID to pin `td' to.
+ *
+ * XXX: 'cpu' is a machine-independent value, representing
+ * CPU<n>
+ */
+void
+proc_pin(struct proc *td, affinity_t cpu)
+{
+ td->affinity = cpu;
+ td->flags |= PROC_PINNED;
+}
+
+/*
+ * Unpin a pinned process, allowing it to be
+ * picked up by any processor
+ *
+ * @td: Process to unpin
+ */
+void
+proc_unpin(struct proc *td)
+{
+ td->affinity = 0;
+ td->flags &= ~PROC_PINNED;
+}
+
void
sched_init(void)
{
@@ -306,4 +325,6 @@ sched_init(void)
pr_trace("prepared %d queues (policy=0x%x)\n",
SCHED_NQUEUE, policy);
+
+ sched_accnt_init();
}
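
A hypothetical in-kernel call site for the new affinity API: once pinned, cpu_is_assoc() only matches the processor whose ID equals td->affinity, so sched_dequeue_td() on every other CPU skips the thread. this_td() is the same helper used elsewhere in this diff; the CPU number here is arbitrary.

    static void
    run_on_cpu1(void)
    {
        struct proc *td = this_td();

        proc_pin(td, 1);     /* only CPU 1 may dequeue td now */
        /* ... latency-sensitive work ... */
        proc_unpin(td);      /* any processor may take td again */
    }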
diff --git a/sys/kern/kern_spawn.c b/sys/kern/kern_spawn.c
index fcaa194..b9551f3 100644
--- a/sys/kern/kern_spawn.c
+++ b/sys/kern/kern_spawn.c
@@ -28,11 +28,13 @@
*/
#include <sys/spawn.h>
+#include <sys/wait.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/syscall.h>
#include <sys/atomic.h>
@@ -47,7 +49,8 @@
#define ARGVP_MAX (ARG_MAX / sizeof(void *))
-static volatile size_t nthreads = 0;
+static size_t next_pid = 1;
+extern volatile size_t g_nthreads;
/*
* TODO: envp
@@ -94,6 +97,35 @@ spawn_thunk(void)
__builtin_unreachable();
}
+pid_t
+waitpid(pid_t pid, int *wstatus, int options)
+{
+ struct proc *child, *td;
+ pid_t ret;
+
+ td = this_td();
+ child = get_child(td, pid);
+
+ if (child == NULL) {
+ return -1;
+ }
+
+ /* Busy-wait (yielding) until the child has exited */
+ while (!ISSET(child->flags, PROC_ZOMB)) {
+ sched_yield();
+ }
+
+ /* Give back the status */
+ if (wstatus != NULL) {
+ copyout(&child->exit_status, wstatus, sizeof(*wstatus));
+ }
+
+ ret = child->pid;
+ proc_reap(child);
+ return ret;
+}
+
/*
* Spawn a new process
*
@@ -166,28 +198,11 @@ spawn(struct proc *cur, void(*func)(void), void *p, int flags, struct proc **new
newproc->mlgdr = mlgdr;
newproc->flags |= PROC_WAITED;
- newproc->pid = ++nthreads;
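+ /* PID allocation is now decoupled from the live thread count */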
+ atomic_inc_64(&g_nthreads);
+ newproc->pid = next_pid++;
signals_init(newproc);
sched_enqueue_td(newproc);
pid = newproc->pid;
-
- if (ISSET(flags, SPAWN_WAIT)) {
- cur->flags |= PROC_SLEEP;
-
- while (ISSET(cur->flags, PROC_SLEEP)) {
- sched_yield();
- }
- while (!ISSET(newproc->flags, PROC_ZOMB)) {
- sched_yield();
- }
-
- if (newproc->exit_status < 0) {
- pid = newproc->exit_status;
- }
-
- proc_reap(newproc);
- }
-
return pid;
}
@@ -205,6 +220,9 @@ get_child(struct proc *cur, pid_t pid)
struct proc *procp;
TAILQ_FOREACH(procp, &cur->leafq, leaf_link) {
+ if (procp == NULL) {
+ continue;
+ }
if (procp->pid == pid) {
return procp;
}
@@ -214,6 +232,27 @@ get_child(struct proc *cur, pid_t pid)
}
/*
+ * arg0: PID
+ * arg1: wstatus
+ * arg2: options
+ *
+ * Returns the PID of the terminated child,
+ * or -1 on failure.
+ */
+scret_t
+sys_waitpid(struct syscall_args *scargs)
+{
+ pid_t pid;
+ int *u_wstatus;
+ int options;
+
+ pid = scargs->arg0;
+ u_wstatus = (void *)scargs->arg1;
+ options = scargs->arg2;
+ return waitpid(pid, u_wstatus, options);
+}
+
+/*
* arg0: The file /path/to/executable
* arg1: Argv
* arg2: Envp (TODO)
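
Paired with the table slot added in kern_syscall.c below, a minimal userland wrapper could look like this sketch. The generic syscall() trap function and the SYS_waitpid constant are assumptions here, since the userland side is not part of this diff.

    #include <sys/syscall.h>   /* assumed to define SYS_waitpid */
    #include <sys/types.h>
    #include <stdint.h>

    pid_t
    waitpid(pid_t pid, int *wstatus, int options)
    {
        /* arg0 = pid, arg1 = status pointer, arg2 = options,
         * matching the unpacking in sys_waitpid() */
        return syscall(SYS_waitpid, pid, (uintptr_t)wstatus, options);
    }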
diff --git a/sys/kern/kern_syscall.c b/sys/kern/kern_syscall.c
index a28d2dd..cb7e1d2 100644
--- a/sys/kern/kern_syscall.c
+++ b/sys/kern/kern_syscall.c
@@ -59,6 +59,7 @@ scret_t(*g_sctab[])(struct syscall_args *) = {
sys_getppid, /* SYS_getppid */
sys_setuid, /* SYS_setuid */
sys_getuid, /* SYS_getuid */
+ sys_waitpid, /* SYS_waitpid */
};
const size_t MAX_SYSCALLS = NELEM(g_sctab);