Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_proc.c     32
-rw-r--r--  sys/kern/kern_spawn.c    36
-rw-r--r--  sys/kern/kern_sysctl.c    6
-rw-r--r--  sys/kern/kern_work.c    274
4 files changed, 318 insertions, 30 deletions
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 87dcc74..8bc5680 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -29,14 +29,18 @@
#include <sys/types.h>
#include <sys/proc.h>
+#include <sys/errno.h>
#include <sys/cdefs.h>
#include <sys/vnode.h>
+#include <sys/tree.h>
#include <sys/syscall.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <string.h>
#include <crc32.h>
+extern volatile size_t g_nthreads;
+
pid_t
getpid(void)
{
@@ -50,7 +54,6 @@ getpid(void)
return td->pid;
}
-
pid_t
getppid(void)
{
@@ -100,6 +103,33 @@ proc_coredump(struct proc *td, uintptr_t fault_addr)
fd_close(fd);
}
+int
+proc_init(struct proc *td, struct proc *parent)
+{
+ struct mmap_lgdr *mlgdr;
+
+ mlgdr = dynalloc(sizeof(*mlgdr));
+ if (mlgdr == NULL) {
+ return -ENOMEM;
+ }
+
+ /* Add to parent leafq */
+ TAILQ_INSERT_TAIL(&parent->leafq, td, leaf_link);
+ atomic_inc_int(&parent->nleaves);
+ atomic_inc_64(&g_nthreads);
+ td->parent = parent;
+ td->exit_status = -1;
+ td->cred = parent->cred;
+
+ /* Initialize the mmap ledger */
+ mlgdr->nbytes = 0;
+ RBT_INIT(lgdr_entries, &mlgdr->hd);
+ td->mlgdr = mlgdr;
+ td->flags |= PROC_WAITED;
+ signals_init(td);
+ return 0;
+}
+
scret_t
sys_getpid(struct syscall_args *scargs)
{
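
proc_init() centralizes the child bring-up that spawn() previously did
inline. A minimal caller sketch based on the contract visible in this
diff (the caller allocates and zeroes the proc, and still owns it if
proc_init() fails; `cur' stands in for the parent):

	struct proc *newproc;
	int error;

	newproc = dynalloc(sizeof(*newproc));
	if (newproc == NULL) {
		return -ENOMEM;
	}

	memset(newproc, 0, sizeof(*newproc));
	error = proc_init(newproc, cur);
	if (error < 0) {
		/* proc_init() frees nothing the caller passed in */
		dynfree(newproc);
		return error;
	}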
diff --git a/sys/kern/kern_spawn.c b/sys/kern/kern_spawn.c
index b9551f3..7962ced 100644
--- a/sys/kern/kern_spawn.c
+++ b/sys/kern/kern_spawn.c
@@ -34,10 +34,8 @@
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/errno.h>
-#include <sys/atomic.h>
#include <sys/syslog.h>
#include <sys/syscall.h>
-#include <sys/atomic.h>
#include <sys/signal.h>
#include <sys/limits.h>
#include <sys/sched.h>
@@ -50,7 +48,6 @@
#define ARGVP_MAX (ARG_MAX / sizeof(void *))
static size_t next_pid = 1;
-extern volatile size_t g_nthreads;
/*
* TODO: envp
@@ -145,7 +142,6 @@ pid_t
spawn(struct proc *cur, void(*func)(void), void *p, int flags, struct proc **newprocp)
{
struct proc *newproc;
- struct mmap_lgdr *mlgdr;
int error;
pid_t pid;
@@ -156,19 +152,10 @@ spawn(struct proc *cur, void(*func)(void), void *p, int flags, struct proc **new
return -ENOMEM;
}
- mlgdr = dynalloc(sizeof(*mlgdr));
- if (mlgdr == NULL) {
- dynfree(newproc);
- try_free_data(p);
- pr_error("could not alloc proc mlgdr (-ENOMEM)\n");
- return -ENOMEM;
- }
-
memset(newproc, 0, sizeof(*newproc));
error = md_spawn(newproc, cur, (uintptr_t)func);
if (error < 0) {
dynfree(newproc);
- dynfree(mlgdr);
try_free_data(p);
pr_error("error initializing proc\n");
return error;
@@ -184,23 +171,16 @@ spawn(struct proc *cur, void(*func)(void), void *p, int flags, struct proc **new
cur->flags |= PROC_LEAFQ;
}
- /* Add to parent leafq */
- TAILQ_INSERT_TAIL(&cur->leafq, newproc, leaf_link);
- atomic_inc_int(&cur->nleaves);
- newproc->parent = cur;
- newproc->data = p;
- newproc->exit_status = -1;
- newproc->cred = cur->cred;
-
- /* Initialize the mmap ledger */
- mlgdr->nbytes = 0;
- RBT_INIT(lgdr_entries, &mlgdr->hd);
- newproc->mlgdr = mlgdr;
- newproc->flags |= PROC_WAITED;
+ error = proc_init(newproc, cur);
+ if (error < 0) {
+ dynfree(newproc);
+ try_free_data(p);
+ pr_error("error initializing proc\n");
+ return error;
+ }
- atomic_inc_64(&g_nthreads);
+ newproc->data = p;
newproc->pid = next_pid++;
- signals_init(newproc);
sched_enqueue_td(newproc);
pid = newproc->pid;
return pid;
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index a4c16bb..1f5e578 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -40,6 +40,7 @@
HYRA_VERSION " " \
HYRA_BUILDDATE
+extern size_t g_nthreads;
static uint32_t pagesize = DEFAULT_PAGESIZE;
static char machine[] = HYRA_ARCH;
static char hyra[] = "Hyra";
@@ -62,7 +63,10 @@ static struct sysctl_entry common_optab[] = {
/* 'hw.*' */
[HW_PAGESIZE] = { HW_PAGESIZE, SYSCTL_OPTYPE_INT_RO, &pagesize },
[HW_NCPU] = { HW_NCPU, SYSCTL_OPTYPE_INT, NULL },
- [HW_MACHINE] = {HW_MACHINE, SYSCTL_OPTYPE_STR_RO, &machine }
+ [HW_MACHINE] = {HW_MACHINE, SYSCTL_OPTYPE_STR_RO, &machine },
+
+ /* 'proc.*' */
+ [PROC_COUNT] = { PROC_COUNT, SYSCTL_OPTYPE_INT_RO, &g_nthreads }
};
static int
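
The new PROC_COUNT entry exposes g_nthreads as a read-only value under
'proc.*'. Below is a hypothetical userland sketch of reading it,
assuming a BSD-style sysctl(2) wrapper; Hyra's actual userland
interface is not shown in this diff, so the call signature here is an
assumption:

	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int name[] = { PROC_COUNT };
		size_t nthreads = 0;
		size_t len = sizeof(nthreads);

		/* Assumed BSD-style sysctl(); Hyra may differ */
		if (sysctl(name, 1, &nthreads, &len, NULL, 0) != 0) {
			perror("sysctl");
			return 1;
		}

		printf("proc.count = %zu\n", nthreads);
		return 0;
	}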
diff --git a/sys/kern/kern_work.c b/sys/kern/kern_work.c
new file mode 100644
index 0000000..918af89
--- /dev/null
+++ b/sys/kern/kern_work.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2023-2025 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/panic.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/syslog.h>
+#include <sys/workqueue.h>
+#include <vm/dynalloc.h>
+#include <string.h>
+
+#define pr_trace(fmt, ...) kprintf("workq: " fmt, ##__VA_ARGS__)
+#define pr_error(...) pr_trace(__VA_ARGS__)
+
+extern struct proc g_proc0;
+
+/*
+ * The workqueue cookie value, used to verify
+ * that a workqueue object has been properly
+ * set up.
+ */
+#define WQ_COOKIE 0xFC0B
+
+/*
+ * A worker thread services work items in the
+ * queue; there is one worker per workqueue.
+ */
+static void
+workqueue_worker(void)
+{
+ struct proc *td;
+ struct workqueue *wqp;
+ struct work *wp;
+
+ td = this_td();
+ if ((wqp = td->data) == NULL) {
+ panic("no workqueue in thread\n");
+ }
+
+ /*
+ * Weird things can happen; make sure this
+ * workqueue was actually set up properly.
+ */
+ if (wqp->cookie != WQ_COOKIE) {
+ panic("bad WQ_COOKIE in worker\n");
+ }
+
+ for (;;) {
+ mutex_acquire(wqp->lock, 0);
+ wp = TAILQ_FIRST(&wqp->work);
+
+ /* Try again later if empty */
+ if (wp == NULL) {
+ mutex_release(wqp->lock);
+ sched_yield();
+ continue;
+ }
+
+ wp->func(wqp, wp);
+ TAILQ_REMOVE(&wqp->work, wp, link);
+
+ /*
+ * Decrement the number of work items that
+ * are left to get done. Check for underflow,
+ * which should not happen unless something
+ * clobbers the fields; test before decrementing
+ * so the check also holds if nwork is unsigned.
+ */
+ if (wqp->nwork == 0) {
+ panic("wqp nwork underflow\n");
+ }
+ --wqp->nwork;
+
+ mutex_release(wqp->lock);
+ sched_yield();
+ }
+}
+
+/*
+ * Allocate a new workqueue that may be used
+ * to hold queued up tasks.
+ *
+ * @name: Name to give the workqueue
+ * @max_work: Maximum number of jobs that may be queued at once
+ * @ipl: IPL that the work must operate at
+ *
+ * Returns a pointer to the new workqueue on success,
+ * otherwise NULL is returned.
+ */
+struct workqueue *
+workqueue_new(const char *name, size_t max_work, int ipl)
+{
+ struct workqueue *wqp;
+ struct proc *td;
+
+ td = this_td();
+ if (__unlikely(td == NULL)) {
+ pr_error("no thread in workqueue_new()\n");
+ return NULL;
+ }
+
+ wqp = dynalloc(sizeof(*wqp));
+ if (wqp == NULL) {
+ return NULL;
+ }
+
+ wqp->name = strdup(name);
+ TAILQ_INIT(&wqp->work);
+ wqp->ipl = ipl;
+ wqp->max_work = max_work;
+ wqp->nwork = 0;
+ wqp->cookie = WQ_COOKIE;
+ wqp->lock = mutex_new(wqp->name);
+
+ /*
+ * Spawn the worker thread that manages this
+ * specific workqueue. It dequeues work at the
+ * head of the queue, performs it, cleans up as
+ * needed, then moves on to the next item,
+ * yielding whenever the queue is empty.
+ */
+ spawn(
+ &g_proc0, workqueue_worker,
+ wqp, 0,
+ &wqp->worktd
+ );
+
+ return wqp;
+}
+
+/*
+ * Enqueue a work item onto a specific
+ * workqueue.
+ *
+ * @wqp: Pointer to specific workqueue
+ * @name: Name to set for work unit
+ * @wp: Pointer to work that should be enqueued
+ *
+ * Returns zero on success, otherwise a value
+ * less than zero is returned.
+ */
+int
+workqueue_enq(struct workqueue *wqp, const char *name, struct work *wp)
+{
+ if (wqp == NULL || wp == NULL || name == NULL) {
+ return -EINVAL;
+ }
+
+ /* Verify that we have a valid workqueue */
+ if (__unlikely(wqp->cookie != WQ_COOKIE)) {
+ panic("workq: bad cookie on work enqueue\n");
+ }
+
+ mutex_acquire(wqp->lock, 0);
+
+ /*
+ * If we have reached the maximum number of
+ * jobs that we can enqueue here, just log it
+ * and bail.
+ */
+ if (wqp->nwork >= wqp->max_work) {
+ pr_error("max jobs reached for '%s'\n", wqp->name);
+ mutex_release(wqp->lock);
+ return -EAGAIN;
+ }
+
+ /*
+ * Name the work unit only once we know it will
+ * actually be queued, so the -EAGAIN path does
+ * not leak the duplicated name.
+ */
+ wp->name = strdup(name);
+ TAILQ_INSERT_TAIL(&wqp->work, wp, link);
+ ++wqp->nwork;
+ mutex_release(wqp->lock);
+ return 0;
+}
+
+/*
+ * Destroy a workqueue and free resources
+ * associated with it.
+ *
+ * @wqp: Pointer to workqueue to destroy
+ *
+ * Returns zero on success, otherwise a value
+ * less than zero is returned.
+ */
+int
+workqueue_destroy(struct workqueue *wqp)
+{
+ if (wqp == NULL) {
+ return -EINVAL;
+ }
+
+ /* Should not happen but just make sure */
+ if (__unlikely(wqp->cookie != WQ_COOKIE)) {
+ panic("workq: bad cookie on destroy\n");
+ }
+
+ /* Free the name if we have it */
+ if (wqp->name != NULL) {
+ dynfree(wqp->name);
+ }
+
+ if (wqp->lock != NULL) {
+ mutex_free(wqp->lock);
+ }
+
+ /* Brutally murder any workthreads */
+ if (wqp->worktd != NULL) {
+ exit1(wqp->worktd, 0);
+ wqp->worktd = NULL;
+ }
+
+ /*
+ * Zero the structure before it is freed, for
+ * security reasons; we don't really know what
+ * was queued up, but for certain things it is
+ * best to make it as if it never existed in
+ * the first place.
+ *
+ * XXX: There is no need to free the workqueue here,
+ * as we passed it to spawn() as the worker's
+ * thread data. During exit, that thread data is
+ * freed, meaning this is already cleaned up.
+ */
+ memset(wqp, 0, sizeof(*wqp));
+ return 0;
+}
+
+/*
+ * Clean up after a work item.
+ *
+ * @wp: Work to clean up
+ *
+ * Returns zero on success, otherwise a value
+ * less than zero is returned.
+ */
+int
+work_destroy(struct work *wp)
+{
+ if (wp == NULL) {
+ return -EINVAL;
+ }
+
+ if (wp->name != NULL) {
+ dynfree(wp->name);
+ }
+
+ return 0;
+}