Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_filedesc.c | 123
-rw-r--r--  sys/kern/kern_loader.c   |  33
-rw-r--r--  sys/kern/kern_sched.c    | 275
-rw-r--r--  sys/kern/kern_syscall.c  |  44
-rw-r--r--  sys/kern/vfs_init.c      |   5
-rw-r--r--  sys/kern/vfs_lookup.c    |  61
6 files changed, 474 insertions, 67 deletions
diff --git a/sys/kern/kern_filedesc.c b/sys/kern/kern_filedesc.c
new file mode 100644
index 0000000..a943714
--- /dev/null
+++ b/sys/kern/kern_filedesc.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/filedesc.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <vm/dynalloc.h>
+#include <assert.h>
+#include <string.h>
+
+/*
+ * Allocate a file descriptor.
+ *
+ * @td: Thread to allocate from, NULL for current thread.
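+ *
+ * A minimal usage sketch (hypothetical caller):
+ *
+ *   struct filedesc *fd = fd_alloc(NULL);
+ *   if (fd == NULL)
+ *       return -1;  /* out of descriptor slots */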
+ */
+struct filedesc *
+fd_alloc(struct proc *td)
+{
+ struct filedesc *fd;
+
+ if (td == NULL) {
+ td = this_td();
+ __assert(td != NULL);
+ }
+
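+ /* Scan the fd table for the first free slot and claim it */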
+ for (size_t i = 0; i < PROC_MAX_FDS; ++i) {
+ if (td->fds[i] != NULL) {
+ continue;
+ }
+
+ fd = dynalloc(sizeof(struct filedesc));
+ if (fd == NULL) {
+ return NULL;
+ }
+
+ memset(fd, 0, sizeof(struct filedesc));
+
+ fd->fdno = i;
+ td->fds[i] = fd;
+ return fd;
+ }
+
+ return NULL;
+}
+
+/*
+ * Fetch a file descriptor from a file descriptor
+ * number.
+ *
+ * @td: Thread to fetch from, NULL for current thread.
+ * @fdno: File descriptor to fetch
+ */
+struct filedesc *
+fd_from_fdnum(const struct proc *td, int fdno)
+{
+ if (td == NULL) {
+ td = this_td();
+ __assert(td != NULL);
+ }
+
+ if (fdno < 0 || fdno >= PROC_MAX_FDS) {
+ return NULL;
+ }
+
+ /* The fd number indexes the fd table directly */
+ return td->fds[fdno];
+}
+
+/*
+ * Close a file descriptor from its fd number.
+ *
+ * @td: Thread to fetch from, NULL for current thread.
+ * @fdno: File descriptor number to close.
+ */
+void
+fd_close_fdnum(struct proc *td, int fdno)
+{
+ struct filedesc *fd;
+
+ if (td == NULL) {
+ td = this_td();
+ __assert(td != NULL);
+ }
+
+ fd = fd_from_fdnum(td, fdno);
+ if (fd == NULL) {
+ return;
+ }
+
+ dynfree(fd);
+ td->fds[fdno] = NULL;
+}
diff --git a/sys/kern/kern_loader.c b/sys/kern/kern_loader.c
index 53ba8c2..23249ea 100644
--- a/sys/kern/kern_loader.c
+++ b/sys/kern/kern_loader.c
@@ -61,12 +61,10 @@ int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv,
vm_prot_t prot = PROT_USER;
uintptr_t physmem;
- uintptr_t max_addr, map_addr;
- size_t misalign, page_count;
-
+ size_t misalign, page_count, map_len;
int status;
- const size_t GRANULE = vm_get_page_size();
+ const size_t GRANULE = vm_get_page_size();
void *tmp_ptr;
if (auxv == NULL) {
@@ -93,26 +91,8 @@ int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv,
misalign = phdr->p_vaddr & (GRANULE - 1);
page_count = __DIV_ROUNDUP(phdr->p_memsz + misalign, GRANULE);
- max_addr = phdr->p_vaddr + (GRANULE * page_count);
-
- /*
- * We are assuming this is a user program that we are loading.
- * All user programs should be on the lower half of the address
- * space. We will check that before we begin doing anything here.
- *
- * We are also going to check if the virtual address the program
- * header refers to overflows into the higher half. If anything
- * goes into the higher half, we won't simply drop the phdr,
- * we'll instead assume caller error and return -EINVAL.
- */
- if (phdr->p_vaddr >= VM_HIGHER_HALF) {
- return -EINVAL;
- } else if (max_addr >= VM_HIGHER_HALF) {
- /* Overflows into higher half */
- return -EINVAL;
- }
-
physmem = vm_alloc_pageframe(page_count);
+ map_len = page_count * GRANULE;
/* Do we not have enough page frames? */
if (physmem == 0) {
@@ -121,14 +101,9 @@ int loader_load(struct vas vas, const void *dataptr, struct auxval *auxv,
return -ENOMEM;
}
- map_addr = phdr->p_vaddr + load_base;
- status = vm_map_create(vas, map_addr, physmem, prot, page_count*GRANULE);
+ status = vm_map_create(vas, phdr->p_vaddr + load_base, physmem, prot, map_len);
if (status != 0) {
- DBG("Failed to map 0x%p - 0x%p\n",
- phdr->p_vaddr + load_base,
- (phdr->p_vaddr + load_base) + (page_count * GRANULE));
-
return status;
}
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index f61c592..7b3776d 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -34,10 +34,36 @@
#include <sys/timer.h>
#include <sys/cdefs.h>
#include <sys/spinlock.h>
+#include <sys/loader.h>
+#include <sys/panic.h>
+#include <sys/machdep.h>
+#include <sys/filedesc.h>
+#include <fs/initramfs.h>
#include <vm/dynalloc.h>
+#include <vm/physseg.h>
+#include <vm/pmap.h>
+#include <vm/map.h>
+#include <vm/vm.h>
#include <assert.h>
#include <string.h>
+#define STACK_PAGES 8
+#define STACK_SIZE (STACK_PAGES*vm_get_page_size())
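+/* e.g. with a 4 KiB page size, STACK_SIZE is 32 KiB */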
+
+/*
+ * The PHYS_TO_VIRT/VIRT_TO_PHYS macros convert between
+ * lower half and higher half addresses. Userspace
+ * addresses live in the lower half, so for now we simply
+ * wrap these macros to keep things simple.
+ *
+ * XXX: TODO: This breaks once lower half addresses are
+ * no longer identity mapped. When that changes,
+ * get rid of this.
+ */
+#define USER_TO_KERN(user) PHYS_TO_VIRT(user)
+#define KERN_TO_USER(kern) VIRT_TO_PHYS(kern)
+
/*
* Thread ready queue - all threads ready to be
* scheduled should be added to this queue.
@@ -51,9 +77,6 @@ static size_t nthread = 0;
*/
static struct spinlock tdq_lock = {0};
-/* In sys/<machine>/<machine>/switch.S */
-void __sched_switch_to(struct trapframe *tf);
-
static inline void
sched_oneshot(void)
{
@@ -97,13 +120,11 @@ sched_dequeue_td(void)
spinlock_acquire(&tdq_lock);
- if (TAILQ_EMPTY(&td_queue)) {
- goto done;
+ if (!TAILQ_EMPTY(&td_queue)) {
+ td = TAILQ_FIRST(&td_queue);
+ TAILQ_REMOVE(&td_queue, td, link);
}
- td = TAILQ_FIRST(&td_queue);
- TAILQ_REMOVE(&td_queue, td, link);
-done:
spinlock_release(&tdq_lock);
return td;
}
@@ -121,12 +142,101 @@ sched_enter(void)
}
}
+static uintptr_t
+sched_init_stack(void *stack_top, char *argvp[], char *envp[], struct auxval auxv)
+{
+ uintptr_t *sp = stack_top;
+ char *env_ptr = NULL, *argv_ptr = NULL;
+ size_t argc, envc, len;
+
+ /* Copy environment and argument strings (NUL terminators included) */
+ for (envc = 0; envp[envc] != NULL; ++envc) {
+ len = strlen(envp[envc]) + 1;
+ sp = (uintptr_t *)((char *)sp - len);
+ memcpy(sp, envp[envc], len);
+ }
+
+ __assert(envc >= 1);
+ env_ptr = (char *)sp;
+
+ for (argc = 0; argvp[argc] != NULL; ++argc) {
+ len = strlen(argvp[argc]) + 1;
+ sp = (uintptr_t *)((char *)sp - len);
+ memcpy(sp, argvp[argc], len);
+ }
+
+ __assert(argc >= 1);
+ argv_ptr = (char *)sp;
+
+ /* Ensure the stack is aligned */
+ sp = (void *)__ALIGN_DOWN((uintptr_t)sp, 16);
+ if (((argc + envc + 1) & 1) != 0)
+ --sp;
+
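+ /*
+ * Layout, from the final stack pointer upward: argc,
+ * argv[0..argc-1], NULL, envp[0..envc-1], NULL, then the
+ * auxiliary vector terminated by AT_NULL. Entries are
+ * pushed in reverse since the stack grows downward.
+ */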
+ AUXVAL(sp, AT_NULL, 0x0);
+ AUXVAL(sp, AT_SECURE, 0x0);
+ AUXVAL(sp, AT_ENTRY, auxv.at_entry);
+ AUXVAL(sp, AT_PHDR, auxv.at_phdr);
+ AUXVAL(sp, AT_PHNUM, auxv.at_phnum);
+ STACK_PUSH(sp, 0); /* envp[] NULL terminator */
+
+ /* Push environment string pointers */
+ for (size_t i = 0; i < envc; ++i) {
+ len = strlen(env_ptr) + 1;
+ STACK_PUSH(sp, (uintptr_t)KERN_TO_USER((uintptr_t)env_ptr));
+ env_ptr += len;
+ }
+
+ /* NULL-terminate argv[], then push argument string pointers */
+ STACK_PUSH(sp, 0);
+ for (size_t i = 0; i < argc; ++i) {
+ len = strlen(argv_ptr) + 1;
+ STACK_PUSH(sp, (uintptr_t)KERN_TO_USER((uintptr_t)argv_ptr));
+ argv_ptr += len;
+ }
+
+ STACK_PUSH(sp, argc);
+
+ return (uintptr_t)sp;
+}
+
+static uintptr_t
+sched_create_stack(struct vas vas, bool user, char *argvp[],
+ char *envp[], struct auxval auxv, struct proc *td)
+{
+ int status;
+ uintptr_t stack;
+ const vm_prot_t USER_STACK_PROT = PROT_WRITE | PROT_USER;
+
+ if (!user) {
+ stack = (uintptr_t)dynalloc(STACK_SIZE);
+ if (stack == 0) {
+ return 0;
+ }
+
+ td->stack_base = stack;
+ return sched_init_stack((void *)(stack + STACK_SIZE), argvp, envp, auxv);
+ }
+
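+ /* Userland: allocate page frames and map them into the target VAS */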
+ stack = vm_alloc_pageframe(STACK_PAGES);
+ if (stack == 0) {
+ return 0;
+ }
+
+ td->stack_base = stack;
+ status = vm_map_create(vas, stack, stack, USER_STACK_PROT, STACK_SIZE);
+
+ if (status != 0) {
+ vm_free_pageframe(stack, STACK_PAGES);
+ return 0;
+ }
+
+ memset(USER_TO_KERN(stack), 0, STACK_SIZE);
+ stack = sched_init_stack((void *)USER_TO_KERN(stack + STACK_SIZE), argvp, envp, auxv);
+ return stack;
+}
+
static struct proc *
-sched_create_td(uintptr_t rip)
+sched_create_td(uintptr_t rip, char *argvp[], char *envp[], struct auxval auxv,
+ struct vas vas, bool is_user)
{
- const size_t STACK_SIZE = 0x100000; /* 1 MiB */
struct proc *td;
- void *stack;
+ uintptr_t stack;
struct trapframe *tf;
tf = dynalloc(sizeof(struct trapframe));
@@ -134,32 +244,103 @@ sched_create_td(uintptr_t rip)
return NULL;
}
- stack = dynalloc(STACK_SIZE);
- if (stack == NULL) {
+ td = dynalloc(sizeof(struct proc));
+ if (td == NULL) {
dynfree(tf);
return NULL;
}
- td = dynalloc(sizeof(struct proc));
- if (td == NULL) {
+ /* Zero the proc first; sched_create_stack() sets stack_base */
+ memset(td, 0, sizeof(struct proc));
+ stack = sched_create_stack(vas, is_user, argvp, envp, auxv, td);
+ if (stack == 0) {
dynfree(tf);
- dynfree(stack);
+ dynfree(td);
return NULL;
}
memset(tf, 0, sizeof(struct trapframe));
- memset(stack, 0, STACK_SIZE);
/* Setup process itself */
td->pid = 0; /* Don't assign PID until enqueued */
td->cpu = NULL; /* Not yet assigned a core */
td->tf = tf;
+ td->addrsp = vas;
+ td->is_user = is_user;
+ processor_init_pcb(td);
+
+ /* Allocate standard file descriptors */
+ __assert(fd_alloc(td) != NULL); /* STDIN */
+ __assert(fd_alloc(td) != NULL); /* STDOUT */
+ __assert(fd_alloc(td) != NULL); /* STDERR */
/* Setup trapframe */
- init_frame(tf, rip, (uintptr_t)stack + STACK_SIZE - 1);
+ if (!is_user) {
+ init_frame(tf, rip, (uintptr_t)stack);
+ } else {
+ init_frame_user(tf, rip, KERN_TO_USER(stack));
+ }
return td;
}
+static void
+sched_destroy_td(struct proc *td)
+{
+ processor_free_pcb(td);
+
+ /*
+ * User stacks are allocated with vm_alloc_pageframe(),
+ * while kernel stacks are allocated with dynalloc().
+ * Check whether this is a user or kernel thread so the
+ * matching deallocation routine is used.
+ */
+ if (td->is_user) {
+ vm_free_pageframe(td->stack_base, STACK_PAGES);
+ } else {
+ dynfree((void *)td->stack_base);
+ }
+
+ /* Close all of the file descriptors */
+ for (size_t i = 0; i < PROC_MAX_FDS; ++i) {
+ fd_close_fdnum(td, i);
+ }
+
+ pmap_free_vas(vm_get_ctx(), td->addrsp);
+ dynfree(td->tf);
+ dynfree(td);
+}
+
+void
+sched_exit(void)
+{
+ struct proc *td;
+ struct vas kvas = vm_get_kvas();
+
+ intr_mask();
+
+ td = this_td();
+
+ /* Unhook ourself so the scheduler won't touch freed state */
+ this_cpu()->sched_state.td = NULL;
+
+ /* Switch back to the kernel address space and destroy ourself */
+ pmap_switch_vas(vm_get_ctx(), kvas);
+ sched_destroy_td(td);
+
+ intr_unmask();
+ sched_enter();
+}
+
+/*
+ * Get the current running thread.
+ */
+struct proc *
+this_td(void)
+{
+ struct sched_state *state;
+ struct cpu_info *ci;
+
+ ci = this_cpu();
+ state = &ci->sched_state;
+ return state->td;
+}
+
/*
* Thread context switch routine
*/
@@ -174,43 +355,73 @@ sched_context_switch(struct trapframe *tf)
* If we have no threads, we should not
* preempt at all.
*/
- if (nthread == 0) {
- goto done;
- } else if ((next_td = sched_dequeue_td()) == NULL) {
- /* Empty */
- goto done;
+ if (nthread == 0 || (next_td = sched_dequeue_td()) == NULL) {
+ sched_oneshot();
+ return;
}
+ /*
+ * If we have a thread currently running and we are switching
+ * to another, we shall save our current register state
+ * by copying the trapframe.
+ */
if (state->td != NULL) {
- /* Save our trapframe */
td = state->td;
memcpy(td->tf, tf, sizeof(struct trapframe));
}
- /* Copy to stack */
+ /* Copy over the next thread's register state to us */
memcpy(tf, next_td->tf, sizeof(struct trapframe));
td = state->td;
state->td = next_td;
+ /* Re-enqueue the previous thread if it exists */
if (td != NULL) {
sched_enqueue_td(td);
}
-done:
+
+ /* Do architecture specific context switch logic */
+ processor_switch_to(td, next_td);
+
+ /* Done, switch out our vas and oneshot */
+ pmap_switch_vas(vm_get_ctx(), next_td->addrsp);
sched_oneshot();
}
void
sched_init(void)
{
+ struct proc *init;
+ struct auxval auxv = {0}, ld_auxv = {0};
+ struct vas vas = pmap_create_vas(vm_get_ctx());
+ const char *init_bin, *ld_bin;
+
+ char *ld_path;
+ char *argv[] = {"/usr/sbin/init", NULL};
+ char *envp[] = {"", NULL};
+
TAILQ_INIT(&td_queue);
- /*
- * TODO: Create init with sched_create_td()
- * and enqueue with sched_enqueue_td()
- */
- (void)sched_create_td;
- (void)sched_enqueue_td;
+ if ((init_bin = initramfs_open("/usr/sbin/init")) == NULL) {
+ panic("Could not open /usr/boot/init\n");
+ }
+ if (loader_load(vas, init_bin, &auxv, 0, &ld_path) != 0) {
+ panic("Could not load init\n");
+ }
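+
+ /* init is dynamically linked; bring in its interpreter as well */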
+ if ((ld_bin = initramfs_open(ld_path)) == NULL) {
+ panic("Could not open %s\n", ld_path);
+ }
+ if (loader_load(vas, ld_bin, &ld_auxv, 0x00, NULL) != 0) {
+ panic("Could not load %s\n", ld_path);
+ }
+
+ init = sched_create_td((uintptr_t)ld_auxv.at_entry, argv, envp, ld_auxv, vas, true);
+ if (init == NULL) {
+ panic("Failed to create thread for init\n");
+ }
+
+ sched_enqueue_td(init);
}
/*
diff --git a/sys/kern/kern_syscall.c b/sys/kern/kern_syscall.c
new file mode 100644
index 0000000..b6e31d1
--- /dev/null
+++ b/sys/kern/kern_syscall.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2023-2024 Ian Marco Moffett and the Osmora Team.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Hyra nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/syscall.h>
+#include <sys/sched.h>
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
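+/*
+ * exit(2): terminate the calling thread.
+ * Never returns to the caller.
+ */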
+__noreturn static uint64_t
+sys_exit(struct syscall_args *args)
+{
+ sched_exit();
+ __builtin_unreachable();
+}
+
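+/*
+ * System call table; the syscall number passed from
+ * userland indexes directly into this table.
+ */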
+uint64_t(*g_syscall_table[__MAX_SYSCALLS])(struct syscall_args *args) = {
+ sys_exit,
+};
diff --git a/sys/kern/vfs_init.c b/sys/kern/vfs_init.c
index 18ab3ac..0d8d194 100644
--- a/sys/kern/vfs_init.c
+++ b/sys/kern/vfs_init.c
@@ -40,8 +40,10 @@ __MODULE_NAME("vfs");
__KERNEL_META("$Hyra$: vfs.c, Ian Marco Moffett, "
"Hyra Virtual File System");
+#define INITRAMFS_ID 0
+
static struct fs_info filesystems[] = {
- { "initramfs", &g_initramfs_ops }
+ [INITRAMFS_ID] = { "initramfs", &g_initramfs_ops }
};
struct vnode *g_root_vnode = NULL;
@@ -76,4 +78,5 @@ vfs_init(void)
}
g_root_vnode->vops = &g_initramfs_vops;
+ g_root_vnode->fs = &filesystems[INITRAMFS_ID];
}
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 748d5e2..1398964 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -29,11 +29,13 @@
#include <sys/types.h>
#include <sys/vfs.h>
+#include <sys/mount.h>
+#include <sys/errno.h>
#include <vm/dynalloc.h>
#include <string.h>
/*
- * Fetches the filename within a past at
+ * Fetches the filename within a path at
* the nth index denoted by `idx'
*
* Returns memory allocated by dynalloc()
@@ -115,9 +117,58 @@ vfs_get_fname_at(const char *path, size_t idx)
return ret;
}
-struct vnode *
-vfs_path_to_node(const char *path)
+/*
+ * Fetches a vnode from a path.
+ *
+ * @path: Path to fetch vnode from.
+ * @vp: Output var for fetched vnode.
+ *
+ * Returns 0 on success, otherwise a negative errno value.
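+ *
+ * A minimal usage sketch (hypothetical caller):
+ *
+ *   struct vnode *vp;
+ *   if (vfs_path_to_node("/usr/sbin/init", &vp) != 0)
+ *       return -1;  /* lookup failed */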
+ */
+int
+vfs_path_to_node(const char *path, struct vnode **vp)
{
- /* TODO */
- return NULL;
+ struct vnode *vnode = g_root_vnode;
+ struct fs_info *fs;
+ char *name;
+ int s = 0, fs_caps = 0;
+
+ if (strcmp(path, "/") == 0 || !vfs_is_valid_path(path)) {
+ return -EINVAL;
+ } else if (*path != '/') {
+ /* Only absolute paths are supported */
+ return -EINVAL;
+ }
+
+ /* Fetch filesystem capabilities if we can */
+ if (vnode->fs != NULL) {
+ fs = vnode->fs;
+ fs_caps = fs->caps;
+ }
+
+ /*
+ * If the filesystem requires full-path lookups, we can try
+ * throwing the full path at the filesystem to see if
+ * it'll give us a vnode.
+ */
+ if (__TEST(fs_caps, FSCAP_FULLPATH)) {
+ s = vfs_vget(g_root_vnode, path, &vnode);
+ goto done;
+ }
+
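+ /* Walk the path one component at a time, descending the tree */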
+ for (size_t i = 0;; ++i) {
+ name = vfs_get_fname_at(path, i);
+ if (name == NULL) break;
+
+ s = vfs_vget(vnode, name, &vnode);
+ dynfree(name);
+
+ if (s != 0) break;
+ }
+
+done:
+ if (vp != NULL && s == 0) {
+ *vp = vnode;
+ }
+
+ return s;
}