author    Ian Moffett <ian@osmora.org>  2024-12-07 03:20:25 -0500
committer Ian Moffett <ian@osmora.org>  2024-12-07 03:20:25 -0500
commit    cd612d0b75e322db04625128bdb1e475d1274736 (patch)
tree      7046c9ead21b97768d9bb0ec40e5121eac697f39
parent    07ddaa07aca2c75fef8b5234de0651535fbe5af0 (diff)
kernel: vcache: Improve vcache add/pull interface
Require the internal add/pull routines to be passed a pointer to the vcache to be operated on. This makes it easier to support per-process vcaches and LZVM.

Signed-off-by: Ian Moffett <ian@osmora.org>
-rw-r--r--  sys/include/sys/vnode.h  |  9
-rw-r--r--  sys/kern/vfs_vcache.c    | 57
2 files changed, 34 insertions(+), 32 deletions(-)
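With the cache passed explicitly, a per-process cache becomes just another struct vcache instance handed to the same add/pull routines. The sketch below is illustrative only: vcache_add()/vcache_pull() remain static to sys/kern/vfs_vcache.c in this commit, and the p_vcache/p_vcache_lock fields do not exist in the tree; it merely shows the shape of a caller under the new interface.

    /*
     * Hypothetical per-process caller (not part of this commit).
     * Assumes vcache_add() were made visible to this file and that
     * struct proc carried its own cache and lock.
     */
    struct proc {
        struct vcache p_vcache;          /* per-process vnode cache */
        struct spinlock p_vcache_lock;   /* guards p_vcache */
        /* ... */
    };

    static void
    proc_vcache_init(struct proc *p)
    {
        /* size == -1 means "not set up"; vcache_add() sets up the queue lazily */
        p->p_vcache.size = -1;
    }

    static int
    proc_vcache_enter(struct proc *p, struct vnode *vp)
    {
        int retval;

        spinlock_acquire(&p->p_vcache_lock);
        retval = vcache_add(vp, &p->p_vcache);
        spinlock_release(&p->p_vcache_lock);
        return retval;
    }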
diff --git a/sys/include/sys/vnode.h b/sys/include/sys/vnode.h
index 33d5b17..d9f9afe 100644
--- a/sys/include/sys/vnode.h
+++ b/sys/include/sys/vnode.h
@@ -48,6 +48,15 @@ struct vnode {
TAILQ_ENTRY(vnode) vcache_link;
};
+/*
+ * Vnode cache, can be per-process or
+ * global.
+ */
+struct vcache {
+ TAILQ_HEAD(vcache_head, vnode) q;
+ ssize_t size; /* In entries (-1 not set up) */
+};
+
#define vfs_vref(VP) (atomic_inc_int(&(VP)->refcount))
/* vcache types */
diff --git a/sys/kern/vfs_vcache.c b/sys/kern/vfs_vcache.c
index dc140f7..e72b12c 100644
--- a/sys/kern/vfs_vcache.c
+++ b/sys/kern/vfs_vcache.c
@@ -42,47 +42,40 @@
#define pr_trace(fmt, ...) kprintf("vcache: " fmt, ##__VA_ARGS__)
-struct vcache {
- TAILQ_HEAD(vcache_head, vnode) q;
- ssize_t size; /* In entries (-1 not set up) */
-} vcache = { .size = -1 };
-
/*
* Our vcache will be here if our caching type is
* global.
*/
static int vcache_type = VCACHE_TYPE_NONE;
+static struct vcache vcache = { .size = -1 };
__cacheline_aligned static struct spinlock vcache_lock;
/*
- * Pull a vnode from the head of the global
- * vcache. Returns NULL if none are found.
- *
- * XXX: Caller must acquire vcache_lock.
+ * Pull a vnode from the head of a vcache.
+ * Returns NULL if none are found.
*/
static struct vnode *
-vcache_global_pull(void)
+vcache_pull(struct vcache *vcp)
{
struct vnode *vp;
- if (vcache.size <= 0) {
+ if (vcp->size <= 0) {
return NULL;
}
- vp = TAILQ_FIRST(&vcache.q);
- TAILQ_REMOVE(&vcache.q, vp, vcache_link);
- --vcache.size;
+ vp = TAILQ_FIRST(&vcp->q);
+ TAILQ_REMOVE(&vcp->q, vp, vcache_link);
+ --vcp->size;
return vp;
}
/*
- * Add a new entry to the global vcache
+ * Add a new entry to a vcache
*
- * XXX: Caller must acquire vcache_lock.
* @vp: New vnode to add.
*/
static int
-vcache_global_add(struct vnode *vp)
+vcache_add(struct vnode *vp, struct vcache *vcp)
{
struct vnode *tmp;
@@ -92,24 +85,24 @@ vcache_global_add(struct vnode *vp)
* queue. However, if it is less than -1... Then shit,
* good luck debugging I suppose.
*
- * The global vcache naturally behaves as an LRU cache.
- * If we need more space, the tail of the queue is evicted.
+ * Vcaches naturally behave as LRU caches. If we need
+ * more space, the tail of the queue is evicted.
*/
- if (vcache.size < 0) {
- TAILQ_INIT(&vcache.q);
- vcache.size = 0;
- } else if (vcache.size < -1) {
- panic("vcache_global_add: Bad vcache size, catching fire\n");
- } else if (vcache.size == VCACHE_SIZE) {
+ if (vcp->size < 0) {
+ TAILQ_INIT(&vcp->q);
+ vcp->size = 0;
+ } else if (vcp->size < -1) {
+ panic("vcache_add: Bad vcache size, catching fire\n");
+ } else if (vcp->size == VCACHE_SIZE) {
/* Evict the tail */
- tmp = TAILQ_LAST(&vcache.q, vcache_head);
- TAILQ_REMOVE(&vcache.q, tmp, vcache_link);
+ tmp = TAILQ_LAST(&vcp->q, vcache_head);
+ TAILQ_REMOVE(&vcp->q, tmp, vcache_link);
dynfree(tmp);
- --vcache.size;
+ --vcp->size;
}
- TAILQ_INSERT_TAIL(&vcache.q, vp, vcache_link);
- ++vcache.size;
+ TAILQ_INSERT_TAIL(&vcp->q, vp, vcache_link);
+ ++vcp->size;
return 0;
}
@@ -182,7 +175,7 @@ vfs_vcache_enter(struct vnode *vp)
/* - FALL THROUGH - */
case VCACHE_TYPE_GLOBAL:
spinlock_acquire(&vcache_lock);
- retval = vcache_global_add(vp);
+ retval = vcache_add(vp, &vcache);
spinlock_release(&vcache_lock);
break;
default:
@@ -213,7 +206,7 @@ vfs_recycle_vnode(void)
/* - FALL THROUGH - */
case VCACHE_TYPE_GLOBAL:
spinlock_acquire(&vcache_lock);
- vp = vcache_global_pull();
+ vp = vcache_pull(&vcache);
spinlock_release(&vcache_lock);
break;
default:
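The pull side would mirror the global path above: a per-process recycle routine could hand its own cache to vcache_pull() under its own lock. Again a sketch only, under the same assumptions as before; these names are not code from this commit.

    /* Hypothetical per-process recycle path (sketch, not in this commit). */
    static struct vnode *
    proc_recycle_vnode(struct proc *p)
    {
        struct vnode *vp;

        spinlock_acquire(&p->p_vcache_lock);
        vp = vcache_pull(&p->p_vcache);   /* NULL if the cache is empty */
        spinlock_release(&p->p_vcache_lock);
        return vp;
    }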