	return b->priv.vt->store(b, b->priv.blk_start + index);
}
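+/**
+ * Set or clear the dirty state of the cache of device \a b.
+ */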
+INLINE void kblock_setDirty(struct KBlock *b, bool dirty)
+{
+	ASSERT(b);
+
+	if (dirty)
+		b->priv.flags |= KB_CACHE_DIRTY;
+	else
+		b->priv.flags &= ~KB_CACHE_DIRTY;
+}
+
int kblock_flush(struct KBlock *b)
{
	ASSERT(b);

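+	/* Write the cached block back only if it has been modified. */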
-	if (b->priv.cache_dirty)
+	if (kblock_cacheDirty(b))
	{
		LOG_INFO("flushing block %d\n", b->priv.curr_blk);
		if (kblock_store(b, b->priv.curr_blk) == 0)
-			b->priv.cache_dirty = false;
+			kblock_setDirty(b, false);
		else
			return EOF;
	}
	return 0;
}

size_t kblock_write(struct KBlock *b, block_idx_t idx, const void *buf, size_t offset, size_t size)
{
	ASSERT(b);

	if (!kblock_loadPage(b, idx))
		return 0;

-	b->priv.cache_dirty = true;
+	kblock_setDirty(b, true);
	return kblock_writeBuf(b, buf, offset, size);
}

int kblock_copy(struct KBlock *b, block_idx_t idx1, block_idx_t idx2)
{
	ASSERT(b);

	if (!kblock_loadPage(b, idx1))
		return EOF;

	b->priv.curr_blk = idx2;
-	b->priv.cache_dirty = true;
+	kblock_setDirty(b, true);

	return 0;
}
} KBlockVTable;
-#define KB_BUFFERED BV(0)
+#define KB_BUFFERED    BV(0) ///< Internal flag: true if the device is buffered
+#define KB_CACHE_DIRTY BV(1) ///< Internal flag: true if the cache is dirty
/**
 * KBlock private members.
 */
typedef struct KBlockPriv
{
	int flags;             ///< Status and error flags.
	void *buf;
	block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim()
	block_idx_t curr_blk;
-	bool cache_dirty;

	const struct KBlockVTable *vt; ///< Virtual table of interface functions.
} KBlockPriv;
	return b->priv.vt->readBlock(b, b->priv.blk_start + index, buf);
}
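+/**
+ * Return true if the cache of device \a b is dirty, i.e. it must be
+ * written back to the underlying storage before being replaced.
+ */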
+INLINE bool kblock_cacheDirty(struct KBlock *b)
+{
+	ASSERT(b);
+	return (b->priv.flags & KB_CACHE_DIRTY);
+}
+
INLINE block_idx_t kblock_cachedBlock(struct KBlock *b)
{
	return b->priv.curr_blk;
}

INLINE bool kblock_buffered(struct KBlock *b)
{
	ASSERT(b);
	return (b->priv.flags & KB_BUFFERED);
}
+
size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size);
int kblock_flush(struct KBlock *b);
		f->b.priv.vt = &kblockfile_swbuffered_vt;
		kblockfile_load(&f->b, 0);
		f->b.priv.curr_blk = 0;
-		f->b.priv.cache_dirty = false;
	}
	else
		f->b.priv.vt = &kblockfile_unbuffered_vt;