summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: 2bc2b05)
return b->priv.vt->store(b, b->priv.blk_start + index);
}
return b->priv.vt->store(b, b->priv.blk_start + index);
}
+/**
+ * Set or clear the cache-dirty flag of a KBlock device.
+ *
+ * Replaces direct writes to the old \c cache_dirty boolean member:
+ * the dirty state is now kept as bit \c KB_CACHE_DIRTY inside
+ * \c priv.flags (alongside \c KB_BUFFERED).
+ *
+ * \param b     KBlock device context (must be non-NULL; not checked here —
+ *              NOTE(review): consider ASSERT(b) for symmetry with
+ *              kblock_cacheDirty()).
+ * \param dirty true to mark the cached block as modified, false after a
+ *              successful flush/store.
+ */
+INLINE void kblock_setDirty(struct KBlock *b, bool dirty)
+{
+	if (dirty)
+		b->priv.flags |= KB_CACHE_DIRTY;
+	else
+		b->priv.flags &= ~KB_CACHE_DIRTY;
+}
+
size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size)
size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size)
- if (b->priv.cache_dirty)
+ if (kblock_cacheDirty(b))
{
LOG_INFO("flushing block %d\n", b->priv.curr_blk);
if (kblock_store(b, b->priv.curr_blk) == 0)
{
LOG_INFO("flushing block %d\n", b->priv.curr_blk);
if (kblock_store(b, b->priv.curr_blk) == 0)
- b->priv.cache_dirty = false;
+ kblock_setDirty(b, false);
if (!kblock_loadPage(b, idx))
return 0;
if (!kblock_loadPage(b, idx))
return 0;
- b->priv.cache_dirty = true;
+ kblock_setDirty(b, true);
return kblock_writeBuf(b, buf, offset, size);
}
return kblock_writeBuf(b, buf, offset, size);
}
return EOF;
b->priv.curr_blk = idx2;
return EOF;
b->priv.curr_blk = idx2;
- b->priv.cache_dirty = true;
+ kblock_setDirty(b, true);
-#define KB_BUFFERED BV(0)
+#define KB_BUFFERED BV(0)
+#define KB_CACHE_DIRTY BV(1)
/**
* KBlock private members.
/**
* KBlock private members.
void *buf;
block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim()
block_idx_t curr_blk;
void *buf;
block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim()
block_idx_t curr_blk;
const struct KBlockVTable *vt; ///< Virtual table of interface functions.
} KBlockPriv;
const struct KBlockVTable *vt; ///< Virtual table of interface functions.
} KBlockPriv;
return b->priv.vt->readBlock(b, b->priv.blk_start + index, buf);
}
return b->priv.vt->readBlock(b, b->priv.blk_start + index, buf);
}
+/**
+ * Test whether the KBlock cache holds unflushed modifications.
+ *
+ * Reads bit \c KB_CACHE_DIRTY from \c priv.flags; the nonzero masked
+ * value converts implicitly to \c true per C99 bool semantics.
+ *
+ * \param b  KBlock device context; asserted non-NULL.
+ * \return   true if the cached block must be stored before reuse.
+ */
+INLINE bool kblock_cacheDirty(struct KBlock *b)
+{
+	ASSERT(b);
+	return (b->priv.flags & KB_CACHE_DIRTY);
+}
+
INLINE block_idx_t kblock_cachedBlock(struct KBlock *b)
{
return b->priv.curr_blk;
INLINE block_idx_t kblock_cachedBlock(struct KBlock *b)
{
return b->priv.curr_blk;
return (b->priv.flags & KB_BUFFERED);
}
return (b->priv.flags & KB_BUFFERED);
}
size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size);
int kblock_flush(struct KBlock *b);
size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size);
int kblock_flush(struct KBlock *b);
f->b.priv.vt = &kblockfile_swbuffered_vt;
kblockfile_load(&f->b, 0);
f->b.priv.curr_blk = 0;
f->b.priv.vt = &kblockfile_swbuffered_vt;
kblockfile_load(&f->b, 0);
f->b.priv.curr_blk = 0;
- f->b.priv.cache_dirty = false;
}
else
f->b.priv.vt = &kblockfile_unbuffered_vt;
}
else
f->b.priv.vt = &kblockfile_unbuffered_vt;