#include "kblock.h"
#include <cfg/log.h>
+INLINE size_t kblock_readDirect(struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size)
+{
+ KB_ASSERT_METHOD(b, readDirect);
+ return b->priv.vt->readDirect(b, index, buf, offset, size);
+}
-static void *kblock_swMap(struct KBlock *b, size_t offset, size_t size)
+INLINE size_t kblock_readBuf(struct KBlock *b, void *buf, size_t offset, size_t size)
{
- return (kblock_readBuf(b, b->priv.pagebuf, offset, size) == size) ? b->priv.pagebuf : NULL;
+ KB_ASSERT_METHOD(b, readBuf);
+ ASSERT(offset + size <= b->blk_size);
+
+ return b->priv.vt->readBuf(b, buf, offset, size);
}
+INLINE size_t kblock_writeBuf(struct KBlock *b, const void *buf, size_t offset, size_t size)
+{
+ KB_ASSERT_METHOD(b, writeBuf);
+ ASSERT(offset + size <= b->blk_size);
+ return b->priv.vt->writeBuf(b, buf, offset, size);
+}
-static int kblock_swUnmap(struct KBlock *b, size_t offset, size_t size)
+INLINE int kblock_load(struct KBlock *b, block_idx_t index)
{
- return (kblock_writeBuf(b, b->priv.pagebuf, offset, size) == size) ? 0 : EOF;
+ KB_ASSERT_METHOD(b, load);
+ ASSERT(index < b->blk_cnt);
+
+ return b->priv.vt->load(b, b->priv.blk_start + index);
}
+INLINE int kblock_store(struct KBlock *b, block_idx_t index)
+{
+ KB_ASSERT_METHOD(b, store);
+ ASSERT(index < b->blk_cnt);
+
+ return b->priv.vt->store(b, b->priv.blk_start + index);
+}
+
+
-void *kblock_unsupportedMap(struct KBlock *b, UNUSED_ARG(size_t, offset), UNUSED_ARG(size_t, size))
+size_t kblock_read(struct KBlock *b, block_idx_t idx, void *_buf, size_t offset, size_t size)
{
- LOG_WARN("This driver does not support block mapping: use kblock_addMapping() to add generic mapping functionality.\n");
- b->priv.flags |= BV(KBS_ERR_MAP_NOT_AVAILABLE);
- return NULL;
+ size_t tot_rd = 0;
+ uint8_t *buf = (uint8_t *)_buf;
+
+ ASSERT(b);
+ ASSERT(buf);
+
+ while (size)
+ {
+ size_t len = MIN(size, b->blk_size - offset);
+ size_t rlen;
+
+ if (idx == b->priv.curr_blk)
+ rlen = kblock_readBuf(b, buf, offset, len);
+ else
+ rlen = kblock_readDirect(b, idx, buf, offset, len);
+
+ tot_rd += rlen;
+ if (rlen != len)
+ break;
+
+ idx++;
+ size -= rlen;
+ offset = 0;
+ buf += rlen;
+ }
+
+ return tot_rd;
}
-void kblock_addMapping(struct KBlock *dev, void *buf, size_t size)
+
+int kblock_flush(struct KBlock *b)
{
+ ASSERT(b);
+
+ if (b->priv.cache_dirty)
+ {
+ if (kblock_store(b, b->priv.curr_blk) == 0)
+ b->priv.cache_dirty = false;
+ else
+ return EOF;
+ }
+ return 0;
+}
+
+
+static bool kblock_loadPage(struct KBlock *b, block_idx_t idx)
+{
+ ASSERT(b);
+
+ if (idx != b->priv.curr_blk)
+ {
+ if (kblock_flush(b) != 0 || kblock_load(b, idx) != 0)
+ return false;
+
+ b->priv.curr_blk = idx;
+ }
+ return true;
+}
+
+
+size_t kblock_write(struct KBlock *b, block_idx_t idx, const void *_buf, size_t offset, size_t size)
+{
+ size_t tot_wr = 0;
+ const uint8_t *buf = (const uint8_t *)_buf;
+
+ ASSERT(b);
ASSERT(buf);
- ASSERT(size);
- ASSERT(dev);
-
- dev->vt->map = kblock_swMap;
- dev->vt->unmap = kblock_swUnmap;
-
- dev->priv.pagebuf = buf;
- dev->priv.pagebuf_size = size;
+
+ while (size)
+ {
+ size_t len = MIN(size, b->blk_size - offset);
+ size_t wlen;
+
+ if (!kblock_loadPage(b, idx))
+ break;
+
+ wlen = kblock_writeBuf(b, buf, offset, len);
+ b->priv.cache_dirty = true;
+
+ tot_wr += wlen;
+ if (wlen != len)
+ break;
+
+ idx++;
+ size -= wlen;
+ offset = 0;
+ buf += wlen;
+ }
+
+ return tot_wr;
+}
+
+int kblock_copy(struct KBlock *b, block_idx_t idx1, block_idx_t idx2)
+{
+ ASSERT(b);
+
+ if (!kblock_loadPage(b, idx1))
+ return EOF;
+
+ b->priv.curr_blk = idx2;
+ b->priv.cache_dirty = true;
+ return 0;
}
+
/**
* \name Prototypes for KBlock access functions.
- *
+ *
* A KBlock user can choose which function subset to implement,
* but has to set to NULL unimplemented features.
- *
- * \{
+ *
+ * \{
*/
-typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size);
-typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size);
-typedef int (* kblock_load_t) (struct KBlock *b, block_idx_t index);
-typedef int (* kblock_store_t) (struct KBlock *b, block_idx_t index);
-typedef void * (* kblock_map_t) (struct KBlock *b, size_t offset, size_t size);
-typedef int (* kblock_unmap_t) (struct KBlock *b, size_t offset, size_t size);
-typedef int (* kblock_error_t) (struct KBlock *b);
-typedef int (* kblock_clearerr_t)(struct KBlock *b);
-typedef int (* kblock_close_t) (struct KBlock *b);
+typedef size_t (* kblock_read_direct_t) (struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size);
+typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size);
+typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size);
+typedef int (* kblock_load_t) (struct KBlock *b, block_idx_t index);
+typedef int (* kblock_store_t) (struct KBlock *b, block_idx_t index);
+typedef int (* kblock_error_t) (struct KBlock *b);
+typedef int (* kblock_clearerr_t) (struct KBlock *b);
+typedef int (* kblock_close_t) (struct KBlock *b);
/* \} */
/**
*/
typedef struct KBlockVTable
{
- kblock_read_t readBuf; ///< \sa kblock_readBuf()
- kblock_write_t writeBuf; ///< \sa kblock_writeBuf()
- kblock_load_t load; ///< \sa kblock_load()
- kblock_store_t store; ///< \sa kblock_store()
+ kblock_read_direct_t readDirect;
+ kblock_read_t readBuf;
+ kblock_write_t writeBuf;
+ kblock_load_t load;
+ kblock_store_t store;
- kblock_map_t map; ///< \sa kblock_map()
- kblock_unmap_t unmap; ///< \sa kblock_unmap()
-
kblock_error_t error; ///< \sa kblock_error()
kblock_clearerr_t clearerr; ///< \sa kblock_clearerr()
-
+
kblock_close_t close; ///< \sa kblock_close()
} KBlockVTable;
-/**
- * KBlock status and error codes.
- */
-typedef enum KBlockStatus
-{
- /* Status flags */
- KBS_MAPPED, ///< Status: The current loaded block from the device is memory mapped.
-
- /* Errors */
- KBS_ERR_ALREADY_MAPPED, ///< Error: trying to memory map a block already mapped.
- KBS_ERR_NOT_MAPPED, ///< Error: trying to memory unmap a block not yet mapped.
- KBS_ERR_MAP_NOT_AVAILABLE, ///< Error: mapping methods not implemented.
-
- #define KBS_STATUS_MASK (BV(KBS_MAPPED) | 0 /* Add status flags here */)
-
- #define KBS_ERROR_MASK (BV(KBS_ERR_ALREADY_MAPPED) | BV(KBS_ERR_ALREADY_MAPPED) \
- | BV(KBS_ERR_MAP_NOT_AVAILABLE) | 0 /* Add error flags here */)
-} KBlockStatus;
-
-
/**
* KBlock private members.
* These are the private members of the KBlock class, please do not
* access these directly, use the KBlock API.
- */
+ */
typedef struct KBlockPriv
{
DB(id_t type); ///< Used to keep track, at runtime, of the class type.
- void *pagebuf; ///< Pointer to a buffer used as page buffer when memory mapping is active. \sa kblock_map(), kblock_unmap()
- size_t pagebuf_size; ///< Size of the page buffer used for memory mapping. \sa kblock_map(), kblock_unmap()
- KBlockStatus flags; ///< Status and error flags.
+ int flags; ///< Status and error flags.
block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim()
- DB(size_t map_off); ///< When mapping is active, this is the mapped data offset inside the block. \sa kblock_map(), kblock_unmap()
- DB(size_t map_size); ///< When mapping is active, this is the mapped data size inside the block. \sa kblock_map(), kblock_unmap()
+ block_idx_t curr_blk;
+ bool cache_dirty;
+
+ struct KBlockVTable *vt; ///< Virtual table of interface functions.
} KBlockPriv;
/**
* KBlock: interface for a generic block device.
- *
+ *
* A block device is a device which can only be read/written
* with data blocks of constant size: flash memories,
* SD cards, hard disks, etc...
- *
- * This interface is designed to adapt to most block devices and
+ *
+ * This interface is designed to adapt to most block devices and
* use peculiar features in order to save CPU time and memory space.
- *
+ *
* You do not have to use this structure directly, specific implementations
* will be supplied in the peripheral drivers.
*/
typedef struct KBlock
{
KBlockPriv priv; ///< Interface private data, do not use directly.
-
+
/* Public access members/methods */
size_t blk_size; ///< Block size.
block_idx_t blk_cnt; ///< Number of blocks available in the device.
- struct KBlockVTable *vt; ///< Virtual table of interface functions.
} KBlock;
-/**
- * Add generic memory mapping functionality to a block device.
- *
- * If the device has an hardware page buffer mechanism, the map/unmap
- * functions are unimplemented.
- * If you need to use the mapping functions of such device, this function
- * will add generic software mapping features wrapping the KBlock methods.
- *
- * \param dev the block device.
- * \param buf the buffer to be used as page buffer for memory mapping functions.
- * \param size the size of the buffer. This is the maximum size that can be
- * memory mapped. If you want to map a full block, a size of at least
- * dev->blk_size have to be supplied.
- *
- * \sa kblock_map(), kblock_unmap(), kblock_readBuf(), kblock_writeBuf()
- */
-void kblock_addMapping(struct KBlock *dev, void *buf, size_t size);
-
/**
* Use a subset of the blocks on the device.
- *
+ *
* This function is useful for partitioning a device and use it for
* different purposes at the same time.
- *
+ *
* This function will limit the number of blocks used on the device by setting
* a start index and a number of blocks to be used counting from that index.
- *
+ *
* The blocks outside this range are no more accessible.
- *
+ *
* Logical block indexes will be mapped to physical indexes inside this new
* range automatically. Even following calls to kblock_trim() will use logical
* indexes, so, once trimmed, access can only be limited further and never
* expanded back.
- *
+ *
* Example:
* \code
* //...init KBlock device dev
* kblock_load(dev, 0); // Load the physical block #200.
* kblock_trim(dev, 0, 300); // Restrict access to the 200-500 physical block range.
* \endcode
- *
+ *
* \param b KBlock device.
* \param start The index of the start block for the limiting window in logical addressing units.
* \param count The number of blocks to be used.
- *
- */
+ *
+ */
INLINE void kblock_trim(struct KBlock *b, block_idx_t start, block_idx_t count)
{
ASSERT(start + count <= b->blk_cnt);
b->blk_cnt = count;
}
/**
 * Assert that device \a b implements virtual method \a method.
 *
 * For internal use by the kblock_*() access wrappers.
 */
#define KB_ASSERT_METHOD(b, method) \
	do \
	{ \
		ASSERT(b); \
		ASSERT((b)->priv.vt); \
		ASSERT((b)->priv.vt->method); \
	} \
	while (0)
-/**
- * Memory map the current page buffer.
- *
- * To speed up access, instead of using kblock_readBuf() and kblock_writeBuf(),
- * you can memory map the page buffer and access it directly through the
- * returned pointer. You can freely access the pointer in any way you
- * like. Once done, call kblock_unmap() to release the lock on the page_buffer.
- *
- * \note This function may be not available on all drivers, since the page
- * buffer can be in the hardware and not directly accessible through memory.
- * For this devices you can still add generic software mapping features
- * thanks to kblock_addMapping().
- *
- * \note Only one mapping is available at a time, trying to map the page buffer
- * again before releasing it is an error.
- *
- * \param b KBlock device.
- * \param offset Address offset within the page buffer, from which data has to
- * be memory mapped.
- * \param size Size of the memory to be mapped.
- *
- * \return A pointer to the mapped region of the page buffer or NULL on errors.
- *
- * \sa kblock_addMapping(), kblock_unmap()
- */
-INLINE void * kblock_map(struct KBlock *b, size_t offset, size_t size)
-{
- ASSERT(b->vt);
- ASSERT(b->vt->map);
-
- if (b->priv.flags & BV(KBS_MAPPED))
- {
- b->priv.flags |= BV(KBS_ERR_ALREADY_MAPPED);
- return NULL;
- }
-
- ASSERT(size < b->priv.pagebuf_size);
- ASSERT(offset + size <= b->blk_size);
- DB(b->priv.map_off = offset);
- DB(b->priv.map_size = size);
-
- void *ret = b->vt->map(b, offset, size);
-
- if (ret)
- b->priv.flags |= BV(KBS_MAPPED);
-
- return ret;
-}
-
-
-/**
- * Release the memory map on the page buffer.
- *
- * This function has to be called when memory mapped access has finished.
- * This is needed because only one mapping is allowed at a time.
- * The \a offset and \a size passed should be the same passed to
- * kblock_map() when the page buffer has been mapped.
- *
- * \note Trying to unmap the page buffer when there is no mapping ongoing is
- * an error.
- *
- * \param b KBlock device.
- * \param offset Address offset within the page buffer, from which data has been
- * memory mapped. Must be the same value passed to kblock_map()
- * when the memory was mapped.
- * \param size Size of the memory mapped. Must be the same value passed to
- * kblock_map() when the memory was mapped.
- *
- * \return 0 on success, EOF on errors.
- *
- * \sa kblock_addMapping(), kblock_map()
- */
-INLINE int kblock_unmap(struct KBlock *b, size_t offset, size_t size)
-{
- ASSERT(b->vt);
- ASSERT(b->vt->unmap);
-
- if (!(b->priv.flags & BV(KBS_MAPPED)))
- {
- b->priv.flags |= BV(KBS_ERR_NOT_MAPPED);
- return EOF;
- }
-
- ASSERT(b->priv.map_off == offset);
- ASSERT(b->priv.map_size == size);
- int ret = b->vt->unmap(b, offset, size);
-
- if (ret == 0)
- b->priv.flags &= ~BV(KBS_MAPPED);
- return ret;
-}
-
/**
* Get the current errors for the device.
- *
- * \note Calling this function will not clear the errors.
- *
+ *
+ * \note Calling this function will not clear the errors.
+ *
* \param b KBlock device.
- *
+ *
* \return 0 if no error is present, a driver specific mask of errors otherwise.
- *
+ *
* \sa kblock_clearerr()
*/
INLINE int kblock_error(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->error);
- /* Automatically mask status flags */
- return b->vt->error(b) & ~KBS_STATUS_MASK;
+ KB_ASSERT_METHOD(b, error);
+ return b->priv.vt->error(b);
}
/**
 * Clear the errors of the device.
 *
 * \param b KBlock device.
 *
 * \return 0 on success, EOF on errors.
 *
 * \sa kblock_error()
 */
INLINE int kblock_clearerr(struct KBlock *b)
{
	KB_ASSERT_METHOD(b, clearerr);
	return b->priv.vt->clearerr(b);
}
/**
 * Close the device.
 *
 * \param b KBlock device.
 *
 * \return 0 on success, EOF on errors.
 */
INLINE int kblock_close(struct KBlock *b)
{
	KB_ASSERT_METHOD(b, close);
	return b->priv.vt->close(b);
}
-void *kblock_unsupportedMap(struct KBlock *b, UNUSED_ARG(size_t, offset), UNUSED_ARG(size_t, size));
+size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size);
+
+int kblock_flush(struct KBlock *b);
+
+size_t kblock_write(struct KBlock *b, block_idx_t idx, const void *buf, size_t offset, size_t size);
+
+int kblock_copy(struct KBlock *b, block_idx_t idx1, block_idx_t idx2);
#endif /* IO_KBLOCK_H */
/* Load block \a index from the RAM backing store into the page buffer. */
static int kblockram_load(KBlock *b, block_idx_t index)
{
	KBlockRam *r = KBLOCKRAM_CAST(b);
	memcpy(r->pagebuf, r->membuf + index * r->b.blk_size, r->b.blk_size);
	return 0;
}
/* Store the page buffer into block \a index of the RAM backing store. */
static int kblockram_store(struct KBlock *b, block_idx_t index)
{
	KBlockRam *r = KBLOCKRAM_CAST(b);
	memcpy(r->membuf + index * r->b.blk_size, r->pagebuf, r->b.blk_size);
	return 0;
}
/* Copy \a size bytes from the page buffer (at \a offset) to \a buf. */
static size_t kblockram_readBuf(struct KBlock *b, void *buf, size_t offset, size_t size)
{
	KBlockRam *r = KBLOCKRAM_CAST(b);
	memcpy(buf, r->pagebuf + offset, size);
	return size;
}
-static size_t kblockram_writeBuf(struct KBlock *b, const void *buf, size_t offset, size_t size)
+static size_t kblockram_readDirect(struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size)
{
KBlockRam *r = KBLOCKRAM_CAST(b);
- memcpy((uint8_t *)r->b.priv.pagebuf + offset, buf, size);
+ memcpy(buf, r->membuf + index * r->b.blk_size + offset, size);
return size;
}
-static void * kblockram_map(struct KBlock *b, size_t offset, UNUSED_ARG(size_t, size))
-{
- return (uint8_t *)b->priv.pagebuf + offset;
-}
-
-
-static int kblockram_unmap(UNUSED_ARG(struct KBlock *, b), UNUSED_ARG(size_t, offset), UNUSED_ARG(size_t, size))
+static size_t kblockram_writeBuf(struct KBlock *b, const void *buf, size_t offset, size_t size)
{
- return 0;
+ KBlockRam *r = KBLOCKRAM_CAST(b);
+ memcpy(r->pagebuf + offset, buf, size);
+ return size;
}
static int kblockram_error(struct KBlock *b)
static KBlockVTable kblockram_vt =
{
+ .readDirect = kblockram_readDirect,
.readBuf = kblockram_readBuf,
.writeBuf = kblockram_writeBuf,
.load = kblockram_load,
.store = kblockram_store,
- .map = kblockram_map,
- .unmap = kblockram_unmap,
+
.error = kblockram_error,
.clearerr = kblockram_dummy,
.close = kblockram_dummy,
ASSERT(block_size);
memset(ram, 0, sizeof(*ram));
-
+
DB(ram->b.priv.type = KBT_KBLOCKRAM);
-
+
// First page used as page buffer
ram->b.blk_cnt = (size / block_size) - 1;
- ram->b.priv.pagebuf = buf;
- ram->b.priv.pagebuf_size = block_size;
-
+ ram->pagebuf = (uint8_t *)buf;
ram->membuf = (uint8_t *)buf + block_size;
- ram->b.blk_size = block_size;
- ram->b.vt = &kblockram_vt;
+ ram->b.blk_size = block_size;
+ ram->b.priv.vt = &kblockram_vt;
}