*
* -->
*
- * \author Francesco Sacchi <batt@develer.com>
+ * \defgroup io_kblock KBlock interface
+ * \ingroup core
+ * \{
*
* \brief KBlock interface
+ *
+ * A block device is a device which can only be read/written
+ * with data blocks of constant size: flash memories,
+ * SD cards, hard disks, etc...
+ * This interface is designed to adapt to most block devices and
+ * use peculiar features in order to save CPU time and memory space.
+ *
+ * There is no init function because you do not have to use this
+ * structure directly, specific implementations will supply their own init
+ * functions.
+ *
+ * Error handling is done in a way similar to standard C library: whenever a
+ * function (eg. kblock_flush()) returns error, you need to check the error
+ * code, which is implementation specific.
+ *
+ * Example of code flow:
+ * \code
+ * // init a KBlock-derived class
+ * Flash fls;
+ * flash_init(&fls.blk, 0);
+ *
+ * // use kblock_* functions to access the derived class
+ * kblock_write(&fls.blk, ...);
+ * if (kblock_flush(&fls.blk) == EOF)
+ * {
+ * // oops, error occurred!
+ * int err = kblock_error(&fls.blk);
+ * // handle Flash specific error conditions
+ * // ...
+ * // clear error condition
+ * kblock_clearerr(&fls.blk);
+ * }
+ * \endcode
+ *
+ * \note The KBlock interface is optimized for block reads. If you need a
+ * file-like access, you can use \ref kfile_block.
+ *
+ * \author Francesco Sacchi <batt@develer.com>
+ *
+ * $WIZ$ module_name = "kblock"
*/
#ifndef IO_KBLOCK_H
struct KBlock;
/**
- * \name Prototypes for KBlock access functions.
- *
- * A KBlock user can choose which function subset to implement,
- * but has to set to NULL unimplemented features.
- *
- * \{
+ * \name Prototypes for KBlock low level access functions.
+ *
+ * When writing a driver implementing the KBlock interface you can choose which
+ * function subset to implement, but you have to set to NULL unimplemented
+ * features.
+ *
+ * \{
*/
-typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size);
-typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size);
-typedef int (* kblock_load_t) (struct KBlock *b, block_idx_t index);
-typedef int (* kblock_store_t) (struct KBlock *b, block_idx_t index);
-typedef void * (* kblock_map_t) (struct KBlock *b, size_t offset, size_t size);
-typedef int (* kblock_unmap_t) (struct KBlock *b, size_t offset, size_t size);
-typedef int (* kblock_error_t) (struct KBlock *b);
-typedef int (* kblock_clearerr_t)(struct KBlock *b);
-typedef int (* kblock_close_t) (struct KBlock *b);
+typedef size_t (* kblock_read_direct_t) (struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size);
+typedef size_t (* kblock_write_direct_t) (struct KBlock *b, block_idx_t index, const void *buf, size_t offset, size_t size);
+
+typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size);
+typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size);
+typedef int (* kblock_load_t) (struct KBlock *b, block_idx_t index);
+typedef int (* kblock_store_t) (struct KBlock *b, block_idx_t index);
+
+typedef int (* kblock_error_t) (struct KBlock *b);
+typedef void (* kblock_clearerr_t) (struct KBlock *b);
+typedef int (* kblock_close_t) (struct KBlock *b);
/* \} */
-/**
+/*
* Table of interface functions for a KBlock device.
*/
typedef struct KBlockVTable
{
- kblock_read_t readBuf; ///< \sa kblock_readBuf()
- kblock_write_t writeBuf; ///< \sa kblock_writeBuf()
- kblock_load_t load; ///< \sa kblock_load()
- kblock_store_t store; ///< \sa kblock_store()
-
- kblock_map_t map; ///< \sa kblock_map()
- kblock_unmap_t unmap; ///< \sa kblock_unmap()
-
- kblock_error_t error; ///< \sa kblock_error()
- kblock_clearerr_t clearerr; ///< \sa kblock_clearerr()
-
- kblock_close_t close; ///< \sa kblock_close()
-} KBlockVTable;
+ kblock_read_direct_t readDirect;
+ kblock_write_direct_t writeDirect;
+ kblock_read_t readBuf;
+ kblock_write_t writeBuf;
+ kblock_load_t load;
+ kblock_store_t store;
-/**
- * KBlock status and error codes.
- */
-typedef enum KBlockStatus
-{
- /* Status flags */
- KBS_MAPPED, ///< Status: The current loaded block from the device is memory mapped.
-
- /* Errors */
- KBS_ERR_ALREADY_MAPPED, ///< Error: trying to memory map a block already mapped.
- KBS_ERR_NOT_MAPPED, ///< Error: trying to memory unmap a block not yet mapped.
- KBS_ERR_MAP_NOT_AVAILABLE, ///< Error: mapping methods not implemented.
+ kblock_error_t error; // \sa kblock_error()
+ kblock_clearerr_t clearerr; // \sa kblock_clearerr()
- #define KBS_STATUS_MASK (BV(KBS_MAPPED) | 0 /* Add status flags here */)
-
- #define KBS_ERROR_MASK (BV(KBS_ERR_ALREADY_MAPPED) | BV(KBS_ERR_ALREADY_MAPPED) \
- | BV(KBS_ERR_MAP_NOT_AVAILABLE) | 0 /* Add error flags here */)
-} KBlockStatus;
+ kblock_close_t close; // \sa kblock_close()
+} KBlockVTable;
-/**
+#define KB_BUFFERED BV(0) ///< Internal flag: true if the KBlock has a buffer
+#define KB_CACHE_DIRTY BV(1) ///< Internal flag: true if the cache is dirty
+#define KB_PARTIAL_WRITE BV(2) ///< Internal flag: true if the device allows partial block write
+
+
+/*
* KBlock private members.
- * These are the private members of the KBlock class, please do not
+ * These are the private members of the KBlock interface, please do not
* access these directly, use the KBlock API.
- */
+ */
typedef struct KBlockPriv
{
- DB(id_t type); ///< Used to keep track, at runtime, of the class type.
- void *pagebuf; ///< Pointer to a buffer used as page buffer when memory mapping is active. \sa kblock_map(), kblock_unmap()
- size_t pagebuf_size; ///< Size of the page buffer used for memory mapping. \sa kblock_map(), kblock_unmap()
- KBlockStatus flags; ///< Status and error flags.
- block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim()
- DB(size_t map_off); ///< When mapping is active, this is the mapped data offset inside the block. \sa kblock_map(), kblock_unmap()
- DB(size_t map_size); ///< When mapping is active, this is the mapped data size inside the block. \sa kblock_map(), kblock_unmap()
+ DB(id_t type); // Used to keep track, at runtime, of the class type.
+ int flags; // Status and error flags.
+ void *buf; // Pointer to the page buffer for RAM-cached KBlocks.
+ block_idx_t blk_start; // Start block number when the device is trimmed. \sa kblock_trim().
+ block_idx_t curr_blk; // Current cached block number in cached KBlocks.
+
+ const struct KBlockVTable *vt; // Virtual table of interface functions.
} KBlockPriv;
/**
* KBlock: interface for a generic block device.
- *
- * A block device is a device which can only be read/written
- * with data blocks of constant size: flash memories,
- * SD cards, hard disks, etc...
- *
- * This interface is designed to adapt to most block devices and
- * use peculiar features in order to save CPU time and memory space.
- *
- * You do not have to use this structure directly, specific implementations
- * will be supplied in the peripheral drivers.
+ *
*/
typedef struct KBlock
{
KBlockPriv priv; ///< Interface private data, do not use directly.
-
- /* Public access members/methods */
+
+ /* Public access members */
size_t blk_size; ///< Block size.
block_idx_t blk_cnt; ///< Number of blocks available in the device.
- struct KBlockVTable *vt; ///< Virtual table of interface functions.
} KBlock;
-/**
- * Add generic memory mapping functionality to a block device.
- *
- * If the device has an hardware page buffer mechanism, the map/unmap
- * functions are unimplemented.
- * If you need to use the mapping functions of such device, this function
- * will add generic software mapping features wrapping the KBlock methods.
- *
- * \param dev the block device.
- * \param buf the buffer to be used as page buffer for memory mapping functions.
- * \param size the size of the buffer. This is the maximum size that can be
- * memory mapped. If you want to map a full block, a size of at least
- * dev->blk_size have to be supplied.
- *
- * \sa kblock_map(), kblock_unmap(), kblock_readBuf(), kblock_writeBuf()
- */
-void kblock_addMapping(struct KBlock *dev, void *buf, size_t size);
-
/**
* Use a subset of the blocks on the device.
- *
+ *
* This function is useful for partitioning a device and use it for
* different purposes at the same time.
- *
+ *
* This function will limit the number of blocks used on the device by setting
* a start index and a number of blocks to be used counting from that index.
- *
+ *
* The blocks outside this range are no more accessible.
- *
+ *
* Logical block indexes will be mapped to physical indexes inside this new
* range automatically. Even following calls to kblock_trim() will use logical
* indexes, so, once trimmed, access can only be limited further and never
* expanded back.
- *
+ *
* Example:
* \code
* //...init KBlock device dev
* kblock_trim(dev, 200, 1500); // Restrict access to the 200-1700 physical block range.
- * kblock_load(dev, 0); // Load the physical block #200.
+ * kblock_read(dev, 0, buf, 0, dev->blk_size); // Read from physical block #200.
* kblock_trim(dev, 0, 300); // Restrict access to the 200-500 physical block range.
* \endcode
- *
+ *
* \param b KBlock device.
* \param start The index of the start block for the limiting window in logical addressing units.
* \param count The number of blocks to be used.
- *
- */
-INLINE void kblock_trim(struct KBlock *b, block_idx_t start, block_idx_t count)
-{
- ASSERT(start + count <= b->blk_cnt);
- b->priv.blk_start += start;
- b->blk_cnt = count;
-}
+ *
+ * \return 0 if all is OK, EOF on errors.
+ */
+int kblock_trim(struct KBlock *b, block_idx_t start, block_idx_t count);
+
+
+#define KB_ASSERT_METHOD(b, method) \
+ do \
+ { \
+ ASSERT(b); \
+ ASSERT((b)->priv.vt); \
+ ASSERT((b)->priv.vt->method); \
+ } \
+ while (0)
+
/**
- * Transfer data from the internal page buffer to user memory.
- *
- * This function accesses the internal page buffer of the block device and copy
- * the data to \a buf. The content is copied from the current cached block.
- *
+ * Get the current errors for the device.
+ *
+ * \note Calling this function will not clear the errors.
+ *
* \param b KBlock device.
- * \param buf User buffer to copy the data to.
- * \param offset Address offset within the block, from which to copy data.
- * \param size Size, in bytes, of the data to be copied.
- *
- * \return The number of bytes copied. Can be less than \a size on errors.
- *
- * \sa kblock_writeBuf()
+ *
+ * \return 0 if no error is present, a driver specific mask of errors otherwise.
+ *
+ * \sa kblock_clearerr()
*/
-INLINE size_t kblock_readBuf(struct KBlock *b, void *buf, size_t offset, size_t size)
+INLINE int kblock_error(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->readBuf);
- ASSERT(offset + size <= b->blk_size);
-
- return b->vt->readBuf(b, buf, offset, size);
+ KB_ASSERT_METHOD(b, error);
+ return b->priv.vt->error(b);
}
/**
- * Write to the page buffer.
- *
- * Copies data from user memory to the device page buffer. The data is written
- * in the current cached block buffer.
- *
+ * Clear the errors of the device.
+ *
* \param b KBlock device.
- * \param buf User buffer to copy the data from.
- * \param offset Address offset within the block, from which data has to be written.
- * \param size Size, in bytes, of the data to be written.
- *
- * \return The number of bytes written. Can be less than \a size on errors.
- *
- * \sa kblock_readBuf()
+ *
+ *
+ * \sa kblock_error()
*/
-INLINE size_t kblock_writeBuf(struct KBlock *b, const void *buf, size_t offset, size_t size)
+INLINE void kblock_clearerr(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->writeBuf);
- ASSERT(offset + size <= b->blk_size);
- return b->vt->writeBuf(b, buf, offset, size);
+ KB_ASSERT_METHOD(b, clearerr);
+ b->priv.vt->clearerr(b);
}
+
/**
- * Load a block from the device to the page buffer.
- *
- * The block \a index will be loaded in the internal page buffer.
- *
+ * Flush the cache (if any) to the device.
+ *
+ * This function will write any pending modifications to the device.
+ * If the device does not have a cache, this function will do nothing.
+ *
+ * \return 0 if all is OK, EOF on errors.
+ * \sa kblock_read(), kblock_write(), kblock_buffered().
+ */
+int kblock_flush(struct KBlock *b);
+
+/**
+ * Close the device.
+ *
* \param b KBlock device.
- * \param index Logical index of the block to be loaded.
- *
+ *
* \return 0 on success, EOF on errors.
*/
-INLINE int kblock_load(struct KBlock *b, block_idx_t index)
+INLINE int kblock_close(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->load);
- ASSERT(index < b->blk_cnt);
-
- return b->vt->load(b, b->priv.blk_start + index);
+ KB_ASSERT_METHOD(b, close);
+ return kblock_flush(b) | b->priv.vt->close(b);
}
/**
- * Store a block from the page buffer to the device.
- *
- * The current content of the page buffer will be flushed to the block \a index.
- *
+ * \return true if the device \a b is buffered, false otherwise.
* \param b KBlock device.
- * \param index Logical index of the block to be stored.
- *
- * \return 0 on success, EOF on errors.
+ * \sa kblock_cachedBlock(), kblock_cacheDirty().
*/
-INLINE int kblock_store(struct KBlock *b, block_idx_t index)
+INLINE bool kblock_buffered(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->store);
- ASSERT(index < b->blk_cnt);
-
- return b->vt->store(b, b->priv.blk_start + index);
+ ASSERT(b);
+ return (b->priv.flags & KB_BUFFERED);
}
/**
- * Memory map the current page buffer.
- *
- * To speed up access, instead of using kblock_readBuf() and kblock_writeBuf(),
- * you can memory map the page buffer and access it directly through the
- * returned pointer. You can freely access the pointer in any way you
- * like. Once done, call kblock_unmap() to release the lock on the page_buffer.
- *
- * \note This function may be not available on all drivers, since the page
- * buffer can be in the hardware and not directly accessible through memory.
- * For this devices you can still add generic software mapping features
- * thanks to kblock_addMapping().
- *
- * \note Only one mapping is available at a time, trying to map the page buffer
- * again before releasing it is an error.
- *
+ * \return The current cached block number if the device is buffered.
* \param b KBlock device.
- * \param offset Address offset within the page buffer, from which data has to
- * be memory mapped.
- * \param size Size of the memory to be mapped.
- *
- * \return A pointer to the mapped region of the page buffer or NULL on errors.
- *
- * \sa kblock_addMapping(), kblock_unmap()
+ * \note This function will throw an ASSERT if called on a non buffered KBlock.
+ * \sa kblock_buffered(), kblock_cacheDirty().
*/
-INLINE void * kblock_map(struct KBlock *b, size_t offset, size_t size)
+INLINE block_idx_t kblock_cachedBlock(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->map);
-
- if (b->priv.flags & BV(KBS_MAPPED))
- {
- b->priv.flags |= BV(KBS_ERR_ALREADY_MAPPED);
- return NULL;
- }
-
- ASSERT(size < b->priv.pagebuf_size);
- ASSERT(offset + size <= b->blk_size);
- DB(b->priv.map_off = offset);
- DB(b->priv.map_size = size);
-
- void *ret = b->vt->map(b, offset, size);
-
- if (ret)
- b->priv.flags |= BV(KBS_MAPPED);
-
- return ret;
+ ASSERT(kblock_buffered(b));
+ return b->priv.curr_blk;
}
/**
- * Release the memory map on the page buffer.
- *
- * This function has to be called when memory mapped access has finished.
- * This is needed because only one mapping is allowed at a time.
- * The \a offset and \a size passed should be the same passed to
- * kblock_map() when the page buffer has been mapped.
- *
- * \note Trying to unmap the page buffer when there is no mapping ongoing is
- * an error.
- *
+ * Return the status of the internal cache.
+ *
* \param b KBlock device.
- * \param offset Address offset within the page buffer, from which data has been
- * memory mapped. Must be the same value passed to kblock_map()
- * when the memory was mapped.
- * \param size Size of the memory mapped. Must be the same value passed to
- * kblock_map() when the memory was mapped.
- *
- * \return 0 on success, EOF on errors.
- *
- * \sa kblock_addMapping(), kblock_map()
- */
-INLINE int kblock_unmap(struct KBlock *b, size_t offset, size_t size)
+ * \return If the device supports buffering, returns true if the cache is dirty,
+ * false if the cache is clean and coherent with device content.
+ * \note This function will throw an ASSERT if called on a non buffered KBlock.
+ * \sa kblock_cachedBlock(), kblock_buffered().
+ */
+INLINE bool kblock_cacheDirty(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->unmap);
-
- if (!(b->priv.flags & BV(KBS_MAPPED)))
- {
- b->priv.flags |= BV(KBS_ERR_NOT_MAPPED);
- return EOF;
- }
-
- ASSERT(b->priv.map_off == offset);
- ASSERT(b->priv.map_size == size);
- int ret = b->vt->unmap(b, offset, size);
-
- if (ret == 0)
- b->priv.flags &= ~BV(KBS_MAPPED);
- return ret;
+ ASSERT(kblock_buffered(b));
+ return kblock_buffered(b) && (b->priv.flags & KB_CACHE_DIRTY);
}
/**
- * Get the current errors for the device.
- *
- * \note Calling this function will not clear the errors.
- *
+ * \return true if the device \a b supports partial block write. That is, you
+ *         can call kblock_write() with a size which is less than the block
+ * size.
* \param b KBlock device.
- *
- * \return 0 if no error is present, a driver specific mask of errors otherwise.
- *
- * \sa kblock_clearerr()
+ * \sa kblock_write().
*/
-INLINE int kblock_error(struct KBlock *b)
+INLINE bool kblock_partialWrite(struct KBlock *b)
{
- ASSERT(b->vt);
- ASSERT(b->vt->error);
- /* Automatically mask status flags */
- return b->vt->error(b) & ~KBS_STATUS_MASK;
+ ASSERT(b);
+ return (b->priv.flags & KB_PARTIAL_WRITE);
}
/**
- * Clear the errors of the device.
- *
+ * Read data from the block device.
+ *
+ * This function will read \a size bytes from block \a idx starting at
+ * address \a offset inside the block.
+ *
+ * Most block devices (almost all flash memories, for instance),
+ * can efficiently read even a part of the block.
+ *
+ * \note This function can be slow if you try to partially read a block from
+ * a device which does not support partial block reads and is opened
+ * in unbuffered mode.
+ *
* \param b KBlock device.
- *
- * \return 0 on success, EOF on errors.
- *
- * \sa kblock_error()
+ * \param idx the block number where you want to read.
+ * \param buf a buffer where the data will be read.
+ * \param offset the offset inside the block from which data reading will start.
+ * \param size the size of data to be read.
+ *
+ * \return the number of bytes read.
+ *
+ * \sa kblock_write().
*/
-INLINE int kblock_clearerr(struct KBlock *b)
-{
- ASSERT(b->vt);
- ASSERT(b->vt->clearerr);
- /* Automatically clear error flags */
- b->priv.flags &= ~KBS_ERROR_MASK;
- return b->vt->clearerr(b);
-}
+size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size);
+
/**
- * Close the device.
- *
+ * Write data to the block device.
+ *
+ * This function will write \a size bytes to block \a idx starting at
+ * address \a offset inside the block.
+ *
+ * \note Partial block writes are supported only on certain devices.
+ * You can use kblock_partialWrite() in order to check if the device
+ * has this feature or not.
+ *
+ * \note If the device is opened in buffered mode, this function will use
+ *       efficiently and transparently the cache provided.
+ * In order to be sure that all modifications are actually written
+ * to the device you have to call kblock_flush().
+ *
* \param b KBlock device.
- *
- * \return 0 on success, EOF on errors.
+ * \param idx the block number where you want to write.
+ * \param buf a pointer to the data to be written.
+ * \param offset the offset inside the block from which data writing will start.
+ * \param size the size of data to be written.
+ *
+ * \return the number of bytes written.
+ *
+ * \sa kblock_read(), kblock_flush(), kblock_buffered(), kblock_partialWrite().
*/
-INLINE int kblock_close(struct KBlock *b)
-{
- ASSERT(b->vt);
- ASSERT(b->vt->close);
- return b->vt->close(b);
-}
+size_t kblock_write(struct KBlock *b, block_idx_t idx, const void *buf, size_t offset, size_t size);
+
+/**
+ * Copy one block to another.
+ *
+ * This function will copy the content of block \a src to block \a dest.
+ *
+ * \note This function is available only on devices which support partial
+ * block write or are opened in buffered mode.
+ *
+ * \param b KBlock device.
+ * \param src source block number.
+ * \param dest destination block number.
+ *
+ * \return 0 if all is OK, EOF on errors.
+ */
+int kblock_copy(struct KBlock *b, block_idx_t src, block_idx_t dest);
+
+int kblock_swLoad(struct KBlock *b, block_idx_t index);
+int kblock_swStore(struct KBlock *b, block_idx_t index);
+size_t kblock_swReadBuf(struct KBlock *b, void *buf, size_t offset, size_t size);
+size_t kblock_swWriteBuf(struct KBlock *b, const void *buf, size_t offset, size_t size);
+int kblock_swClose(struct KBlock *b);
+
+/** \} */ // end of defgroup io_kblock
+
#endif /* IO_KBLOCK_H */