X-Git-Url: https://codewiz.org/gitweb?a=blobdiff_plain;f=bertos%2Fio%2Fkblock.h;h=abc0fcb28e0de43e29bef346a56465a3d555c073;hb=d25edd9a1824faeab9e4deeec2a8db56c512b0fe;hp=b486fa6d3250891b184584d008434b6bcd5508d9;hpb=f97c447c6d175d1db58f27e74d93e053904b6cfd;p=bertos.git diff --git a/bertos/io/kblock.h b/bertos/io/kblock.h index b486fa6d..abc0fcb2 100644 --- a/bertos/io/kblock.h +++ b/bertos/io/kblock.h @@ -50,21 +50,24 @@ struct KBlock; /** * \name Prototypes for KBlock access functions. - * + * * A KBlock user can choose which function subset to implement, * but has to set to NULL unimplemented features. - * - * \{ + * + * \{ */ -typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size); -typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size); -typedef int (* kblock_load_t) (struct KBlock *b, block_idx_t index); -typedef int (* kblock_store_t) (struct KBlock *b, block_idx_t index); -typedef void * (* kblock_map_t) (struct KBlock *b, size_t offset, size_t size); -typedef int (* kblock_unmap_t) (struct KBlock *b, size_t offset, size_t size); -typedef int (* kblock_error_t) (struct KBlock *b); -typedef int (* kblock_clearerr_t)(struct KBlock *b); -typedef int (* kblock_close_t) (struct KBlock *b); +typedef size_t (* kblock_read_direct_t) (struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size); +typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size); +typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size); +typedef int (* kblock_load_t) (struct KBlock *b, block_idx_t index); +typedef int (* kblock_store_t) (struct KBlock *b, block_idx_t index); + +typedef int (* kblock_write_block_t) (struct KBlock *b, block_idx_t index, const void *buf); +typedef int (* kblock_read_block_t) (struct KBlock *b, block_idx_t index, void *buf); + +typedef int (* kblock_error_t) (struct KBlock *b); +typedef int (* kblock_clearerr_t) (struct KBlock *b); +typedef int (* kblock_close_t) (struct KBlock *b); /* \} */ /** @@ -72,115 +75,81 @@ typedef int (* kblock_close_t) (struct KBlock *b); */ typedef struct KBlockVTable { - kblock_read_t readBuf; ///< \sa kblock_readBuf() - kblock_write_t writeBuf; ///< \sa kblock_writeBuf() - kblock_load_t load; ///< \sa kblock_load() - kblock_store_t store; ///< \sa kblock_store() + kblock_read_direct_t readDirect; - kblock_map_t map; ///< \sa kblock_map() - kblock_unmap_t unmap; ///< \sa kblock_unmap() + kblock_read_t readBuf; + kblock_write_t writeBuf; + kblock_load_t load; + kblock_store_t store; + + kblock_read_block_t readBlock; + kblock_write_block_t writeBlock; kblock_error_t error; ///< \sa kblock_error() kblock_clearerr_t clearerr; ///< \sa kblock_clearerr() - + kblock_close_t close; ///< \sa kblock_close() } KBlockVTable; -/** - * KBlock status and error codes. - */ -typedef enum KBlockStatus -{ - /* Status flags */ - KBS_MAPPED, ///< Status: The current loaded block from the device is memory mapped. - - /* Errors */ - KBS_ERR_ALREADY_MAPPED, ///< Error: trying to memory map a block already mapped. - KBS_ERR_NOT_MAPPED, ///< Error: trying to memory unmap a block not yet mapped. - KBS_ERR_MAP_NOT_AVAILABLE, ///< Error: mapping methods not implemented. 
- - #define KBS_STATUS_MASK (BV(KBS_MAPPED) | 0 /* Add status flags here */) - - #define KBS_ERROR_MASK (BV(KBS_ERR_ALREADY_MAPPED) | BV(KBS_ERR_ALREADY_MAPPED) \ - | BV(KBS_ERR_MAP_NOT_AVAILABLE) | 0 /* Add error flags here */) -} KBlockStatus; - +#define KB_BUFFERED BV(0) /** * KBlock private members. * These are the private members of the KBlock class, please do not * access these directly, use the KBlock API. - */ + */ typedef struct KBlockPriv { DB(id_t type); ///< Used to keep track, at runtime, of the class type. - void *pagebuf; ///< Pointer to a buffer used as page buffer when memory mapping is active. \sa kblock_map(), kblock_unmap() - size_t pagebuf_size; ///< Size of the page buffer used for memory mapping. \sa kblock_map(), kblock_unmap() - KBlockStatus flags; ///< Status and error flags. + int flags; ///< Status and error flags. + void *buf; block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim() - DB(size_t map_off); ///< When mapping is active, this is the mapped data offset inside the block. \sa kblock_map(), kblock_unmap() - DB(size_t map_size); ///< When mapping is active, this is the mapped data size inside the block. \sa kblock_map(), kblock_unmap() + block_idx_t curr_blk; + bool cache_dirty; + + const struct KBlockVTable *vt; ///< Virtual table of interface functions. } KBlockPriv; /** * KBlock: interface for a generic block device. - * + * * A block device is a device which can only be read/written * with data blocks of constant size: flash memories, * SD cards, hard disks, etc... - * - * This interface is designed to adapt to most block devices and + * + * This interface is designed to adapt to most block devices and * use peculiar features in order to save CPU time and memory space. - * + * * You do not have to use this structure directly, specific implementations * will be supplied in the peripheral drivers. */ typedef struct KBlock { KBlockPriv priv; ///< Interface private data, do not use directly. - + /* Public access members/methods */ size_t blk_size; ///< Block size. block_idx_t blk_cnt; ///< Number of blocks available in the device. - struct KBlockVTable *vt; ///< Virtual table of interface functions. } KBlock; -/** - * Add generic memory mapping functionality to a block device. - * - * If the device has an hardware page buffer mechanism, the map/unmap - * functions are unimplemented. - * If you need to use the mapping functions of such device, this function - * will add generic software mapping features wrapping the KBlock methods. - * - * \param dev the block device. - * \param buf the buffer to be used as page buffer for memory mapping functions. - * \param size the size of the buffer. This is the maximum size that can be - * memory mapped. If you want to map a full block, a size of at least - * dev->blk_size have to be supplied. - * - * \sa kblock_map(), kblock_unmap(), kblock_readBuf(), kblock_writeBuf() - */ -void kblock_addMapping(struct KBlock *dev, void *buf, size_t size); - /** * Use a subset of the blocks on the device. - * + * * This function is useful for partitioning a device and use it for * different purposes at the same time. - * + * * This function will limit the number of blocks used on the device by setting * a start index and a number of blocks to be used counting from that index. - * + * * The blocks outside this range are no more accessible. - * + * * Logical block indexes will be mapped to physical indexes inside this new * range automatically. 
Even following calls to kblock_trim() will use logical * indexes, so, once trimmed, access can only be limited further and never * expanded back. - * + * * Example: * \code * //...init KBlock device dev @@ -188,12 +157,12 @@ void kblock_addMapping(struct KBlock *dev, void *buf, size_t size); * kblock_load(dev, 0); // Load the physical block #200. * kblock_trim(dev, 0, 300); // Restrict access to the 200-500 physical block range. * \endcode - * + * * \param b KBlock device. * \param start The index of the start block for the limiting window in logical addressing units. * \param count The number of blocks to be used. - * - */ + * + */ INLINE void kblock_trim(struct KBlock *b, block_idx_t start, block_idx_t count) { ASSERT(start + count <= b->blk_cnt); @@ -201,233 +170,103 @@ INLINE void kblock_trim(struct KBlock *b, block_idx_t start, block_idx_t count) b->blk_cnt = count; } -/** - * Transfer data from the internal page buffer to user memory. - * - * This function accesses the internal page buffer of the block device and copy - * the data to \a buf. The content is copied from the current cached block. - * - * \param b KBlock device. - * \param buf User buffer to copy the data to. - * \param offset Address offset within the block, from which to copy data. - * \param size Size, in bytes, of the data to be copied. - * - * \return The number of bytes copied. Can be less than \a size on errors. - * - * \sa kblock_writeBuf() - */ -INLINE size_t kblock_readBuf(struct KBlock *b, void *buf, size_t offset, size_t size) -{ - ASSERT(b->vt); - ASSERT(b->vt->readBuf); - ASSERT(offset + size <= b->blk_size); - - return b->vt->readBuf(b, buf, offset, size); -} + +#define KB_ASSERT_METHOD(b, method) \ + do \ + { \ + ASSERT(b); \ + ASSERT((b)->priv.vt); \ + ASSERT((b)->priv.vt->method); \ + } \ + while (0) + /** - * Write to the page buffer. - * - * Copies data from user memory to the device page buffer. The data is written - * in the current cached block buffer. - * + * Get the current errors for the device. + * + * \note Calling this function will not clear the errors. + * * \param b KBlock device. - * \param buf User buffer to copy the data from. - * \param offset Address offset within the block, from which data has to be written. - * \param size Size, in bytes, of the data to be written. - * - * \return The number of bytes written. Can be less than \a size on errors. - * - * \sa kblock_readBuf() + * + * \return 0 if no error is present, a driver specific mask of errors otherwise. + * + * \sa kblock_clearerr() */ -INLINE size_t kblock_writeBuf(struct KBlock *b, const void *buf, size_t offset, size_t size) +INLINE int kblock_error(struct KBlock *b) { - ASSERT(b->vt); - ASSERT(b->vt->writeBuf); - ASSERT(offset + size <= b->blk_size); - return b->vt->writeBuf(b, buf, offset, size); + KB_ASSERT_METHOD(b, error); + return b->priv.vt->error(b); } /** - * Load a block from the device to the page buffer. - * - * The block \a index will be loaded in the internal page buffer. - * + * Clear the errors of the device. + * * \param b KBlock device. - * \param index Logical index of the block to be loaded. - * + * * \return 0 on success, EOF on errors. 
+ * + * \sa kblock_error() */ -INLINE int kblock_load(struct KBlock *b, block_idx_t index) +INLINE int kblock_clearerr(struct KBlock *b) { - ASSERT(b->vt); - ASSERT(b->vt->load); - ASSERT(index < b->blk_cnt); - - return b->vt->load(b, b->priv.blk_start + index); + KB_ASSERT_METHOD(b, clearerr); + return b->priv.vt->clearerr(b); } /** - * Store a block from the page buffer to the device. - * - * The current content of the page buffer will be flushed to the block \a index. - * + * Close the device. + * * \param b KBlock device. - * \param index Logical index of the block to be stored. - * + * * \return 0 on success, EOF on errors. */ -INLINE int kblock_store(struct KBlock *b, block_idx_t index) +INLINE int kblock_close(struct KBlock *b) { - ASSERT(b->vt); - ASSERT(b->vt->store); - ASSERT(index < b->blk_cnt); - - return b->vt->store(b, b->priv.blk_start + index); + KB_ASSERT_METHOD(b, close); + return b->priv.vt->close(b); } - -/** - * Memory map the current page buffer. - * - * To speed up access, instead of using kblock_readBuf() and kblock_writeBuf(), - * you can memory map the page buffer and access it directly through the - * returned pointer. You can freely access the pointer in any way you - * like. Once done, call kblock_unmap() to release the lock on the page_buffer. - * - * \note This function may be not available on all drivers, since the page - * buffer can be in the hardware and not directly accessible through memory. - * For this devices you can still add generic software mapping features - * thanks to kblock_addMapping(). - * - * \note Only one mapping is available at a time, trying to map the page buffer - * again before releasing it is an error. - * - * \param b KBlock device. - * \param offset Address offset within the page buffer, from which data has to - * be memory mapped. - * \param size Size of the memory to be mapped. - * - * \return A pointer to the mapped region of the page buffer or NULL on errors. - * - * \sa kblock_addMapping(), kblock_unmap() - */ -INLINE void * kblock_map(struct KBlock *b, size_t offset, size_t size) +INLINE int kblock_writeBlock(struct KBlock *b, block_idx_t index, const void *buf) { - ASSERT(b->vt); - ASSERT(b->vt->map); - - if (b->priv.flags & BV(KBS_MAPPED)) - { - b->priv.flags |= BV(KBS_ERR_ALREADY_MAPPED); - return NULL; - } - - ASSERT(size < b->priv.pagebuf_size); - ASSERT(offset + size <= b->blk_size); - DB(b->priv.map_off = offset); - DB(b->priv.map_size = size); - - void *ret = b->vt->map(b, offset, size); - - if (ret) - b->priv.flags |= BV(KBS_MAPPED); - - return ret; + KB_ASSERT_METHOD(b, writeBlock); + ASSERT(index < b->blk_cnt); + return b->priv.vt->writeBlock(b, b->priv.blk_start + index, buf); } - -/** - * Release the memory map on the page buffer. - * - * This function has to be called when memory mapped access has finished. - * This is needed because only one mapping is allowed at a time. - * The \a offset and \a size passed should be the same passed to - * kblock_map() when the page buffer has been mapped. - * - * \note Trying to unmap the page buffer when there is no mapping ongoing is - * an error. - * - * \param b KBlock device. - * \param offset Address offset within the page buffer, from which data has been - * memory mapped. Must be the same value passed to kblock_map() - * when the memory was mapped. - * \param size Size of the memory mapped. Must be the same value passed to - * kblock_map() when the memory was mapped. - * - * \return 0 on success, EOF on errors. 
- * - * \sa kblock_addMapping(), kblock_map() - */ -INLINE int kblock_unmap(struct KBlock *b, size_t offset, size_t size) +INLINE int kblock_readBlock(struct KBlock *b, block_idx_t index, void *buf) { - ASSERT(b->vt); - ASSERT(b->vt->unmap); - - if (!(b->priv.flags & BV(KBS_MAPPED))) - { - b->priv.flags |= BV(KBS_ERR_NOT_MAPPED); - return EOF; - } - - ASSERT(b->priv.map_off == offset); - ASSERT(b->priv.map_size == size); - int ret = b->vt->unmap(b, offset, size); - - if (ret == 0) - b->priv.flags &= ~BV(KBS_MAPPED); - return ret; + KB_ASSERT_METHOD(b, readDirect); + ASSERT(index < b->blk_cnt); + return b->priv.vt->readBlock(b, b->priv.blk_start + index, buf); } -/** - * Get the current errors for the device. - * - * \note Calling this function will not clear the errors. - * - * \param b KBlock device. - * - * \return 0 if no error is present, a driver specific mask of errors otherwise. - * - * \sa kblock_clearerr() - */ -INLINE int kblock_error(struct KBlock *b) +INLINE block_idx_t kblock_cachedBlock(struct KBlock *b) { - ASSERT(b->vt); - ASSERT(b->vt->error); - /* Automatically mask status flags */ - return b->vt->error(b) & ~KBS_STATUS_MASK; + return b->priv.curr_blk; } -/** - * Clear the errors of the device. - * - * \param b KBlock device. - * - * \return 0 on success, EOF on errors. - * - * \sa kblock_error() - */ -INLINE int kblock_clearerr(struct KBlock *b) +INLINE bool kblock_buffered(struct KBlock *b) { - ASSERT(b->vt); - ASSERT(b->vt->clearerr); - /* Automatically clear error flags */ - b->priv.flags &= ~KBS_ERROR_MASK; - return b->vt->clearerr(b); + ASSERT(b); + return (b->priv.flags & KB_BUFFERED); } -/** - * Close the device. - * - * \param b KBlock device. - * - * \return 0 on success, EOF on errors. - */ -INLINE int kblock_close(struct KBlock *b) -{ - ASSERT(b->vt); - ASSERT(b->vt->close); - return b->vt->close(b); -} +size_t kblock_read(struct KBlock *b, block_idx_t idx, void *buf, size_t offset, size_t size); + +int kblock_flush(struct KBlock *b); + +size_t kblock_write(struct KBlock *b, block_idx_t idx, const void *buf, size_t offset, size_t size); + +int kblock_copy(struct KBlock *b, block_idx_t idx1, block_idx_t idx2); + + +int kblock_swWriteBlock(struct KBlock *b, block_idx_t index, const void *buf); +int kblock_swReadBlock(struct KBlock *b, block_idx_t index, void *buf); + +size_t kblock_swReadDirect(struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size); +int kblock_swLoad(struct KBlock *b, block_idx_t index); +int kblock_swStore(struct KBlock *b, block_idx_t index); +size_t kblock_swReadBuf(struct KBlock *b, void *buf, size_t offset, size_t size); +size_t kblock_swWriteBuf(struct KBlock *b, const void *buf, size_t offset, size_t size); #endif /* IO_KBLOCK_H */
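
Usage note (not part of the patch): the new vtable drops the map/unmap interface in favour of a cached, byte-oriented API, where kblock_read() and kblock_write() address any logical block directly and kblock_flush() commits the driver's cached block. The sketch below shows how a caller might combine them with kblock_trim(). It is a minimal example assuming a driver-supplied KBlock with enough blocks, an <io/kblock.h> include path, and that kblock_flush() returns 0 on success like the other KBlock calls; none of these details are stated by this diff itself.

#include <io/kblock.h>

/*
 * Minimal sketch: read-modify-write a 32-bit counter kept at byte
 * offset 16 of logical block 0 inside a 256-block partition.
 * "dev" is any KBlock set up by a concrete driver (flash, SD, ...)
 * and is assumed to expose at least 320 blocks.
 */
static int bump_counter(KBlock *dev, uint32_t increment)
{
	uint32_t counter;

	/* Restrict access to a 256-block window starting at physical block 64. */
	kblock_trim(dev, 64, 256);

	/* kblock_read()/kblock_write() return the number of bytes transferred. */
	if (kblock_read(dev, 0, &counter, 16, sizeof(counter)) != sizeof(counter))
		return EOF;

	counter += increment;

	if (kblock_write(dev, 0, &counter, 16, sizeof(counter)) != sizeof(counter))
		return EOF;

	/* On buffered devices the write may still sit in the cache. */
	if (kblock_flush(dev) != 0)
		return EOF;

	return kblock_error(dev) ? EOF : 0;
}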