4 * This file is part of BeRTOS.
6 * Bertos is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * As a special exception, you may use this file as part of a free software
21 * library without restriction. Specifically, if other files instantiate
22 * templates or use macros or inline functions from this file, or you compile
23 * this file and link it with other files to produce an executable, this
24 * file does not by itself cause the resulting executable to be covered by
25 * the GNU General Public License. This exception does not however
26 * invalidate any other reasons why the executable file might be covered by
27 * the GNU General Public License.
29 * Copyright 2009 Develer S.r.l. (http://www.develer.com/)
33 * \author Francesco Sacchi <batt@develer.com>
35 * \brief KBlock interface
41 #include <cfg/compiler.h>
42 #include <cfg/debug.h>
43 #include <cfg/macros.h>
/** Type for addressing blocks in the device. */
typedef uint32_t block_idx_t;

/* Forward declaration: the access-method typedefs below only need a
 * pointer to KBlock, the full definition follows later in this file. */
struct KBlock;

/**
 * \name Prototypes for KBlock access functions.
 *
 * A KBlock user can choose which function subset to implement,
 * but has to set to NULL unimplemented features.
 * \{
 */
typedef size_t (* kblock_read_t)    (struct KBlock *b, void *buf, size_t offset, size_t size);
typedef size_t (* kblock_write_t)   (struct KBlock *b, const void *buf, size_t offset, size_t size);
typedef int    (* kblock_load_t)    (struct KBlock *b, block_idx_t index);
typedef int    (* kblock_store_t)   (struct KBlock *b, block_idx_t index);
typedef void * (* kblock_map_t)     (struct KBlock *b, size_t offset, size_t size);
typedef int    (* kblock_unmap_t)   (struct KBlock *b, size_t offset, size_t size);
typedef int    (* kblock_error_t)   (struct KBlock *b);
typedef int    (* kblock_clearerr_t)(struct KBlock *b);
typedef int    (* kblock_close_t)   (struct KBlock *b);
/* \} */
71 * Table of interface functions for a KBlock device.
73 typedef struct KBlockVTable
75 kblock_read_t readBuf; ///< \sa kblock_readBuf()
76 kblock_write_t writeBuf; ///< \sa kblock_writeBuf()
77 kblock_load_t load; ///< \sa kblock_load()
78 kblock_store_t store; ///< \sa kblock_store()
80 kblock_map_t map; ///< \sa kblock_map()
81 kblock_unmap_t unmap; ///< \sa kblock_unmap()
83 kblock_error_t error; ///< \sa kblock_error()
84 kblock_clearerr_t clearerr; ///< \sa kblock_clearerr()
86 kblock_close_t close; ///< \sa kblock_close()
/**
 * KBlock status and error codes.
 *
 * Each value is a bit index (used through BV()) in the KBlockPriv flags
 * word, so status and error conditions can coexist.
 */
typedef enum KBlockStatus
{
	/* Status flags */
	KBS_MAPPED,                ///< Status: The current loaded block from the device is memory mapped.

	/* Errors */
	KBS_ERR_ALREADY_MAPPED,    ///< Error: trying to memory map a block already mapped.
	KBS_ERR_NOT_MAPPED,        ///< Error: trying to memory unmap a block not yet mapped.
	KBS_ERR_MAP_NOT_AVAILABLE, ///< Error: mapping methods not implemented.

	#define KBS_STATUS_MASK (BV(KBS_MAPPED) | 0 /* Add status flags here */)

	/* BUGFIX: the mask previously listed BV(KBS_ERR_ALREADY_MAPPED) twice and
	 * omitted BV(KBS_ERR_NOT_MAPPED), so kblock_clearerr() could never clear
	 * the "not mapped" error flag. */
	#define KBS_ERROR_MASK  (BV(KBS_ERR_ALREADY_MAPPED) | BV(KBS_ERR_NOT_MAPPED) \
		| BV(KBS_ERR_MAP_NOT_AVAILABLE) | 0 /* Add error flags here */)
} KBlockStatus;
111 * KBlock private members.
112 * These are the private members of the KBlock class, please do not
113 * access these directly, use the KBlock API.
115 typedef struct KBlockPriv
117 DB(id_t type); ///< Used to keep track, at runtime, of the class type.
118 void *pagebuf; ///< Pointer to a buffer used as page buffer when memory mapping is active. \sa kblock_map(), kblock_unmap()
119 size_t pagebuf_size; ///< Size of the page buffer used for memory mapping. \sa kblock_map(), kblock_unmap()
120 KBlockStatus flags; ///< Status and error flags.
121 block_idx_t blk_start; ///< Start block number when the device is trimmed. \sa kblock_trim()
122 DB(size_t map_off); ///< When mapping is active, this is the mapped data offset inside the block. \sa kblock_map(), kblock_unmap()
123 DB(size_t map_size); ///< When mapping is active, this is the mapped data size inside the block. \sa kblock_map(), kblock_unmap()
127 * KBlock: interface for a generic block device.
129 * A block device is a device which can only be read/written
130 * with data blocks of constant size: flash memories,
131 * SD cards, hard disks, etc...
133 * This interface is designed to adapt to most block devices and
134 * use peculiar features in order to save CPU time and memory space.
136 * You do not have to use this structure directly, specific implementations
137 * will be supplied in the peripheral drivers.
139 typedef struct KBlock
141 KBlockPriv priv; ///< Interface private data, do not use directly.
143 /* Public access members/methods */
144 size_t blk_size; ///< Block size.
145 block_idx_t blk_cnt; ///< Number of blocks available in the device.
146 struct KBlockVTable *vt; ///< Virtual table of interface functions.
/**
 * Add generic memory mapping functionality to a block device.
 *
 * If the device has an hardware page buffer mechanism, the map/unmap
 * functions are unimplemented.
 * If you need to use the mapping functions of such device, this function
 * will add generic software mapping features wrapping the KBlock methods.
 *
 * \param dev the block device.
 * \param buf the buffer to be used as page buffer for memory mapping functions.
 * \param size the size of the buffer. This is the maximum size that can be
 *        memory mapped. If you want to map a full block, a size of at least
 *        dev->blk_size have to be supplied.
 *
 * \sa kblock_map(), kblock_unmap(), kblock_readBuf(), kblock_writeBuf()
 */
void kblock_addMapping(struct KBlock *dev, void *buf, size_t size);
169 * Use a subset of the blocks on the device.
171 * This function is useful for partitioning a device and use it for
172 * different purposes at the same time.
174 * This function will limit the number of blocks used on the device by setting
175 * a start index and a number of blocks to be used counting from that index.
177 * The blocks outside this range are no more accessible.
179 * Logical block indexes will be mapped to physical indexes inside this new
180 * range automatically. Even following calls to kblock_trim() will use logical
181 * indexes, so, once trimmed, access can only be limited further and never
186 * //...init KBlock device dev
187 * kblock_trim(dev, 200, 1500); // Restrict access to the 200-1700 physical block range.
188 * kblock_load(dev, 0); // Load the physical block #200.
189 * kblock_trim(dev, 0, 300); // Restrict access to the 200-500 physical block range.
192 * \param b KBlock device.
193 * \param start The index of the start block for the limiting window in logical addressing units.
194 * \param count The number of blocks to be used.
197 INLINE void kblock_trim(struct KBlock *b, block_idx_t start, block_idx_t count)
199 ASSERT(start + count <= b->blk_cnt);
200 b->priv.blk_start += start;
205 * Transfer data from the internal page buffer to user memory.
207 * This function accesses the internal page buffer of the block device and copy
208 * the data to \a buf. The content is copied from the current cached block.
210 * \param b KBlock device.
211 * \param buf User buffer to copy the data to.
212 * \param offset Address offset within the block, from which to copy data.
213 * \param size Size, in bytes, of the data to be copied.
215 * \return The number of bytes copied. Can be less than \a size on errors.
217 * \sa kblock_writeBuf()
219 INLINE size_t kblock_readBuf(struct KBlock *b, void *buf, size_t offset, size_t size)
222 ASSERT(b->vt->readBuf);
223 ASSERT(offset + size <= b->blk_size);
225 return b->vt->readBuf(b, buf, offset, size);
229 * Write to the page buffer.
231 * Copies data from user memory to the device page buffer. The data is written
232 * in the current cached block buffer.
234 * \param b KBlock device.
235 * \param buf User buffer to copy the data from.
236 * \param offset Address offset within the block, from which data has to be written.
237 * \param size Size, in bytes, of the data to be written.
239 * \return The number of bytes written. Can be less than \a size on errors.
241 * \sa kblock_readBuf()
243 INLINE size_t kblock_writeBuf(struct KBlock *b, const void *buf, size_t offset, size_t size)
246 ASSERT(b->vt->writeBuf);
247 ASSERT(offset + size <= b->blk_size);
248 return b->vt->writeBuf(b, buf, offset, size);
252 * Load a block from the device to the page buffer.
254 * The block \a index will be loaded in the internal page buffer.
256 * \param b KBlock device.
257 * \param index Logical index of the block to be loaded.
259 * \return 0 on success, EOF on errors.
261 INLINE int kblock_load(struct KBlock *b, block_idx_t index)
265 ASSERT(index < b->blk_cnt);
267 return b->vt->load(b, b->priv.blk_start + index);
271 * Store a block from the page buffer to the device.
273 * The current content of the page buffer will be flushed to the block \a index.
275 * \param b KBlock device.
276 * \param index Logical index of the block to be stored.
278 * \return 0 on success, EOF on errors.
280 INLINE int kblock_store(struct KBlock *b, block_idx_t index)
283 ASSERT(b->vt->store);
284 ASSERT(index < b->blk_cnt);
286 return b->vt->store(b, b->priv.blk_start + index);
291 * Memory map the current page buffer.
293 * To speed up access, instead of using kblock_readBuf() and kblock_writeBuf(),
294 * you can memory map the page buffer and access it directly through the
295 * returned pointer. You can freely access the pointer in any way you
296 * like. Once done, call kblock_unmap() to release the lock on the page_buffer.
298 * \note This function may be not available on all drivers, since the page
299 * buffer can be in the hardware and not directly accessible through memory.
300 * For this devices you can still add generic software mapping features
301 * thanks to kblock_addMapping().
303 * \note Only one mapping is available at a time, trying to map the page buffer
304 * again before releasing it is an error.
306 * \param b KBlock device.
307 * \param offset Address offset within the page buffer, from which data has to
309 * \param size Size of the memory to be mapped.
311 * \return A pointer to the mapped region of the page buffer or NULL on errors.
313 * \sa kblock_addMapping(), kblock_unmap()
315 INLINE void * kblock_map(struct KBlock *b, size_t offset, size_t size)
320 if (b->priv.flags & BV(KBS_MAPPED))
322 b->priv.flags |= BV(KBS_ERR_ALREADY_MAPPED);
326 ASSERT(size < b->priv.pagebuf_size);
327 ASSERT(offset + size <= b->blk_size);
328 DB(b->priv.map_off = offset);
329 DB(b->priv.map_size = size);
331 void *ret = b->vt->map(b, offset, size);
334 b->priv.flags |= BV(KBS_MAPPED);
341 * Release the memory map on the page buffer.
343 * This function has to be called when memory mapped access has finished.
344 * This is needed because only one mapping is allowed at a time.
345 * The \a offset and \a size passed should be the same passed to
346 * kblock_map() when the page buffer has been mapped.
348 * \note Trying to unmap the page buffer when there is no mapping ongoing is
351 * \param b KBlock device.
352 * \param offset Address offset within the page buffer, from which data has been
353 * memory mapped. Must be the same value passed to kblock_map()
354 * when the memory was mapped.
355 * \param size Size of the memory mapped. Must be the same value passed to
356 * kblock_map() when the memory was mapped.
358 * \return 0 on success, EOF on errors.
360 * \sa kblock_addMapping(), kblock_map()
362 INLINE int kblock_unmap(struct KBlock *b, size_t offset, size_t size)
365 ASSERT(b->vt->unmap);
367 if (!(b->priv.flags & BV(KBS_MAPPED)))
369 b->priv.flags |= BV(KBS_ERR_NOT_MAPPED);
373 ASSERT(b->priv.map_off == offset);
374 ASSERT(b->priv.map_size == size);
375 int ret = b->vt->unmap(b, offset, size);
378 b->priv.flags &= ~BV(KBS_MAPPED);
383 * Get the current errors for the device.
385 * \note Calling this function will not clear the errors.
387 * \param b KBlock device.
389 * \return 0 if no error is present, a driver specific mask of errors otherwise.
391 * \sa kblock_clearerr()
393 INLINE int kblock_error(struct KBlock *b)
396 ASSERT(b->vt->error);
397 /* Automatically mask status flags */
398 return b->vt->error(b) & ~KBS_STATUS_MASK;
402 * Clear the errors of the device.
404 * \param b KBlock device.
406 * \return 0 on success, EOF on errors.
410 INLINE int kblock_clearerr(struct KBlock *b)
413 ASSERT(b->vt->clearerr);
414 /* Automatically clear error flags */
415 b->priv.flags &= ~KBS_ERROR_MASK;
416 return b->vt->clearerr(b);
422 * \param b KBlock device.
424 * \return 0 on success, EOF on errors.
426 INLINE int kblock_close(struct KBlock *b)
429 ASSERT(b->vt->close);
430 return b->vt->close(b);
/*
 * Default stub for devices whose hardware page buffer cannot be memory
 * mapped; drivers can install it in the vtable `map` slot.
 * NOTE(review): presumably flags KBS_ERR_MAP_NOT_AVAILABLE and returns
 * NULL -- confirm against the implementation in kblock.c.
 */
void *kblock_unsupportedMap(struct KBlock *b, UNUSED_ARG(size_t, offset), UNUSED_ARG(size_t, size));
435 #endif /* IO_KBLOCK_H */