#define SD_WRITE_SINGLEBLOCK 0x58
#define SD_DATA_ACCEPTED 0x05
-static int sd_writeBlock(KBlock *b, block_idx_t idx, const void *buf)
+static size_t sd_writeDirect(KBlock *b, block_idx_t idx, const void *buf, size_t offset, size_t size)
{
Sd *sd = SD_CAST(b);
KFile *fd = sd->ch;
+ ASSERT(offset == 0);
+ ASSERT(size == SD_DEFAULT_BLOCKLEN);
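+ /*
+  * The SD SPI protocol transfers whole blocks only: partial writes,
+  * when enabled, are served by the cache layer, so this function is
+  * always called with offset 0 and a full block size.
+  */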
LOG_INFO("writing block %ld\n", idx);
if (sd->tranfer_len != SD_DEFAULT_BLOCKLEN)
{
if ((sd->r1 = sd_setBlockLen(sd, SD_DEFAULT_BLOCKLEN)))
{
LOG_ERR("setBlockLen failed: %04X\n", sd->r1);
- return sd->r1;
+ return 0;
}
sd->tranfer_len = SD_DEFAULT_BLOCKLEN;
}
{
LOG_ERR("write single block failed: %04X\n", sd->r1);
sd_select(sd, false);
- return sd->r1;
+ return 0;
}
kfile_putc(SD_STARTTOKEN, fd);
- return EOF;
+ return 0;
}
- return 0;
+ return SD_DEFAULT_BLOCKLEN;
}
void sd_writeTest(Sd *sd)
{
for (block_idx_t i = 0; i < sd->b.blk_cnt; i++)
{
- LOG_INFO("writing block %ld: %s\n", i, (sd_writeBlock(&sd->b, i, buf) == 0) ? "OK" : "FAIL");
+ LOG_INFO("writing block %ld: %s\n", i, (sd_writeDirect(&sd->b, i, buf, 0, SD_DEFAULT_BLOCKLEN) == SD_DEFAULT_BLOCKLEN) ? "OK" : "FAIL");
}
}
kputchar('\n');
}
- if (sd_writeBlock(&sd->b, 0, buf) != 0)
+ if (sd_writeDirect(&sd->b, 0, buf, 0, SD_DEFAULT_BLOCKLEN) != SD_DEFAULT_BLOCKLEN)
return false;
memset(buf, 0, sizeof(buf));
static const KBlockVTable sd_unbuffered_vt =
{
.readDirect = sd_readDirect,
- .writeBlock = sd_writeBlock,
+ .writeDirect = sd_writeDirect,
.error = sd_error,
.clearerr = sd_clearerr,
static const KBlockVTable sd_buffered_vt =
{
.readDirect = sd_readDirect,
- .writeBlock = sd_writeBlock,
+ .writeDirect = sd_writeDirect,
.readBuf = kblock_swReadBuf,
.writeBuf = kblock_swWriteBuf,
if (sd_blockInit(sd, ch))
{
sd->b.priv.buf = sd_buf;
- sd->b.priv.flags |= KB_BUFFERED;
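+ /* The software cache makes partial writes possible even though the card itself writes whole blocks only. */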
+ sd->b.priv.flags |= KB_BUFFERED | KB_PARTIAL_WRITE;
sd->b.priv.vt = &sd_buffered_vt;
sd->b.priv.vt->load(&sd->b, 0);
return true;
pgoff_t filelen_table[BATTFS_MAX_FILES];
ASSERT(dev);
- ASSERT(kblock_buffered(dev));
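+ /* BattFS updates portions of pages in place, so partial block write support is required. */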
+ ASSERT(kblock_partialWrite(dev));
disk->dev = dev;
ASSERT(disk->dev->blk_size > BATTFS_HEADER_LEN);
/*
* Renew the page only if it is not in cache.
- * This avoids rewriting the same page continuously
+ * This avoids rewriting the same page continuously
* if the user code keeps writing in the same portion
* of the file.
*/
- if ((fdb->start[fdb->max_off] != kblock_cachedBlock(disk->dev)) || !kblock_cacheDirty(disk->dev))
+ if (kblock_buffered(disk->dev)
+ && ((fdb->start[fdb->max_off] != kblock_cachedBlock(disk->dev)) || !kblock_cacheDirty(disk->dev)))
{
new_page = renewPage(disk, fdb->start[fdb->max_off]);
if (new_page == NO_SPACE)
}
/* Renew the page only if it is not in cache. */
- if ((fdb->start[fdb->max_off] != kblock_cachedBlock(disk->dev)) || !kblock_cacheDirty(disk->dev))
+ if (kblock_buffered(disk->dev)
+ && ((fdb->start[fdb->max_off] != kblock_cachedBlock(disk->dev)) || !kblock_cacheDirty(disk->dev)))
{
new_page = renewPage(disk, fdb->start[pg_offset]);
if (new_page == NO_SPACE)
LOG_INFO("Using cached block %d\n", fdb->start[pg_offset]);
new_page = fdb->start[pg_offset];
}
-
+
curr_hdr.seq++;
}
//LOG_INFO("writing to buffer for page %d, offset %d, size %d\n", disk->curr_page, addr_offset, wr_len);
return b->priv.vt->readDirect(b, b->priv.blk_start + index, buf, offset, size);
}
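+/* Write \a size bytes at \a offset inside block \a index, bypassing the cache. */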
-INLINE int kblock_writeBlock(struct KBlock *b, block_idx_t index, const void *buf)
+INLINE size_t kblock_writeDirect(struct KBlock *b, block_idx_t index, const void *buf, size_t offset, size_t size)
{
- KB_ASSERT_METHOD(b, writeBlock);
+ KB_ASSERT_METHOD(b, writeDirect);
ASSERT(index < b->blk_cnt);
- return b->priv.vt->writeBlock(b, b->priv.blk_start + index, buf);
+ return b->priv.vt->writeDirect(b, b->priv.blk_start + index, buf, offset, size);
}
INLINE size_t kblock_readBuf(struct KBlock *b, void *buf, size_t offset, size_t size)
}
else
{
- ASSERT(offset == 0);
- ASSERT(size == b->blk_size);
- return (kblock_writeBlock(b, idx, buf) == 0) ? size : 0;
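+ /* A sub-block write is valid only on devices that declare partial write support. */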
+ #ifdef _DEBUG
+ if (offset != 0 || size != b->blk_size)
+ ASSERT(kblock_partialWrite(b));
+ #endif
+ return kblock_writeDirect(b, idx, buf, offset, size);
}
}
ASSERT(b);
ASSERT(src < b->blk_cnt);
ASSERT(dest < b->blk_cnt);
- ASSERT(kblock_buffered(b));
- if (!kblock_loadPage(b, src))
- return EOF;
+ if (kblock_buffered(b))
+ {
+ if (!kblock_loadPage(b, src))
+ return EOF;
- b->priv.curr_blk = dest;
- kblock_setDirty(b, true);
- return 0;
+ b->priv.curr_blk = dest;
+ kblock_setDirty(b, true);
+ return 0;
+ }
+ else if (kblock_partialWrite(b))
+ {
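+ /* No cache available: copy the block through a small stack bounce buffer, one chunk at a time. */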
+ uint8_t buf[16];
+ size_t blk_size = b->blk_size;
+ size_t offset = 0;
+
+ while (blk_size)
+ {
+ size_t size = MIN(sizeof(buf), blk_size);
+ if (kblock_readDirect(b, src, buf, offset, size) != size)
+ return EOF;
+ if (kblock_writeDirect(b, dest, buf, offset, size) != size)
+ return EOF;
+
+ blk_size -= size;
+ offset += size;
+ }
+ return 0;
+ }
+ else
+ {
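+ /* Neither buffered nor partial-write capable: block copy is not possible. */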
+ ASSERT(0);
+ return EOF;
+ }
}
int kblock_swLoad(struct KBlock *b, block_idx_t index)
int kblock_swStore(struct KBlock *b, block_idx_t index)
{
- return kblock_writeBlock(b, index, b->priv.buf);
+ return (kblock_writeDirect(b, index, b->priv.buf, 0, b->blk_size) == b->blk_size) ? 0 : EOF;
}
size_t kblock_swReadBuf(struct KBlock *b, void *buf, size_t offset, size_t size)
*
* \{
*/
-typedef size_t (* kblock_read_direct_t) (struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size);
-typedef int (* kblock_write_block_t) (struct KBlock *b, block_idx_t index, const void *buf);
+typedef size_t (* kblock_read_direct_t) (struct KBlock *b, block_idx_t index, void *buf, size_t offset, size_t size);
+typedef size_t (* kblock_write_direct_t) (struct KBlock *b, block_idx_t index, const void *buf, size_t offset, size_t size);
typedef size_t (* kblock_read_t) (struct KBlock *b, void *buf, size_t offset, size_t size);
typedef size_t (* kblock_write_t) (struct KBlock *b, const void *buf, size_t offset, size_t size);
typedef struct KBlockVTable
{
kblock_read_direct_t readDirect;
- kblock_write_block_t writeBlock;
+ kblock_write_direct_t writeDirect;
kblock_read_t readBuf;
kblock_write_t writeBuf;
#define KB_BUFFERED BV(0) ///< Internal flag: true if the KBlock has a buffer
#define KB_CACHE_DIRTY BV(1) ///< Internal flag: true if the cache is dirty
+#define KB_PARTIAL_WRITE BV(2) ///< Internal flag: true if the device allows partial block write
/**
* KBlock private members.
return kblock_buffered(b) && (b->priv.flags & KB_CACHE_DIRTY);
}
+/**
+ * \return true if the device \a b supports partial block write. That is, you
+ * can call kblock_write() with a size smaller than the block
+ * size.
+ * \param b KBlock device.
+ * \sa kblock_write().
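+ *
+ * Illustrative sketch (\a dev is assumed to be an initialized KBlock):
+ * \code
+ * // Update 4 bytes inside block 5 without rewriting the whole block.
+ * if (kblock_partialWrite(dev))
+ *     kblock_write(dev, 5, data, 16, 4);
+ * \endcode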
+ */
+INLINE bool kblock_partialWrite(struct KBlock *b)
+{
+ ASSERT(b);
+ return (b->priv.flags & KB_PARTIAL_WRITE);
+}
/**
* Write data to the block device.
* This function will write \a size bytes to block \a idx starting at
* address \a offset inside the block.
*
- * \note Partial block writes are supported only if the device is opened in
- * buffered mode. You can use kblock_buffered() to check if the device
- * has an internal cache or not.
+ * \note Partial block writes are supported only on certain devices.
+ * You can use kblock_partialWrite() to check whether the device
+ * supports this feature.
*
* \note If the device is opened in buffered mode, this function will use
* the provided cache efficiently and transparently.
*
* \return the number of bytes written.
*
- * \sa kblock_read(), kblock_flush(), kblock_buffered().
+ * \sa kblock_read(), kblock_flush(), kblock_buffered(), kblock_partialWrite().
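+ *
+ * A minimal sketch of both cases (\a dev and \a buf are assumed to exist):
+ * \code
+ * // Whole-block write: supported by every device.
+ * kblock_write(dev, 0, buf, 0, dev->blk_size);
+ *
+ * // Sub-block write: allowed only if the device advertises it.
+ * if (kblock_partialWrite(dev))
+ *     kblock_write(dev, 0, buf, 0, 16);
+ * \endcode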
*/
size_t kblock_write(struct KBlock *b, block_idx_t idx, const void *buf, size_t offset, size_t size);
*
* This function will copy the content of block \a src to block \a dest.
*
- * \note This function is available only on devices opened in buffered mode.
+ * \note This function is available only on devices which support partial
+ * block write or are opened in buffered mode.
*
* \param b KBlock device.
* \param src source block number.
return size;
}
-static int kblockfile_writeBlock(struct KBlock *b, block_idx_t index, const void *buf)
+static size_t kblockfile_writeDirect(struct KBlock *b, block_idx_t index, const void *buf, size_t offset, size_t size)
{
KBlockFile *f = KBLOCKFILE_CAST(b);
ASSERT(buf);
ASSERT(index < b->blk_cnt);
- fseek(f->fp, index * b->blk_size, SEEK_SET);
- return (fwrite(f->b.priv.buf, 1, b->blk_size, f->fp) == b->blk_size) ? 0 : EOF;
+ fseek(f->fp, index * b->blk_size + offset, SEEK_SET);
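+ /* fwrite() returns the number of bytes actually written, matching the writeDirect contract. */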
+ return fwrite(buf, 1, size, f->fp);
}
static int kblockfile_error(struct KBlock *b)
.writeBuf = kblockfile_writeBuf,
.load = kblockfile_load,
.store = kblockfile_store,
-
+
.error = kblockfile_error,
.clearerr = kblockfile_claererr,
.close = kblockfile_close,
static const KBlockVTable kblockfile_swbuffered_vt =
{
.readDirect = kblockfile_readDirect,
- .writeBlock =kblockfile_writeBlock,
-
+ .writeDirect = kblockfile_writeDirect,
+
.readBuf = kblock_swReadBuf,
.writeBuf = kblock_swWriteBuf,
.load = kblock_swLoad,
.store = kblock_swStore,
-
+
.error = kblockfile_error,
.clearerr = kblockfile_claererr,
.close = kblockfile_close,
static const KBlockVTable kblockfile_unbuffered_vt =
{
.readDirect = kblockfile_readDirect,
- .writeBlock =kblockfile_writeBlock,
+ .writeDirect = kblockfile_writeDirect,
.error = kblockfile_error,
.clearerr = kblockfile_claererr,
f->fp = fp;
f->b.blk_size = block_size;
f->b.blk_cnt = block_count;
-
+
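+ /* A file can be written at any offset, so partial block writes are always supported. */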
+ f->b.priv.flags |= KB_PARTIAL_WRITE;
if (buf)
{
f->b.priv.flags |= KB_BUFFERED;
return size;
}
-static int kblockram_writeBlock(struct KBlock *b, block_idx_t index, const void *buf)
+static size_t kblockram_writeDirect(struct KBlock *b, block_idx_t index, const void *buf, size_t offset, size_t size)
{
KBlockRam *r = KBLOCKRAM_CAST(b);
ASSERT(buf);
ASSERT(index < b->blk_cnt);
- memcpy(r->membuf + index * r->b.blk_size, buf, r->b.blk_size);
- return 0;
+ memcpy(r->membuf + index * r->b.blk_size + offset, buf, size);
+ return size;
}
static int kblockram_dummy(UNUSED_ARG(struct KBlock *,b))
static const KBlockVTable kblockram_hwbuffered_vt =
{
.readDirect = kblockram_readDirect,
-
+
.readBuf = kblockram_readBuf,
.writeBuf = kblockram_writeBuf,
.load = kblockram_load,
.store = kblockram_store,
-
+
.error = kblockram_dummy,
.clearerr = kblockram_dummy,
.close = kblockram_dummy,
static const KBlockVTable kblockram_swbuffered_vt =
{
.readDirect = kblockram_readDirect,
- .writeBlock = kblockram_writeBlock,
-
+ .writeDirect = kblockram_writeDirect,
+
.readBuf = kblock_swReadBuf,
.writeBuf = kblock_swWriteBuf,
.load = kblock_swLoad,
.store = kblock_swStore,
-
+
.error = kblockram_dummy,
.clearerr = kblockram_dummy,
.close = kblockram_dummy,
static const KBlockVTable kblockram_unbuffered_vt =
{
.readDirect = kblockram_readDirect,
- .writeBlock = kblockram_writeBlock,
+ .writeDirect = kblockram_writeDirect,
.error = kblockram_dummy,
.clearerr = kblockram_dummy,
DB(ram->b.priv.type = KBT_KBLOCKRAM);
ram->b.blk_size = block_size;
-
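+ /* RAM is byte-addressable, so partial block writes are always supported. */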
+ ram->b.priv.flags |= KB_PARTIAL_WRITE;
+
if (buffered)
{
ram->b.priv.flags |= KB_BUFFERED;
ram->b.priv.buf = buf;
// First page used as page buffer
ram->membuf = (uint8_t *)buf + block_size;
-
+
if (hwbuffered)
ram->b.priv.vt = &kblockram_hwbuffered_vt;
else
ram->b.priv.vt = &kblockram_swbuffered_vt;
-
+
kblockram_load(&ram->b, 0);
}
else