Fix index creation and lookup logic #15527

Merged: 2 commits, Jul 27, 2023
Changes from all commits
76 changes: 47 additions & 29 deletions libretro-db/libretrodb.c
@@ -64,6 +64,7 @@ struct libretrodb_index
    char name[50];
    uint64_t key_size;
    uint64_t next;
+   uint64_t count;
 };
 
 typedef struct libretrodb_metadata
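
For orientation, a sketch of the on-disk layout this change implies (not part of the patch; "idx" below stands for a populated libretrodb_index_t): each index is a msgpack map header followed by a packed block of fixed-size entries, and the new count field records how many entries that block holds.

/* Sketch of one index on disk after this change:
 *
 *   header : msgpack map { "name", "key_size", "next", "count" }
 *   entries: idx.count records, each holding key_size bytes of key
 *            followed by a uint64_t file offset to the record
 *
 * "next" is therefore the byte length of the entry block: */
uint64_t entry_block_len = idx.count * (idx.key_size + sizeof(uint64_t));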
@@ -184,7 +185,7 @@ int libretrodb_open(const char *path, libretrodb_t *db)
    libretrodb_header_t header;
    libretrodb_metadata_t md;
    RFILE *fd = filestream_open(path,
-         RETRO_VFS_FILE_ACCESS_READ,
+         RETRO_VFS_FILE_ACCESS_READ_WRITE | RETRO_VFS_FILE_ACCESS_UPDATE_EXISTING,
          RETRO_VFS_FILE_ACCESS_HINT_NONE);
 
    if (!fd)
@@ -223,27 +224,29 @@ int libretrodb_open(const char *path, libretrodb_t *db)
 static int libretrodb_find_index(libretrodb_t *db, const char *index_name,
       libretrodb_index_t *idx)
 {
-   ssize_t eof = filestream_get_size(db->fd);
-   ssize_t offset = filestream_seek(db->fd,
-         (ssize_t)db->first_index_offset,
-         RETRO_VFS_SEEK_POSITION_START);
+   filestream_seek(db->fd,
+         (ssize_t)db->first_index_offset,
+         RETRO_VFS_SEEK_POSITION_START);
 
-   /* TODO: this should use filestream_eof instead */
-   while (offset < eof)
+   while (!filestream_eof(db->fd))
    {
       uint64_t name_len = 50;
       /* Read index header */
-      rmsgpack_dom_read_into(db->fd,
+      if (rmsgpack_dom_read_into(db->fd,
             "name", idx->name, &name_len,
             "key_size", &idx->key_size,
-            "next", &idx->next, NULL);
+            "next", &idx->next,
+            "count", &idx->count,
+            NULL) < 0) {
+         printf("Invalid index header\n");
+         break;
+      }
 
       if (strncmp(index_name, idx->name, strlen(idx->name)) == 0)
         return 0;
 
      filestream_seek(db->fd, (ssize_t)idx->next,
            RETRO_VFS_SEEK_POSITION_CURRENT);
-      offset = filestream_tell(db->fd);
    }
 
    return -1;
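
For readers following the scan above: the loop now walks headers until end-of-file instead of comparing a running offset against a precomputed size, and a header that fails to decode (for example one written by an older build without the count key) aborts the scan rather than reading garbage. The traversal this assumes, in comment form:

/* File layout assumed by the walk (see the sketch after the struct above):
 *
 *    [header A][entry block A][header B][entry block B] ... EOF
 *
 * "next" holds the byte length of the entry block that follows a header,
 * so the relative seek at the bottom of the loop jumps from the end of one
 * header straight to the start of the next; filestream_eof() then ends the
 * loop once the final block has been skipped. */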
@@ -254,7 +257,7 @@ static int binsearch(const void *buff, const void *item,
 {
    int mid = (int)(count / 2);
    int item_size = field_size + sizeof(uint64_t);
-   uint64_t *current = (uint64_t *)buff + (mid * item_size);
+   uint8_t *current = ((uint8_t *)buff + (mid * item_size));
    int rv = memcmp(current, item, field_size);
 
    if (rv == 0)
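
The one-line change above is the actual fix in binsearch: item_size is a byte count, so the base pointer must be advanced in bytes. A minimal illustration, using the same local names as the function:

/* Byte pointer: the midpoint lands on the intended entry. */
uint8_t *entry = (uint8_t *)buff + (mid * item_size);

/* The old form advanced in 8-byte units, i.e. 8 * (mid * item_size) bytes,
 * so memcmp() compared against data far past the midpoint:
 *
 *    uint64_t *bad = (uint64_t *)buff + (mid * item_size);
 */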
@@ -278,21 +281,21 @@ int libretrodb_find_entry(libretrodb_t *db, const char *index_name,
 {
    libretrodb_index_t idx;
    int rv;
-   void *buff;
+   uint8_t *buff;
    int count;
    uint64_t offset;
    ssize_t bufflen, nread = 0;
 
    if (libretrodb_find_index(db, index_name, &idx) < 0)
       return -1;
 
    bufflen = idx.next;
 
    if (!(buff = malloc(bufflen)))
      return -1;
 
    while (nread < bufflen)
    {
-      void *buff_ = (uint64_t *)buff + nread;
+      void *buff_ = (buff + nread);
      rv = (int)filestream_read(db->fd, buff_, bufflen - nread);
 
      if (rv <= 0)
@@ -303,14 +306,16 @@
       nread += rv;
    }
 
-   rv = binsearch(buff, key, db->count, (ssize_t)idx.key_size, &offset);
+   rv = binsearch(buff, key, idx.count, (ssize_t)idx.key_size, &offset);
    free(buff);
 
-   if (rv == 0)
-      filestream_seek(db->fd, (ssize_t)offset,
-            RETRO_VFS_SEEK_POSITION_START);
-
-   return rmsgpack_dom_read(db->fd, out);
+   if (rv == 0) {
+      filestream_seek(db->fd, (ssize_t)offset, RETRO_VFS_SEEK_POSITION_START);
+      rmsgpack_dom_read(db->fd, out);
+      return 0;
+   } else {
+      return -1;
+   }
 }
 
 /**
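
With the branch restructured as above, libretrodb_find_entry reports a miss without decoding from a stale file position. A hypothetical caller, assuming the elided parameters are the binary key and an rmsgpack_dom_value output as the function body suggests ("crc" stands in for whatever index the database actually carries):

/* "key" is assumed to point at key_size bytes prepared by the caller. */
struct rmsgpack_dom_value out;

if (libretrodb_find_entry(db, "crc", key, &out) == 0)
{
   /* Hit: the matching record was decoded into "out". */
   rmsgpack_dom_value_free(&out);
}
else
{
   /* Missing index or key not present: nothing was decoded. */
}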
@@ -447,7 +452,15 @@ int libretrodb_create_index(libretrodb_t *db,
    uint64_t *buff_u64 = NULL;
    uint8_t field_size = 0;
    uint64_t item_loc = filestream_tell(db->fd);
-   bintree_t *tree = bintree_new(node_compare, &field_size);
+   bintree_t *tree;
+   uint64_t item_count = 0;
+   int rval = -1;
+
+   if (libretrodb_find_index(db, name, &idx) >= 0) {
+      return 1;
+   }
+
+   tree = bintree_new(node_compare, &field_size);
 
    item.type = RDT_NULL;
 
@@ -466,7 +479,7 @@
 
       /* Field not found in item? */
       if (!(field = rmsgpack_dom_value_map_value(&item, &key)))
-         goto clean;
+         continue;
 
       /* Field is not binary? */
       if (field->type != RDT_BINARY)
@@ -487,7 +500,7 @@
 
       memcpy(buff, field->val.binary.buff, field_size);
 
-      buff_u64 = (uint64_t *)buff + field_size;
+      buff_u64 = (uint64_t *)((uint8_t *)buff + field_size);
 
       memcpy(buff_u64, &item_loc, sizeof(uint64_t));
 
@@ -497,31 +510,36 @@
          rmsgpack_dom_value_print(field);
          goto clean;
       }
+      item_count++;
       buff = NULL;
       rmsgpack_dom_value_free(&item);
-      item_loc = filestream_tell(db->fd);
+      item_loc = filestream_tell(cur.fd);
    }
 
+   rval = 0;
+
    filestream_seek(db->fd, 0, RETRO_VFS_SEEK_POSITION_END);
 
    strlcpy(idx.name, name, sizeof(idx.name));
 
    idx.key_size = field_size;
-   idx.next = db->count * (field_size + sizeof(uint64_t));
-
+   idx.next = item_count * (field_size + sizeof(uint64_t));
+   idx.count = item_count;
    /* Write index header */
-   rmsgpack_write_map_header(db->fd, 3);
+   rmsgpack_write_map_header(db->fd, 4);
    rmsgpack_write_string(db->fd, "name", STRLEN_CONST("name"));
    rmsgpack_write_string(db->fd, idx.name, (uint32_t)strlen(idx.name));
    rmsgpack_write_string(db->fd, "key_size", (uint32_t)STRLEN_CONST("key_size"));
    rmsgpack_write_uint  (db->fd, idx.key_size);
    rmsgpack_write_string(db->fd, "next", STRLEN_CONST("next"));
    rmsgpack_write_uint  (db->fd, idx.next);
+   rmsgpack_write_string(db->fd, "count", STRLEN_CONST("count"));
+   rmsgpack_write_uint  (db->fd, idx.count);
 
    nictx.db = db;
    nictx.idx = &idx;
    bintree_iterate(tree->root, node_iter, &nictx);
 
+   filestream_flush(db->fd);
 clean:
    rmsgpack_dom_value_free(&item);
    if (buff)
@@ -531,7 +549,7 @@
    if (tree && tree->root)
       bintree_free(tree->root);
    free(tree);
-   return 0;
+   return rval;
 }
 
 libretrodb_cursor_t *libretrodb_cursor_new(void)
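
Together with the early return added near the top of the function, libretrodb_create_index now distinguishes three outcomes instead of always returning 0: 1 if the index already exists, 0 if it was written and flushed, and -1 if indexing failed. A hypothetical caller sketch (the parameters beyond db are assumed from how name and the indexed field are used in the body; "crc" is only an example):

int ret = libretrodb_create_index(db, "crc", "crc");

if (ret == 0)
   printf("index created\n");
else if (ret == 1)
   printf("index already present, nothing written\n");
else
   printf("failed to create index\n");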