author      Vincent Sanders <vince@kyllikki.org>   2015-05-28 16:08:46 +0100
committer   Vincent Sanders <vince@kyllikki.org>   2015-05-28 16:08:46 +0100
commit      c105738fa36bb2400adc47399c5b878d252d1c86 (patch)
tree        138eeb449e1bf51ee1726b5f820740aada0ccd0b /content
parent      20f2c86a511f7913cf858e7bd3668b0b59663ba0 (diff)
Change LOG() macro to be variadic
This changes the LOG macro to be variadic, removing the need for all callsites to use double bracketing, and allows for future improvement in how we use the logging macros. The callsites were changed with coccinelle and the changes checked by hand. Compile tested for several frontends, but not all. A format annotation has also been added, which allows the compiler to check the parameters and types passed to the logging.
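For illustration only, a minimal sketch of the shape of this change; the real NetSurf macro lives in the logging header and forwards to the core logging function, so the helper name and exact definitions below are assumptions rather than the code from this commit:

/* Before: the macro took a single parenthesised argument, forcing
 * callers to double-bracket, e.g. LOG(("fetch %p", f)); */
#define LOG(x) do { nslog_log x; } while (0)

/* After: a variadic macro, so call sites need only single brackets,
 * e.g. LOG("fetch %p", f); */
#define LOG(format, ...) \
	do { nslog_log(format "\n", ##__VA_ARGS__); } while (0)

/* The format annotation mentioned above is a printf-style attribute on
 * the underlying logging function, letting the compiler check the types
 * of the arguments passed through the macro (declaration assumed): */
extern void nslog_log(const char *format, ...)
	__attribute__ ((format (printf, 1, 2)));

The call sites in the diff below were converted mechanically (the commit message notes coccinelle was used) and then checked by hand; the per-file changes follow.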
Diffstat (limited to 'content')
-rw-r--r--  content/content.c            31
-rw-r--r--  content/fetch.c              12
-rw-r--r--  content/fetchers/curl.c      59
-rw-r--r--  content/fetchers/data.c      11
-rw-r--r--  content/fs_backing_store.c  173
-rw-r--r--  content/hlcache.c            17
-rw-r--r--  content/llcache.c            35
-rw-r--r--  content/urldb.c              41
8 files changed, 182 insertions(+), 197 deletions(-)
diff --git a/content/content.c b/content/content.c
index cf0216453..057b96ee1 100644
--- a/content/content.c
+++ b/content/content.c
@@ -79,8 +79,7 @@ nserror content__init(struct content *c, const content_handler *handler,
struct content_user *user_sentinel;
nserror error;
- LOG(("url "URL_FMT_SPC" -> %p",
- nsurl_access(llcache_handle_get_url(llcache)), c));
+ LOG("url "URL_FMT_SPC" -> %p", nsurl_access(llcache_handle_get_url(llcache)), c);
user_sentinel = calloc(1, sizeof(struct content_user));
if (user_sentinel == NULL) {
@@ -280,8 +279,7 @@ void content_convert(struct content *c)
if (c->locked == true)
return;
- LOG(("content "URL_FMT_SPC" (%p)",
- nsurl_access(llcache_handle_get_url(c->llcache)), c));
+ LOG("content "URL_FMT_SPC" (%p)", nsurl_access(llcache_handle_get_url(c->llcache)), c);
if (c->handler->data_complete != NULL) {
c->locked = true;
@@ -360,7 +358,7 @@ void content__reformat(struct content *c, bool background,
assert(c->status == CONTENT_STATUS_READY ||
c->status == CONTENT_STATUS_DONE);
assert(c->locked == false);
- LOG(("%p %s", c, nsurl_access(llcache_handle_get_url(c->llcache))));
+ LOG("%p %s", c, nsurl_access(llcache_handle_get_url(c->llcache)));
c->available_width = width;
if (c->handler->reformat != NULL) {
@@ -385,8 +383,7 @@ void content_destroy(struct content *c)
struct content_rfc5988_link *link;
assert(c);
- LOG(("content %p %s", c,
- nsurl_access(llcache_handle_get_url(c->llcache))));
+ LOG("content %p %s", c, nsurl_access(llcache_handle_get_url(c->llcache)));
assert(c->locked == false);
if (c->handler->destroy != NULL)
@@ -599,7 +596,7 @@ bool content_scaled_redraw(struct hlcache_handle *h,
return true;
}
- LOG(("Content %p %dx%d ctx:%p", c, width, height, ctx));
+ LOG("Content %p %dx%d ctx:%p", c, width, height, ctx);
if (ctx->plot->option_knockout) {
knockout_plot_start(ctx, &new_ctx);
@@ -664,9 +661,7 @@ bool content_add_user(struct content *c,
{
struct content_user *user;
- LOG(("content "URL_FMT_SPC" (%p), user %p %p",
- nsurl_access(llcache_handle_get_url(c->llcache)),
- c, callback, pw));
+ LOG("content "URL_FMT_SPC" (%p), user %p %p", nsurl_access(llcache_handle_get_url(c->llcache)), c, callback, pw);
user = malloc(sizeof(struct content_user));
if (!user)
return false;
@@ -692,9 +687,7 @@ void content_remove_user(struct content *c,
void *pw)
{
struct content_user *user, *next;
- LOG(("content "URL_FMT_SPC" (%p), user %p %p",
- nsurl_access(llcache_handle_get_url(c->llcache)), c,
- callback, pw));
+ LOG("content "URL_FMT_SPC" (%p), user %p %p", nsurl_access(llcache_handle_get_url(c->llcache)), c, callback, pw);
/* user_list starts with a sentinel */
for (user = c->user_list; user->next != 0 &&
@@ -702,7 +695,7 @@ void content_remove_user(struct content *c,
user->next->pw == pw); user = user->next)
;
if (user->next == 0) {
- LOG(("user not found in list"));
+ LOG("user not found in list");
assert(0);
return;
}
@@ -808,8 +801,7 @@ void content_open(hlcache_handle *h, struct browser_window *bw,
{
struct content *c = hlcache_handle_get_content(h);
assert(c != 0);
- LOG(("content %p %s", c,
- nsurl_access(llcache_handle_get_url(c->llcache))));
+ LOG("content %p %s", c, nsurl_access(llcache_handle_get_url(c->llcache)));
if (c->handler->open != NULL)
c->handler->open(c, bw, page, params);
}
@@ -825,8 +817,7 @@ void content_close(hlcache_handle *h)
{
struct content *c = hlcache_handle_get_content(h);
assert(c != 0);
- LOG(("content %p %s", c,
- nsurl_access(llcache_handle_get_url(c->llcache))));
+ LOG("content %p %s", c, nsurl_access(llcache_handle_get_url(c->llcache)));
if (c->handler->close != NULL)
c->handler->close(c);
}
@@ -1479,7 +1470,7 @@ nserror content__clone(const struct content *c, struct content *nc)
*/
nserror content_abort(struct content *c)
{
- LOG(("Aborting %p", c));
+ LOG("Aborting %p", c);
if (c->handler->stop != NULL)
c->handler->stop(c);
diff --git a/content/fetch.c b/content/fetch.c
index 0ca90413a..158eb7c07 100644
--- a/content/fetch.c
+++ b/content/fetch.c
@@ -218,14 +218,14 @@ static void dump_rings(void)
q = queue_ring;
if (q) {
do {
- LOG(("queue_ring: %s", nsurl_access(q->url)));
+ LOG("queue_ring: %s", nsurl_access(q->url));
q = q->r_next;
} while (q != queue_ring);
}
f = fetch_ring;
if (f) {
do {
- LOG(("fetch_ring: %s", nsurl_access(f->url)));
+ LOG("fetch_ring: %s", nsurl_access(f->url));
f = f->r_next;
} while (f != fetch_ring);
}
@@ -340,9 +340,7 @@ void fetcher_quit(void)
* the reference count to allow the fetcher to
* be stopped.
*/
- LOG(("Fetcher for scheme %s still has %d active users at quit.",
- lwc_string_data(fetchers[fetcherd].scheme),
- fetchers[fetcherd].refcount));
+ LOG("Fetcher for scheme %s still has %d active users at quit.", lwc_string_data(fetchers[fetcherd].scheme), fetchers[fetcherd].refcount);
fetchers[fetcherd].refcount = 1;
}
@@ -748,9 +746,9 @@ void fetch_remove_from_queues(struct fetch *fetch)
RING_GETSIZE(struct fetch, fetch_ring, all_active);
RING_GETSIZE(struct fetch, queue_ring, all_queued);
- LOG(("Fetch ring is now %d elements.", all_active));
+ LOG("Fetch ring is now %d elements.", all_active);
- LOG(("Queue ring is now %d elements.", all_queued));
+ LOG("Queue ring is now %d elements.", all_queued);
#endif
}
diff --git a/content/fetchers/curl.c b/content/fetchers/curl.c
index b82f6b063..a2c6f2eb4 100644
--- a/content/fetchers/curl.c
+++ b/content/fetchers/curl.c
@@ -112,7 +112,7 @@ static char fetch_proxy_userpwd[100]; /**< Proxy authentication details. */
*/
static bool fetch_curl_initialise(lwc_string *scheme)
{
- LOG(("Initialise cURL fetcher for %s", lwc_string_data(scheme)));
+ LOG("Initialise cURL fetcher for %s", lwc_string_data(scheme));
curl_fetchers_registered++;
return true; /* Always succeeds */
}
@@ -128,17 +128,17 @@ static void fetch_curl_finalise(lwc_string *scheme)
struct cache_handle *h;
curl_fetchers_registered--;
- LOG(("Finalise cURL fetcher %s", lwc_string_data(scheme)));
+ LOG("Finalise cURL fetcher %s", lwc_string_data(scheme));
if (curl_fetchers_registered == 0) {
CURLMcode codem;
/* All the fetchers have been finalised. */
- LOG(("All cURL fetchers finalised, closing down cURL"));
+ LOG("All cURL fetchers finalised, closing down cURL");
curl_easy_cleanup(fetch_blank_curl);
codem = curl_multi_cleanup(fetch_curl_multi);
if (codem != CURLM_OK)
- LOG(("curl_multi_cleanup failed: ignoring"));
+ LOG("curl_multi_cleanup failed: ignoring");
curl_global_cleanup();
}
@@ -208,8 +208,7 @@ fetch_curl_post_convert(const struct fetch_multipart_data *control)
"application/octet-stream",
CURLFORM_END);
if (code != CURL_FORMADD_OK)
- LOG(("curl_formadd: %d (%s)",
- code, control->name));
+ LOG("curl_formadd: %d (%s)", code, control->name);
} else {
char *mimetype = guit->fetch->mimetype(control->value);
code = curl_formadd(&post, &last,
@@ -220,9 +219,7 @@ fetch_curl_post_convert(const struct fetch_multipart_data *control)
(mimetype != 0 ? mimetype : "text/plain"),
CURLFORM_END);
if (code != CURL_FORMADD_OK)
- LOG(("curl_formadd: %d (%s=%s)",
- code, control->name,
- control->value));
+ LOG("curl_formadd: %d (%s=%s)", code, control->name, control->value);
free(mimetype);
}
free(leafname);
@@ -233,9 +230,7 @@ fetch_curl_post_convert(const struct fetch_multipart_data *control)
CURLFORM_COPYCONTENTS, control->value,
CURLFORM_END);
if (code != CURL_FORMADD_OK)
- LOG(("curl_formadd: %d (%s=%s)", code,
- control->name,
- control->value));
+ LOG("curl_formadd: %d (%s=%s)", code, control->name, control->value);
}
}
@@ -282,7 +277,7 @@ fetch_curl_setup(struct fetch *parent_fetch,
fetch->fetch_handle = parent_fetch;
- LOG(("fetch %p, url '%s'", fetch, nsurl_access(url)));
+ LOG("fetch %p, url '%s'", fetch, nsurl_access(url));
/* construct a new fetch structure */
fetch->curl_handle = NULL;
@@ -681,7 +676,7 @@ static void fetch_curl_abort(void *vf)
{
struct curl_fetch_info *f = (struct curl_fetch_info *)vf;
assert(f);
- LOG(("fetch %p, url '%s'", f, nsurl_access(f->url)));
+ LOG("fetch %p, url '%s'", f, nsurl_access(f->url));
if (f->curl_handle) {
f->abort = true;
} else {
@@ -701,7 +696,7 @@ static void fetch_curl_stop(struct curl_fetch_info *f)
CURLMcode codem;
assert(f);
- LOG(("fetch %p, url '%s'", f, nsurl_access(f->url)));
+ LOG("fetch %p, url '%s'", f, nsurl_access(f->url));
if (f->curl_handle) {
/* remove from curl multi handle */
@@ -769,7 +764,7 @@ static bool fetch_curl_process_headers(struct curl_fetch_info *f)
assert(code == CURLE_OK);
}
http_code = f->http_code;
- LOG(("HTTP status code %li", http_code));
+ LOG("HTTP status code %li", http_code);
if (http_code == 304 && !f->post_urlenc && !f->post_multipart) {
/* Not Modified && GET request */
@@ -780,7 +775,7 @@ static bool fetch_curl_process_headers(struct curl_fetch_info *f)
/* handle HTTP redirects (3xx response codes) */
if (300 <= http_code && http_code < 400 && f->location != 0) {
- LOG(("FETCH_REDIRECT, '%s'", f->location));
+ LOG("FETCH_REDIRECT, '%s'", f->location);
msg.type = FETCH_REDIRECT;
msg.data.redirect = f->location;
fetch_send_callback(&msg, f->fetch_handle);
@@ -836,7 +831,7 @@ static void fetch_curl_done(CURL *curl_handle, CURLcode result)
assert(code == CURLE_OK);
abort_fetch = f->abort;
- LOG(("done %s", nsurl_access(f->url)));
+ LOG("done %s", nsurl_access(f->url));
if (abort_fetch == false && (result == CURLE_OK ||
(result == CURLE_WRITE_ERROR && f->stopped == false))) {
@@ -874,7 +869,7 @@ static void fetch_curl_done(CURL *curl_handle, CURLcode result)
memset(f->cert_data, 0, sizeof(f->cert_data));
cert = true;
} else {
- LOG(("Unknown cURL response code %d", result));
+ LOG("Unknown cURL response code %d", result);
error = true;
}
@@ -1007,8 +1002,7 @@ static void fetch_curl_poll(lwc_string *scheme_ignored)
do {
codem = curl_multi_perform(fetch_curl_multi, &running);
if (codem != CURLM_OK && codem != CURLM_CALL_MULTI_PERFORM) {
- LOG(("curl_multi_perform: %i %s",
- codem, curl_multi_strerror(codem)));
+ LOG("curl_multi_perform: %i %s", codem, curl_multi_strerror(codem));
warn_user("MiscError", curl_multi_strerror(codem));
return;
}
@@ -1169,7 +1163,7 @@ static size_t fetch_curl_header(char *data, size_t size, size_t nmemb,
free(f->location);
f->location = malloc(size);
if (!f->location) {
- LOG(("malloc failed"));
+ LOG("malloc failed");
return size;
}
SKIP_ST(9);
@@ -1242,17 +1236,17 @@ nserror fetch_curl_register(void)
.finalise = fetch_curl_finalise
};
- LOG(("curl_version %s", curl_version()));
+ LOG("curl_version %s", curl_version());
code = curl_global_init(CURL_GLOBAL_ALL);
if (code != CURLE_OK) {
- LOG(("curl_global_init failed."));
+ LOG("curl_global_init failed.");
return NSERROR_INIT_FAILED;
}
fetch_curl_multi = curl_multi_init();
if (!fetch_curl_multi) {
- LOG(("curl_multi_init failed."));
+ LOG("curl_multi_init failed.");
return NSERROR_INIT_FAILED;
}
@@ -1280,7 +1274,7 @@ nserror fetch_curl_register(void)
*/
fetch_blank_curl = curl_easy_init();
if (!fetch_blank_curl) {
- LOG(("curl_easy_init failed"));
+ LOG("curl_easy_init failed");
return NSERROR_INIT_FAILED;
}
@@ -1312,11 +1306,11 @@ nserror fetch_curl_register(void)
if (nsoption_charp(ca_bundle) &&
strcmp(nsoption_charp(ca_bundle), "")) {
- LOG(("ca_bundle: '%s'", nsoption_charp(ca_bundle)));
+ LOG("ca_bundle: '%s'", nsoption_charp(ca_bundle));
SETOPT(CURLOPT_CAINFO, nsoption_charp(ca_bundle));
}
if (nsoption_charp(ca_path) && strcmp(nsoption_charp(ca_path), "")) {
- LOG(("ca_path: '%s'", nsoption_charp(ca_path)));
+ LOG("ca_path: '%s'", nsoption_charp(ca_path));
SETOPT(CURLOPT_CAPATH, nsoption_charp(ca_path));
}
@@ -1328,7 +1322,7 @@ nserror fetch_curl_register(void)
curl_with_openssl = false;
}
- LOG(("cURL %slinked against openssl", curl_with_openssl ? "" : "not "));
+ LOG("cURL %slinked against openssl", curl_with_openssl ? "" : "not ");
/* cURL initialised okay, register the fetchers */
@@ -1347,20 +1341,19 @@ nserror fetch_curl_register(void)
}
if (fetcher_add(scheme, &fetcher_ops) != NSERROR_OK) {
- LOG(("Unable to register cURL fetcher for %s",
- data->protocols[i]));
+ LOG("Unable to register cURL fetcher for %s", data->protocols[i]);
}
}
return NSERROR_OK;
curl_easy_setopt_failed:
- LOG(("curl_easy_setopt failed."));
+ LOG("curl_easy_setopt failed.");
return NSERROR_INIT_FAILED;
#if LIBCURL_VERSION_NUM >= 0x071e00
curl_multi_setopt_failed:
- LOG(("curl_multi_setopt failed."));
+ LOG("curl_multi_setopt failed.");
return NSERROR_INIT_FAILED;
#endif
}
diff --git a/content/fetchers/data.c b/content/fetchers/data.c
index ecf77b048..00494ccc7 100644
--- a/content/fetchers/data.c
+++ b/content/fetchers/data.c
@@ -62,7 +62,7 @@ static CURL *curl;
static bool fetch_data_initialise(lwc_string *scheme)
{
- LOG(("fetch_data_initialise called for %s", lwc_string_data(scheme)));
+ LOG("fetch_data_initialise called for %s", lwc_string_data(scheme));
if ( (curl = curl_easy_init()) == NULL)
return false;
else
@@ -71,7 +71,7 @@ static bool fetch_data_initialise(lwc_string *scheme)
static void fetch_data_finalise(lwc_string *scheme)
{
- LOG(("fetch_data_finalise called for %s", lwc_string_data(scheme)));
+ LOG("fetch_data_finalise called for %s", lwc_string_data(scheme));
curl_easy_cleanup(curl);
}
@@ -154,7 +154,7 @@ static bool fetch_data_process(struct fetch_data_context *c)
* data must still be there.
*/
- LOG(("url: %.140s", c->url));
+ LOG("url: %.140s", c->url);
if (strlen(c->url) < 6) {
/* 6 is the minimum possible length (data:,) */
@@ -263,8 +263,7 @@ static void fetch_data_poll(lwc_string *scheme)
char header[64];
fetch_set_http_code(c->parent_fetch, 200);
- LOG(("setting data: MIME type to %s, length to %zd",
- c->mimetype, c->datalen));
+ LOG("setting data: MIME type to %s, length to %zd", c->mimetype, c->datalen);
/* Any callback can result in the fetch being aborted.
* Therefore, we _must_ check for this after _every_
* call to fetch_data_send_callback().
@@ -300,7 +299,7 @@ static void fetch_data_poll(lwc_string *scheme)
fetch_data_send_callback(&msg, c);
}
} else {
- LOG(("Processing of %s failed!", c->url));
+ LOG("Processing of %s failed!", c->url);
/* Ensure that we're unlocked here. If we aren't,
* then fetch_data_process() is broken.
diff --git a/content/fs_backing_store.c b/content/fs_backing_store.c
index 2e7b3919a..cedb8a49f 100644
--- a/content/fs_backing_store.c
+++ b/content/fs_backing_store.c
@@ -519,11 +519,11 @@ invalidate_entry(struct store_state *state, struct store_entry *bse)
* This entry cannot be immediately removed as it has
* associated allocation so wait for allocation release.
*/
- LOG(("invalidating entry with referenced allocation"));
+ LOG("invalidating entry with referenced allocation");
return NSERROR_OK;
}
- LOG(("Removing entry for %p", bse));
+ LOG("Removing entry for %p", bse);
/* remove the entry from the index */
ret = remove_store_entry(state, &bse);
@@ -533,12 +533,12 @@ invalidate_entry(struct store_state *state, struct store_entry *bse)
ret = invalidate_element(state, bse, ENTRY_ELEM_META);
if (ret != NSERROR_OK) {
- LOG(("Error invalidating metadata element"));
+ LOG("Error invalidating metadata element");
}
ret = invalidate_element(state, bse, ENTRY_ELEM_DATA);
if (ret != NSERROR_OK) {
- LOG(("Error invalidating data element"));
+ LOG("Error invalidating data element");
}
return NSERROR_OK;
@@ -620,8 +620,8 @@ static nserror store_evict(struct store_state *state)
return NSERROR_OK;
}
- LOG(("Evicting entries to reduce %d by %d",
- state->total_alloc, state->hysteresis));
+ LOG("Evicting entries to reduce %"PRIu64" by %zd",
+ state->total_alloc, state->hysteresis);
/* allocate storage for the list */
elist = malloc(sizeof(entry_ident_t) * state->last_entry);
@@ -658,7 +658,7 @@ static nserror store_evict(struct store_state *state)
free(elist);
- LOG(("removed %d in %d entries", removed, ent));
+ LOG("removed %zd in %d entries", removed, ent);
return ret;
}
@@ -773,7 +773,7 @@ static nserror write_blocks(struct store_state *state)
&state->blocks[elem_idx][bfidx].use_map[0],
BLOCK_USE_MAP_SIZE);
if (wr != BLOCK_USE_MAP_SIZE) {
- LOG(("writing block file %d use index on file number %d failed", elem_idx, bfidx));
+ LOG("writing block file %d use index on file number %d failed", elem_idx, bfidx);
goto wr_err;
}
written += wr;
@@ -829,19 +829,19 @@ static nserror set_block_extents(struct store_state *state)
return NSERROR_OK;
}
- LOG(("Starting"));
+ LOG("Starting");
for (elem_idx = 0; elem_idx < ENTRY_ELEM_COUNT; elem_idx++) {
for (bfidx = 0; bfidx < BLOCK_FILE_COUNT; bfidx++) {
if (state->blocks[elem_idx][bfidx].fd != -1) {
/* ensure block file is correct extent */
ftr = ftruncate(state->blocks[elem_idx][bfidx].fd, 1U << (log2_block_size[elem_idx] + BLOCK_ENTRY_COUNT));
if (ftr == -1) {
- LOG(("Truncate failed errno:%d", errno));
+ LOG("Truncate failed errno:%d", errno);
}
}
}
}
- LOG(("Complete"));
+ LOG("Complete");
state->blocks_opened = false;
@@ -886,7 +886,7 @@ get_store_entry(struct store_state *state, nsurl *url, struct store_entry **bse)
entry_ident_t ident;
unsigned int sei; /* store entry index */
- LOG(("url:%s", nsurl_access(url)));
+ LOG("url:%s", nsurl_access(url));
/* use the url hash as the entry identifier */
ident = nsurl_hash(url);
@@ -894,13 +894,13 @@ get_store_entry(struct store_state *state, nsurl *url, struct store_entry **bse)
sei = BS_ENTRY_INDEX(ident, state);
if (sei == 0) {
- LOG(("Failed to find ident 0x%x in index", ident));
+ LOG("Failed to find ident 0x%x in index", ident);
return NSERROR_NOT_FOUND;
}
if (state->entries[sei].ident != ident) {
/* entry ident did not match */
- LOG(("ident did not match entry"));
+ LOG("ident did not match entry");
return NSERROR_NOT_FOUND;
}
@@ -975,7 +975,7 @@ set_store_entry(struct store_state *state,
nserror ret;
struct store_entry_element *elem;
- LOG(("url:%s", nsurl_access(url)));
+ LOG("url:%s", nsurl_access(url));
/* evict entries as required and ensure there is at least one
* new entry available.
@@ -1013,8 +1013,7 @@ set_store_entry(struct store_state *state,
* to see if the old entry is in use and if
* not prefer the newly stored entry instead?
*/
- LOG(("Entry index collision trying to replace %x with %x",
- se->ident, ident));
+ LOG("Entry index collision trying to replace %x with %x", se->ident, ident);
return NSERROR_PERMISSION;
}
}
@@ -1027,7 +1026,7 @@ set_store_entry(struct store_state *state,
/* this entry cannot be removed as it has associated
* allocation.
*/
- LOG(("attempt to overwrite entry with in use data"));
+ LOG("attempt to overwrite entry with in use data");
return NSERROR_PERMISSION;
}
@@ -1086,7 +1085,7 @@ store_open(struct store_state *state,
fname = store_fname(state, ident, elem_idx);
if (fname == NULL) {
- LOG(("filename error"));
+ LOG("filename error");
return -1;
}
@@ -1094,13 +1093,13 @@ store_open(struct store_state *state,
if (openflags & O_CREAT) {
ret = netsurf_mkdir_all(fname);
if (ret != NSERROR_OK) {
- LOG(("file path \"%s\" could not be created", fname));
+ LOG("file path \"%s\" could not be created", fname);
free(fname);
return -1;
}
}
- LOG(("opening %s", fname));
+ LOG("opening %s", fname);
fd = open(fname, openflags, S_IRUSR | S_IWUSR);
free(fname);
@@ -1127,9 +1126,9 @@ build_entrymap(struct store_state *state)
{
unsigned int eloop;
- LOG(("Allocating %d bytes for max of %d buckets",
- (1 << state->ident_bits) * sizeof(entry_index_t),
- 1 << state->ident_bits));
+ LOG("Allocating %ld bytes for max of %d buckets",
+ (1 << state->ident_bits) * sizeof(entry_index_t),
+ 1 << state->ident_bits);
state->addrmap = calloc(1 << state->ident_bits, sizeof(entry_index_t));
if (state->addrmap == NULL) {
@@ -1205,10 +1204,10 @@ read_entries(struct store_state *state)
entries_size = (1 << state->entry_bits) * sizeof(struct store_entry);
- LOG(("Allocating %d bytes for max of %d entries of %d length elements %d length",
- entries_size, 1 << state->entry_bits,
- sizeof(struct store_entry),
- sizeof(struct store_entry_element)));
+ LOG("Allocating %zd bytes for max of %d entries of %ld length elements %ld length",
+ entries_size, 1 << state->entry_bits,
+ sizeof(struct store_entry),
+ sizeof(struct store_entry_element));
state->entries = calloc(1, entries_size);
if (state->entries == NULL) {
@@ -1223,7 +1222,7 @@ read_entries(struct store_state *state)
close(fd);
if (rd > 0) {
state->last_entry = rd / sizeof(struct store_entry);
- LOG(("Read %d entries", state->last_entry));
+ LOG("Read %d entries", state->last_entry);
}
} else {
/* could rebuild entries from fs */
@@ -1254,7 +1253,7 @@ read_blocks(struct store_state *state)
return ret;
}
- LOG(("Initialising block use map from %s", fname));
+ LOG("Initialising block use map from %s", fname);
fd = open(fname, O_RDWR);
free(fname);
@@ -1266,7 +1265,7 @@ read_blocks(struct store_state *state)
&state->blocks[elem_idx][bfidx].use_map[0],
BLOCK_USE_MAP_SIZE);
if (rd <= 0) {
- LOG(("reading block file %d use index on file number %d failed", elem_idx, bfidx));
+ LOG("reading block file %d use index on file number %d failed", elem_idx, bfidx);
goto rd_err;
}
}
@@ -1275,7 +1274,7 @@ read_blocks(struct store_state *state)
close(fd);
} else {
- LOG(("Initialising block use map to defaults"));
+ LOG("Initialising block use map to defaults");
/* ensure block 0 (invalid sentinal) is skipped */
state->blocks[ENTRY_ELEM_DATA][0].use_map[0] = 1;
state->blocks[ENTRY_ELEM_META][0].use_map[0] = 1;
@@ -1345,7 +1344,7 @@ write_control(struct store_state *state)
return ret;
}
- LOG(("writing control file \"%s\"", fname));
+ LOG("writing control file \"%s\"", fname);
ret = netsurf_mkdir_all(fname);
if (ret != NSERROR_OK) {
@@ -1393,7 +1392,7 @@ read_control(struct store_state *state)
return ret;
}
- LOG(("opening control file \"%s\"", fname));
+ LOG("opening control file \"%s\"", fname);
fcontrol = fopen(fname, "rb");
@@ -1510,7 +1509,7 @@ initialise(const struct llcache_store_parameters *parameters)
/* read store control and create new if required */
ret = read_control(newstate);
if (ret != NSERROR_OK) {
- LOG(("read control failed %s", messages_get_errorcode(ret)));
+ LOG("read control failed %s", messages_get_errorcode(ret));
ret = write_control(newstate);
if (ret == NSERROR_OK) {
unlink_entries(newstate);
@@ -1559,12 +1558,15 @@ initialise(const struct llcache_store_parameters *parameters)
storestate = newstate;
- LOG(("FS backing store init successful"));
+ LOG("FS backing store init successful");
- LOG(("path:%s limit:%d hyst:%d addr:%d entries:%d",
- newstate->path, newstate->limit, newstate->hysteresis,
- newstate->ident_bits, newstate->entry_bits));
- LOG(("Using %lld/%lld", newstate->total_alloc, newstate->limit));
+ LOG("path:%s limit:%zd hyst:%zd addr:%d entries:%d",
+ newstate->path,
+ newstate->limit,
+ newstate->hysteresis,
+ newstate->ident_bits,
+ newstate->entry_bits);
+ LOG("Using %"PRIu64"/%zd", newstate->total_alloc, newstate->limit);
return NSERROR_OK;
}
@@ -1603,14 +1605,14 @@ finalise(void)
/* avoid division by zero */
if (op_count > 0) {
- LOG(("Cache total/hit/miss/fail (counts) %d/%d/%d/%d (100%%/%d%%/%d%%/%d%%)",
- op_count,
- storestate->hit_count,
- storestate->miss_count,
- 0,
- (storestate->hit_count * 100) / op_count,
- (storestate->miss_count * 100) / op_count,
- 0));
+ LOG("Cache total/hit/miss/fail (counts) %d/%zd/%zd/%d (100%%/%zd%%/%zd%%/%d%%)",
+ op_count,
+ storestate->hit_count,
+ storestate->miss_count,
+ 0,
+ (storestate->hit_count * 100) / op_count,
+ (storestate->miss_count * 100) / op_count,
+ 0);
}
free(storestate->path);
@@ -1644,7 +1646,7 @@ static nserror store_write_block(struct store_state *state,
state->blocks[elem_idx][bf].fd = store_open(state, bf,
elem_idx + ENTRY_ELEM_COUNT, O_CREAT | O_RDWR);
if (state->blocks[elem_idx][bf].fd == -1) {
- LOG(("Open failed errno %d", errno));
+ LOG("Open failed errno %d", errno);
return NSERROR_SAVE_FAILED;
}
@@ -1659,15 +1661,21 @@ static nserror store_write_block(struct store_state *state,
bse->elem[elem_idx].size,
offst);
if (wr != (ssize_t)bse->elem[elem_idx].size) {
- LOG(("Write failed %d of %d bytes from %p at 0x%x block %d errno %d",
- wr, bse->elem[elem_idx].size, bse->elem[elem_idx].data,
- offst, bse->elem[elem_idx].block, errno));
+ LOG("Write failed %zd of %d bytes from %p at 0x%jx block %d errno %d",
+ wr,
+ bse->elem[elem_idx].size,
+ bse->elem[elem_idx].data,
+ (uintmax_t)offst,
+ bse->elem[elem_idx].block,
+ errno);
return NSERROR_SAVE_FAILED;
}
- LOG(("Wrote %d bytes from %p at 0x%x block %d",
- wr, bse->elem[elem_idx].data,
- offst, bse->elem[elem_idx].block));
+ LOG("Wrote %zd bytes from %p at 0x%jx block %d",
+ wr,
+ bse->elem[elem_idx].data,
+ (uintmax_t)offst,
+ bse->elem[elem_idx].block);
return NSERROR_OK;
}
@@ -1691,7 +1699,7 @@ static nserror store_write_file(struct store_state *state,
fd = store_open(state, bse->ident, elem_idx, O_CREAT | O_WRONLY);
if (fd < 0) {
perror("");
- LOG(("Open failed %d errno %d", fd, errno));
+ LOG("Open failed %d errno %d", fd, errno);
return NSERROR_SAVE_FAILED;
}
@@ -1700,15 +1708,17 @@ static nserror store_write_file(struct store_state *state,
close(fd);
if (wr != (ssize_t)bse->elem[elem_idx].size) {
- LOG(("Write failed %d of %d bytes from %p errno %d",
- wr, bse->elem[elem_idx].size, bse->elem[elem_idx].data,
- err));
+ LOG("Write failed %zd of %d bytes from %p errno %d",
+ wr,
+ bse->elem[elem_idx].size,
+ bse->elem[elem_idx].data,
+ err);
/** @todo Delete the file? */
return NSERROR_SAVE_FAILED;
}
- LOG(("Wrote %d bytes from %p", wr, bse->elem[elem_idx].data));
+ LOG("Wrote %zd bytes from %p", wr, bse->elem[elem_idx].data);
return NSERROR_OK;
}
@@ -1749,7 +1759,7 @@ store(nsurl *url,
/* set the store entry up */
ret = set_store_entry(storestate, url, elem_idx, data, datalen, &bse);
if (ret != NSERROR_OK) {
- LOG(("store entry setting failed"));
+ LOG("store entry setting failed");
return ret;
}
@@ -1772,7 +1782,7 @@ static nserror entry_release_alloc(struct store_entry_element *elem)
if ((elem->flags & ENTRY_ELEM_FLAG_HEAP) != 0) {
elem->ref--;
if (elem->ref == 0) {
- LOG(("freeing %p", elem->data));
+ LOG("freeing %p", elem->data);
free(elem->data);
elem->flags &= ~ENTRY_ELEM_FLAG_HEAP;
}
@@ -1804,7 +1814,7 @@ static nserror store_read_block(struct store_state *state,
state->blocks[elem_idx][bf].fd = store_open(state, bf,
elem_idx + ENTRY_ELEM_COUNT, O_CREAT | O_RDWR);
if (state->blocks[elem_idx][bf].fd == -1) {
- LOG(("Open failed errno %d", errno));
+ LOG("Open failed errno %d", errno);
return NSERROR_SAVE_FAILED;
}
@@ -1819,15 +1829,21 @@ static nserror store_read_block(struct store_state *state,
bse->elem[elem_idx].size,
offst);
if (rd != (ssize_t)bse->elem[elem_idx].size) {
- LOG(("Failed reading %d of %d bytes into %p from 0x%x block %d errno %d",
- rd, bse->elem[elem_idx].size, bse->elem[elem_idx].data,
- offst, bse->elem[elem_idx].block, errno));
+ LOG("Failed reading %zd of %d bytes into %p from 0x%jx block %d errno %d",
+ rd,
+ bse->elem[elem_idx].size,
+ bse->elem[elem_idx].data,
+ (uintmax_t)offst,
+ bse->elem[elem_idx].block,
+ errno);
return NSERROR_SAVE_FAILED;
}
- LOG(("Read %d bytes into %p from 0x%x block %d",
- rd, bse->elem[elem_idx].data,
- offst, bse->elem[elem_idx].block));
+ LOG("Read %zd bytes into %p from 0x%jx block %d",
+ rd,
+ bse->elem[elem_idx].data,
+ (uintmax_t)offst,
+ bse->elem[elem_idx].block);
return NSERROR_OK;
}
@@ -1852,7 +1868,7 @@ static nserror store_read_file(struct store_state *state,
/* separate file in backing store */
fd = store_open(storestate, bse->ident, elem_idx, O_RDONLY);
if (fd < 0) {
- LOG(("Open failed %d errno %d", fd, errno));
+ LOG("Open failed %d errno %d", fd, errno);
/** @todo should this invalidate the entry? */
return NSERROR_NOT_FOUND;
}
@@ -1862,7 +1878,7 @@ static nserror store_read_file(struct store_state *state,
bse->elem[elem_idx].data + tot,
bse->elem[elem_idx].size - tot);
if (rd <= 0) {
- LOG(("read error returned %d errno %d", rd, errno));
+ LOG("read error returned %zd errno %d", rd, errno);
ret = NSERROR_NOT_FOUND;
break;
}
@@ -1871,7 +1887,7 @@ static nserror store_read_file(struct store_state *state,
close(fd);
- LOG(("Read %d bytes into %p", tot, bse->elem[elem_idx].data));
+ LOG("Read %zd bytes into %p", tot, bse->elem[elem_idx].data);
return ret;
}
@@ -1904,13 +1920,13 @@ fetch(nsurl *url,
/* fetch store entry */
ret = get_store_entry(storestate, url, &bse);
if (ret != NSERROR_OK) {
- LOG(("entry not found"));
+ LOG("entry not found");
storestate->miss_count++;
return ret;
}
storestate->hit_count++;
- LOG(("retriving cache data for url:%s", nsurl_access(url)));
+ LOG("retriving cache data for url:%s", nsurl_access(url));
/* calculate the entry element index */
if ((bsflags & BACKING_STORE_META) != 0) {
@@ -1925,17 +1941,16 @@ fetch(nsurl *url,
/* use the existing allocation and bump the ref count. */
elem->ref++;
- LOG(("Using existing entry (%p) allocation %p refs:%d",
- bse, elem->data, elem->ref));
+ LOG("Using existing entry (%p) allocation %p refs:%d", bse, elem->data, elem->ref);
} else {
/* allocate from the heap */
elem->data = malloc(elem->size);
if (elem->data == NULL) {
- LOG(("Failed to create new heap allocation"));
+ LOG("Failed to create new heap allocation");
return NSERROR_NOMEM;
}
- LOG(("Created new heap allocation %p", elem->data));
+ LOG("Created new heap allocation %p", elem->data);
/* mark the entry as having a valid heap allocation */
elem->flags |= ENTRY_ELEM_FLAG_HEAP;
@@ -1984,7 +1999,7 @@ static nserror release(nsurl *url, enum backing_store_flags bsflags)
ret = get_store_entry(storestate, url, &bse);
if (ret != NSERROR_OK) {
- LOG(("entry not found"));
+ LOG("entry not found");
return ret;
}
diff --git a/content/hlcache.c b/content/hlcache.c
index 6e991281d..388c59a94 100644
--- a/content/hlcache.c
+++ b/content/hlcache.c
@@ -191,7 +191,7 @@ static void hlcache_content_callback(struct content *c, content_msg msg,
error = handle->cb(handle, &event, handle->pw);
if (error != NSERROR_OK)
- LOG(("Error in callback: %d", error));
+ LOG("Error in callback: %d", error);
}
/**
@@ -560,7 +560,7 @@ void hlcache_finalise(void)
num_contents++;
}
- LOG(("%d contents remain before cache drain", num_contents));
+ LOG("%d contents remain before cache drain", num_contents);
/* Drain cache */
do {
@@ -574,17 +574,14 @@ void hlcache_finalise(void)
}
} while (num_contents > 0 && num_contents != prev_contents);
- LOG(("%d contents remaining:", num_contents));
+ LOG("%d contents remaining:", num_contents);
for (entry = hlcache->content_list; entry != NULL; entry = entry->next) {
hlcache_handle entry_handle = { entry, NULL, NULL };
if (entry->content != NULL) {
- LOG((" %p : %s (%d users)", entry,
- nsurl_access(
- hlcache_handle_get_url(&entry_handle)),
- content_count_users(entry->content)));
+ LOG(" %p : %s (%d users)", entry, nsurl_access(hlcache_handle_get_url(&entry_handle)), content_count_users(entry->content));
} else {
- LOG((" %p", entry));
+ LOG(" %p", entry);
}
}
@@ -612,12 +609,12 @@ void hlcache_finalise(void)
hlcache->retrieval_ctx_ring = NULL;
}
- LOG(("hit/miss %d/%d", hlcache->hit_count, hlcache->miss_count));
+ LOG("hit/miss %d/%d", hlcache->hit_count, hlcache->miss_count);
free(hlcache);
hlcache = NULL;
- LOG(("Finalising low-level cache"));
+ LOG("Finalising low-level cache");
llcache_finalise();
}
diff --git a/content/llcache.c b/content/llcache.c
index 046dd1ae4..219e315b3 100644
--- a/content/llcache.c
+++ b/content/llcache.c
@@ -1294,13 +1294,13 @@ llcache_serialise_metadata(llcache_object *object,
overflow:
/* somehow we overflowed the buffer - hth? */
- LOG(("Overflowed metadata buffer"));
+ LOG("Overflowed metadata buffer");
free(data);
return NSERROR_INVALID;
operror:
/* output error */
- LOG(("Output error"));
+ LOG("Output error");
free(data);
return NSERROR_INVALID;
}
@@ -1338,7 +1338,7 @@ llcache_process_metadata(llcache_object *object)
size_t num_headers;
size_t hloop;
- LOG(("Retriving metadata"));
+ LOG("Retriving metadata");
/* attempt to retrieve object metadata from the backing store */
res = guit->llcache->fetch(object->url,
@@ -1351,7 +1351,7 @@ llcache_process_metadata(llcache_object *object)
end = metadata + metadatalen;
- LOG(("Processing retrived data"));
+ LOG("Processing retrived data");
/* metadata line 1 is the url the metadata referrs to */
line = 1;
@@ -1374,9 +1374,7 @@ llcache_process_metadata(llcache_object *object)
* by simply skipping caching of this object.
*/
- LOG(("Got metadata for %s instead of %s",
- nsurl_access(metadataurl),
- nsurl_access(object->url)));
+ LOG("Got metadata for %s instead of %s", nsurl_access(metadataurl), nsurl_access(object->url));
nsurl_unref(metadataurl);
@@ -1469,7 +1467,7 @@ llcache_process_metadata(llcache_object *object)
return NSERROR_OK;
format_error:
- LOG(("metadata error on line %d error code %d\n", line, res));
+ LOG("metadata error on line %d error code %d\n", line, res);
guit->llcache->release(object->url, BACKING_STORE_META);
return res;
@@ -1859,7 +1857,7 @@ static nserror llcache_fetch_redirect(llcache_object *object, const char *target
/* Forcibly stop redirecting if we've followed too many redirects */
#define REDIRECT_LIMIT 10
if (object->fetch.redirect_count > REDIRECT_LIMIT) {
- LOG(("Too many nested redirects"));
+ LOG("Too many nested redirects");
event.type = LLCACHE_EVENT_ERROR;
event.data.msg = messages_get("BadRedirect");
@@ -2444,9 +2442,8 @@ static void llcache_persist_slowcheck(void *p)
total_bandwidth = (llcache->total_written * 1000) / llcache->total_elapsed;
if (total_bandwidth < llcache->minimum_bandwidth) {
- LOG(("Current bandwidth %llu less than minimum %llu",
- total_bandwidth, llcache->minimum_bandwidth));
- LOG(("Disabling disc cache; too slow"));
+ LOG("Current bandwidth %"PRIu64" less than minimum %zd",
+ total_bandwidth, llcache->minimum_bandwidth);
guit->llcache->finalise();
}
}
@@ -2502,7 +2499,7 @@ static void llcache_persist(void *p)
* (bandwidth) for this run being exceeded.
*/
if (total_elapsed > llcache->time_quantum) {
- LOG(("Overran timeslot"));
+ LOG("Overran timeslot");
/* writeout has exhausted the available time.
* Either the writeout is slow or the last
* object was very large.
@@ -2858,12 +2855,11 @@ static nserror llcache_object_notify_users(llcache_object *object)
#ifdef LLCACHE_TRACE
if (handle->state != objstate) {
if (emitted_notify == false) {
- LOG(("Notifying users of %p", object));
+ LOG("Notifying users of %p", object);
emitted_notify = true;
}
- LOG(("User %p state: %d Object state: %d",
- user, handle->state, objstate));
+ LOG("User %p state: %d Object state: %d", user, handle->state, objstate);
}
#endif
@@ -3296,7 +3292,7 @@ llcache_initialise(const struct llcache_parameters *prm)
llcache->time_quantum = prm->time_quantum;
llcache->all_caught_up = true;
- LOG(("llcache initialising with a limit of %d bytes", llcache->limit));
+ LOG("llcache initialising with a limit of %d bytes", llcache->limit);
/* backing store initialisation */
return guit->llcache->initialise(&prm->store);
@@ -3359,10 +3355,7 @@ void llcache_finalise(void)
llcache->total_elapsed;
}
- LOG(("Backing store wrote %"PRIu64" bytes in %"PRIu64" ms "
- "(average %"PRIu64" bytes/second)",
- llcache->total_written, llcache->total_elapsed,
- total_bandwidth));
+ LOG("Backing store wrote %"PRIu64" bytes in %"PRIu64" ms ""(average %"PRIu64" bytes/second)", llcache->total_written, llcache->total_elapsed, total_bandwidth);
free(llcache);
llcache = NULL;
diff --git a/content/urldb.c b/content/urldb.c
index a9476d72a..272f080c8 100644
--- a/content/urldb.c
+++ b/content/urldb.c
@@ -564,7 +564,7 @@ static bool urldb__host_is_ip_address(const char *host)
c[slash - host] = '\0';
sane_host = c;
host_len = slash - host - 1;
- LOG(("WARNING: called with non-host '%s'", host));
+ LOG("WARNING: called with non-host '%s'", host);
}
if (strspn(sane_host, "0123456789abcdefABCDEF[].:") < host_len)
@@ -1152,7 +1152,7 @@ static struct path_data *urldb_match_path(const struct path_data *parent,
assert(parent->segment == NULL);
if (path[0] != '/') {
- LOG(("path is %s", path));
+ LOG("path is %s", path);
}
assert(path[0] == '/');
@@ -1278,12 +1278,12 @@ static void urldb_dump_paths(struct path_data *parent)
do {
if (p->segment != NULL) {
- LOG(("\t%s : %u", lwc_string_data(p->scheme), p->port));
+ LOG("\t%s : %u", lwc_string_data(p->scheme), p->port);
- LOG(("\t\t'%s'", p->segment));
+ LOG("\t\t'%s'", p->segment);
for (i = 0; i != p->frag_cnt; i++)
- LOG(("\t\t\t#%s", p->fragment[i]));
+ LOG("\t\t\t#%s", p->fragment[i]);
}
if (p->children != NULL) {
@@ -1312,10 +1312,9 @@ static void urldb_dump_hosts(struct host_part *parent)
struct host_part *h;
if (parent->part) {
- LOG(("%s", parent->part));
+ LOG("%s", parent->part);
- LOG(("\t%s invalid SSL certs",
- parent->permit_invalid_certs ? "Permits" : "Denies"));
+ LOG("\t%s invalid SSL certs", parent->permit_invalid_certs ? "Permits" : "Denies");
}
/* Dump path data */
@@ -2493,14 +2492,14 @@ nserror urldb_load(const char *filename)
assert(filename);
- LOG(("Loading URL file %s", filename));
+ LOG("Loading URL file %s", filename);
if (url_bloom == NULL)
url_bloom = bloom_create(BLOOM_SIZE);
fp = fopen(filename, "r");
if (!fp) {
- LOG(("Failed to open file '%s' for reading", filename));
+ LOG("Failed to open file '%s' for reading", filename);
return NSERROR_NOT_FOUND;
}
@@ -2511,12 +2510,12 @@ nserror urldb_load(const char *filename)
version = atoi(s);
if (version < MIN_URL_FILE_VERSION) {
- LOG(("Unsupported URL file version."));
+ LOG("Unsupported URL file version.");
fclose(fp);
return NSERROR_INVALID;
}
if (version > URL_FILE_VERSION) {
- LOG(("Unknown URL file version."));
+ LOG("Unknown URL file version.");
fclose(fp);
return NSERROR_INVALID;
}
@@ -2546,13 +2545,13 @@ nserror urldb_load(const char *filename)
/* no URLs => try next host */
if (urls == 0) {
- LOG(("No URLs for '%s'", host));
+ LOG("No URLs for '%s'", host);
continue;
}
h = urldb_add_host(host);
if (!h) {
- LOG(("Failed adding host: '%s'", host));
+ LOG("Failed adding host: '%s'", host);
fclose(fp);
return NSERROR_NOMEM;
}
@@ -2603,7 +2602,7 @@ nserror urldb_load(const char *filename)
* Need a nsurl_save too.
*/
if (nsurl_create(url, &nsurl) != NSERROR_OK) {
- LOG(("Failed inserting '%s'", url));
+ LOG("Failed inserting '%s'", url);
fclose(fp);
return NSERROR_NOMEM;
}
@@ -2616,7 +2615,7 @@ nserror urldb_load(const char *filename)
/* Copy and merge path/query strings */
if (nsurl_get(nsurl, NSURL_PATH | NSURL_QUERY,
&path_query, &len) != NSERROR_OK) {
- LOG(("Failed inserting '%s'", url));
+ LOG("Failed inserting '%s'", url);
fclose(fp);
return NSERROR_NOMEM;
}
@@ -2627,7 +2626,7 @@ nserror urldb_load(const char *filename)
p = urldb_add_path(scheme_lwc, port, h, path_query,
fragment_lwc, nsurl);
if (!p) {
- LOG(("Failed inserting '%s'", url));
+ LOG("Failed inserting '%s'", url);
fclose(fp);
return NSERROR_NOMEM;
}
@@ -2668,7 +2667,7 @@ nserror urldb_load(const char *filename)
}
fclose(fp);
- LOG(("Successfully loaded URL file"));
+ LOG("Successfully loaded URL file");
#undef MAXIMUM_URL_LENGTH
return NSERROR_OK;
@@ -2684,7 +2683,7 @@ nserror urldb_save(const char *filename)
fp = fopen(filename, "w");
if (!fp) {
- LOG(("Failed to open file '%s' for writing", filename));
+ LOG("Failed to open file '%s' for writing", filename);
return NSERROR_SAVE_FAILED;
}
@@ -3065,7 +3064,7 @@ void urldb_set_thumbnail(nsurl *url, struct bitmap *bitmap)
p = urldb_find_url(url);
if (p != NULL) {
- LOG(("Setting bitmap on %s", nsurl_access(url)));
+ LOG("Setting bitmap on %s", nsurl_access(url));
if (p->thumb && p->thumb != bitmap) {
guit->bitmap->destroy(p->thumb);
@@ -3773,7 +3772,7 @@ void urldb_load_cookies(const char *filename)
if (loaded_cookie_file_version <
MIN_COOKIE_FILE_VERSION) {
- LOG(("Unsupported Cookie file version"));
+ LOG("Unsupported Cookie file version");
break;
}