Sorry for the repetition, but I have made changes based on some of David's suggestions and I still get one error and a memory leak in Valgrind. Please suggest where I am going wrong.
char *handle_url_followup(char* url)
{
    CURL *curl;
    char *url_new=NULL;

    struct url_data data;
    data.size = 0;
    data.data = malloc(4096); /* reasonable size initial buffer */
    if(NULL == data.data)
    {
        fprintf(stderr, "Failed to allocate memory.\n");
        return NULL;
    }

    data.data[0] = '\0';

    CURLcode res;

    curl = curl_easy_init();
    FILE *TEMP_url=fopen("new_url.txt","w");
    if (curl)
    {
        curl_easy_setopt(curl, CURLOPT_URL, url);
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &data);
        curl_easy_perform(curl);
        curl_easy_getinfo(curl,CURLINFO_EFFECTIVE_URL,&url_new);
        fprintf(TEMP_url,"%s",url_new);

        res = curl_easy_perform(curl);
        if(res != CURLE_OK)
        {
            fprintf(stderr, "curl_easy_perform() failed: %s\n",
                    curl_easy_strerror(res));
        }
    }
    curl_easy_cleanup(curl);

    return data.data;
    fclose(TEMP_url);
}

int main(int argc,char *argv[])
{
    char *mapping_data="NULL";
    char url_new[100]="NULL";

    //FIRST URL
    printf("URL: %s\n", url);
    mapping_data=handle_url_followup(url);
    printf("%s\n",mapping_data);
    //SECOND URL is in the file new_url.txt

The Valgrind errors are:

==2860== HEAP SUMMARY:
==2860==     in use at exit: 357,525 bytes in 3,045 blocks
==2860==   total heap usage: 5,033 allocs, 1,988 frees, 962,456 bytes allocated
==2860==
==2860== 403 bytes in 9 blocks are definitely lost in loss record 827 of 898
==2860==    at 0x10001155D: realloc (vg_replace_malloc.c:525)
==2860==    by 0x100000C71: write_data (Gwidd_uniprot_map2.c:23)
==2860==    by 0x10002DD9A: Curl_client_write (in /usr/lib/libcurl.4.dylib)
==2860==    by 0x1000431F9: Curl_readwrite (in /usr/lib/libcurl.4.dylib)
==2860==    by 0x1000439C1: Curl_perform (in /usr/lib/libcurl.4.dylib)
==2860==    by 0x100000FBF: handle_url_followup (Gwidd_uniprot_map2.c:109)
==2860==    by 0x100001998: main (Gwidd_uniprot_map2.c:248)
==2860==
==2860== LEAK SUMMARY:
==2860==    definitely lost: 403 bytes in 9 blocks
==2860==    indirectly lost: 0 bytes in 0 blocks
==2860==      possibly lost: 0 bytes in 0 blocks
==2860==    still reachable: 357,122 bytes in 3,036 blocks
==2860==         suppressed: 0 bytes in 0 blocks
==2860== Reachable blocks (those to which a pointer was found) are not shown.
==2860== To see them, rerun with: --leak-check=full --show-reachable=yes
==2860==
==2860== For counts of detected and suppressed errors, rerun with: -v
==2860== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 0 from 0)

Thanks!
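One fix I am considering, in case it is the right track: the "definitely lost" record points at the buffers that write_data() grows and handle_url_followup() returns, and my main() never frees them; also my fclose(TEMP_url) sits after the return, so it can never run. Below is only a sketch of how I would change main() (the URL literal is a placeholder and the rest of my real main() is snipped; it assumes handle_url_followup() above is declared before this point). Does this look like the right direction?

#include <stdio.h>
#include <stdlib.h>

/* sketch only: the caller owns the buffer returned by
   handle_url_followup() and has to free() it */
int main(int argc, char *argv[])
{
    char url[100] = "http://example.com";   /* placeholder, not my real URL */
    char *mapping_data = NULL;

    //FIRST URL
    printf("URL: %s\n", url);
    mapping_data = handle_url_followup(url);
    if (mapping_data)
    {
        printf("%s\n", mapping_data);
        free(mapping_data);   /* releases the buffer grown by write_data() */
    }
    //SECOND URL is in the file new_url.txt
    return 0;
}

Inside handle_url_followup() I would also move fclose(TEMP_url); above the return (and check that fopen() succeeded), since anything after return data.data; never executes. I suspect the large "still reachable" total is mostly libcurl/SSL global state that curl_global_init()/curl_global_cleanup() would account for, but I have not verified that part.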
On Mon, Aug 4, 2014 at 6:36 PM, David Chapman <dcchap...@acm.org> wrote:

> On 8/4/2014 4:16 PM, Madhurima Das wrote:
>
> I want to retrieve the information data of a particular website (url) and
> also find its follow-up url (url_new) but getting too many errors after
> running Valgrind along with memory leakage.
>
> The function and part of the main program is as follows:
>
>     void handle_url_followup(char* url, char** mapping_data, char** url_n)
>     {
>         CURL *curl;
>         char *url_new="NULL";
>
> Don't put quote marks here; you would be assigning a constant string to a
> non-const pointer. Even though you are not writing into the buffer, it is
> a risky practice. Just use NULL without quotes.
>
>         struct url_data data;
>         data.size = 0;
>         data.data = malloc(4096); /* reasonable size initial buffer */
>         if(NULL == data.data)
>         {
>             fprintf(stderr, "Failed to allocate memory.\n");
>             return NULL;
>         }
>
>         data.data[0] = '\0';
>
>         CURLcode res;
>
>         curl = curl_easy_init();
>         if (curl)
>         {
>             curl_easy_setopt(curl, CURLOPT_URL, url);
>             curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
>             curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
>             curl_easy_setopt(curl, CURLOPT_WRITEDATA, &data);
>             curl_easy_perform(curl);
>             curl_easy_getinfo(curl,CURLINFO_EFFECTIVE_URL,&url_new);
>
> You need to copy the effective URL to a safe place; this call returns a
> pointer to an internal cURL data structure. I would not expect cURL to
> guarantee that this pointer would survive another call to
> curl_easy_perform().
>
>             res = curl_easy_perform(curl);
>             if(res != CURLE_OK)
>             {
>                 fprintf(stderr, "curl_easy_perform() failed: %s\n",
>                         curl_easy_strerror(res));
>             }
>         }
>         curl_easy_cleanup(curl);
>
> And now the effective URL has been freed.
>
>         *mapping_data=data.data;
>         *url_n=url_new;
>
> The returned string url_new needs to be freed by the caller.
>
> Fix this, then see if you still have valgrind errors.
>
>     }
>
>     int main(int argc,char *argv[])
>     {
>         char url[100]="NULL";
>         char *mapping_data="NULL";
>         char *url_new="NULL";
>
>         //FIRST URL
>         printf("URL: %s\n", url);
>         handle_url_followup(url,&mapping_data,&url_new);
>         //SECOND URL
>         printf("%s\n",url_new);
>
> I get both the urls but in addition get 973 errors with Valgrind and
> memory leakage of 403 bytes in 9 blocks. A few errors are:
>
> ==6599== Invalid read of size 1
> ==6599==    at 0x1000124F6: strlen (mc_replace_strmem.c:282)
> ==6599==    by 0x10010AEF4: puts (in /usr/lib/libSystem.B.dylib)
> ==6599==    by 0x1000019F9: main (prog.c:247)
> ==6599==  Address 0x1011db5d0 is 0 bytes inside a block of size 51 free'd
> ==6599==    at 0x100010E9F: free (vg_replace_malloc.c:366)
> ==6599==    by 0x100037C6B: Curl_close (in /usr/lib/libcurl.4.dylib)
> ==6599==    by 0x100001055: handle_url_followup (prog.c:113)
> ==6599==    by 0x1000019ED: main (prog.c:245)
>
> ==6599== Invalid read of size 8
> ==6599==    at 0x100013C30: memcpy (mc_replace_strmem.c:635)
> ==6599==    by 0x100096E74: __sfvwrite (in /usr/lib/libSystem.B.dylib)
> ==6599==    by 0x10010AF66: puts (in /usr/lib/libSystem.B.dylib)
> ==6599==    by 0x1000019F9: main (prog.c:247)
> ==6599==  Address 0x1011db5d0 is 0 bytes inside a block of size 51 free'd
> ==6599==    at 0x100010E9F: free (vg_replace_malloc.c:366)
> ==6599==    by 0x100037C6B: Curl_close (in /usr/lib/libcurl.4.dylib)
> ==6599==    by 0x100001055: handle_url_followup (prog.c:113)
> ==6599==    by 0x1000019ED: main (prog.c:245)
> ==6599==
>
> ==6599==
> ==6599== HEAP SUMMARY:
> ==6599==     in use at exit: 320,629 bytes in 3,035 blocks
> ==6599==   total heap usage: 5,023 allocs, 1,988 frees, 925,560 bytes allocated
> ==6599==
> ==6599== 403 bytes in 9 blocks are definitely lost in loss record 827 of 897
> ==6599==    at 0x10001155D: realloc (vg_replace_malloc.c:525)
> ==6599==    by 0x100000D05: write_data (Gwidd_uniprot_map2.c:23)
> ==6599==    by 0x10002DD9A: Curl_client_write (in /usr/lib/libcurl.4.dylib)
> ==6599==    by 0x1000431F9: Curl_readwrite (in /usr/lib/libcurl.4.dylib)
> ==6599==    by 0x1000439C1: Curl_perform (in /usr/lib/libcurl.4.dylib)
> ==6599==    by 0x10000101D: handle_url_followup (Gwidd_uniprot_map2.c:106)
> ==6599==    by 0x1000019ED: main (Gwidd_uniprot_map2.c:245)
> ==6599==
> ==6599== LEAK SUMMARY:
> ==6599==    definitely lost: 403 bytes in 9 blocks
>
> Any suggestion would be highly appreciated.
>
> Thanks!
>
> N.B. The other functions are:
>
>     struct url_data
>     {
>         size_t size;
>         char* data;
>     };
>
>     size_t write_data(void *ptr, size_t size, size_t nmemb, struct url_data *data)
>     {
>         size_t index = data->size;
>         size_t n = (size * nmemb);
>         char* tmp;
>
>         data->size += (size * nmemb);
>
>     #ifdef DEBUG
>         fprintf(stderr, "data at %p size=%ld nmemb=%ld\n", ptr, size, nmemb);
>     #endif
>         tmp = realloc(data->data, data->size + 1); /* +1 for '\0' */
>
>         if(tmp)
>         {
>             data->data = tmp;
>         }
>         else
>         {
>             if(data->data)
>             {
>                 free(data->data);
>             }
>             fprintf(stderr, "Failed to allocate memory.\n");
>             return 0;
>         }
>         memcpy((data->data + index), ptr, n);
>         data->data[data->size] = '\0';
>
>         return size * nmemb;
>     }
>
> -------------------------------------------------------------------
> List admin: http://cool.haxx.se/list/listinfo/curl-library
> Etiquette:  http://curl.haxx.se/mail/etiquette.html
>
> --
>     David Chapman            dcchap...@acm.org
>     Chapman Consulting -- San Jose, CA
>     Software Development Done Right.
>     www.chapman-consulting-sj.com
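P.S. To check my understanding of David's point above about CURLINFO_EFFECTIVE_URL: the pointer it hands back belongs to the curl handle, so if the URL is needed after curl_easy_cleanup() a private copy has to be made first. Is the following roughly the idea? It is only a sketch I have not run, and the copy_effective_url name is made up for illustration:

#include <string.h>
#include <curl/curl.h>

/* Sketch: copy the effective URL while the handle is still alive;
   the caller must free() the returned string. */
static char *copy_effective_url(CURL *curl)
{
    char *effective = NULL;
    char *copy = NULL;

    if(curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &effective) == CURLE_OK
       && effective)
    {
        copy = strdup(effective);   /* survives curl_easy_cleanup() */
    }
    return copy;
}

In my current version I only write url_new to new_url.txt before calling curl_easy_cleanup(), so I think I am no longer reading freed memory there, but I would take a copy like the above if I needed the URL later.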
-------------------------------------------------------------------
List admin: http://cool.haxx.se/list/listinfo/curl-library
Etiquette:  http://curl.haxx.se/mail/etiquette.html