2 Copyright (c) 2009 Joseph A. Adams
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
8 1. Redistributions of source code must retain the above copyright
9 notice, this list of conditions and the following disclaimer.
10 2. Redistributions in binary form must reproduce the above copyright
11 notice, this list of conditions and the following disclaimer in the
12 documentation and/or other materials provided with the distribution.
13 3. The name of the author may not be used to endorse or promote products
14 derived from this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "ccan_tokenizer.h"
30 #include <ccan/talloc/talloc.h>
/*
 * Keyword/operator dictionary for the C lexer: each dict_entry pairs a token
 * id (a char literal for single-character operators, an enum constant
 * otherwise) with its source spelling.  Entries are grouped roughly by
 * operator precedence, then C keywords, then preprocessor-only keywords.
 * NOTE(review): the stale line numbering shows gaps, so additional entries
 * exist in the full file that are not visible in this extraction.
 */
34 //Shown by operator precedence; based on
35 // http://tigcc.ticalc.org/doc/opers.html#precedence .
37 static struct dict_entry c_dictionary[] = {
46 {'!',"!"}, {'~',"~"}, //prefix
47 {INC_OP,"++"}, {DEC_OP,"--"}, //prefix or postfix
58 {LEFT_OP,"<<"}, {RIGHT_OP,">>"},
62 {LE_OP,"<="}, {GE_OP,">="},
65 {EQ_OP,"=="}, {NE_OP,"!="},
84 {MUL_ASSIGN,"*="}, {DIV_ASSIGN,"/="}, {MOD_ASSIGN,"%="},
85 {ADD_ASSIGN,"+="}, {SUB_ASSIGN,"-="},
86 {AND_ASSIGN,"&="}, {XOR_ASSIGN,"^="}, {OR_ASSIGN,"|="},
87 {LEFT_ASSIGN,"<<="}, {RIGHT_ASSIGN,">>="},
102 {'+',"+"}, {'-',"-"},
103 {'&',"&"}, {'*',"*"},
107 {_COMPLEX, "_Complex"},
108 {_IMAGINARY, "_Imaginary"},
113 {CONTINUE, "continue"},
114 {DEFAULT, "default"},
127 {REGISTER, "register"},
128 {RESTRICT, "restrict"},
136 {TYPEDEF, "typedef"},
138 {UNSIGNED, "unsigned"},
140 {VOLATILE, "volatile"},
143 //Preprocessor keywords (except those already defined)
144 {VA_ARGS, "__VA_ARGS__"},
153 {INCLUDE, "include"},
157 {WARNING, "warning"},
/*
 * Allocate a tokenizer as a talloc child of ctx.  Its message queue and its
 * private keyword dictionary (built from c_dictionary) are talloc'd under
 * the tokenizer itself, so freeing the tokenizer frees everything.
 * NOTE(review): the talloc() result is used without a NULL check — presumably
 * the project's policy is abort-on-OOM; confirm against talloc configuration.
 */
162 struct tokenizer *tokenizer_new(void *ctx) {
163 	struct tokenizer *t = talloc(ctx, struct tokenizer);
165 	queue_init(t->mq, t);
166 	t->dict = dict_build(t, c_dictionary, sizeof(c_dictionary)/sizeof(*c_dictionary));
173 #define MESSAGE_PATH "tokenize/"
/*
 * Build tl->txt: a copy of tl->orig with backslash-newline line splices
 * removed, NUL-terminated.  Also records, for each logical line, the start
 * pointer in the original text (tl->olines) and in the unbroken text
 * (tl->tlines), so positions can later be mapped between the two buffers.
 * Emits a warning into mq when whitespace follows a line-ending backslash.
 * Handles "\n", "\r", "\r\n", and "\n\r" line terminators.
 */
175 static void unbreak_backslash_broken_lines(struct token_list *tl, tok_message_queue *mq) {
176 	const char *s = tl->orig, *e = s+tl->orig_size;
177 	array_char txt = array_new(tl);
178 	array(const char*) olines = array_new(tl);
179 	array(const char*) tlines = array_new(tl);
182 		const char *line_start = s, *line_end;
183 		const char *lnw; //last non-white
184 		size_t start_offset = txt.size;
186 		//scan to the next line and find the last non-white character in the line
187 		while (s<e && !creturn(*s)) s++;
190 		while (lnw>line_start && cspace(lnw[-1])) lnw--;
191 		if (s<e && creturn(*s)) {
193 			//check for non-standard newlines (i.e. "\r", "\r\n", or "\n\r")
194 			if (s<e && *s=='\n'+'\r'-s[-1])
198 		//add the backslash-break-free version of the text
199 		if (lnw>line_start && lnw[-1]=='\\' && line_end<e) {
200 			array_append_items(txt, line_start, lnw-1-line_start);
201 			if (lnw<e && cspace(*lnw)) {
202 				tok_msg_warn(spaces_after_backslash_break, lnw,
203 					"Trailing spaces after backslash-broken line");
206 			array_append_items(txt, line_start, s-line_start);
208 		//add the line starts for this line
209 		array_append(olines, line_start);
210 		array_append(tlines, (const char*)start_offset);
211 		//Since the txt buffer moves when expanded, we're storing offsets
212 		//  for now.  Once we're done building txt, we can add the base
213 		//  of it to all the offsets to make them pointers.
216 	//stick a null terminator at the end of the text
217 	array_realloc(txt, txt.size+1);
218 	txt.item[txt.size] = 0;
220 	//convert the line start offsets to pointers
221 	array_for(i, tlines, *i = txt.item + (size_t)*i);
223 	tl->olines = olines.item;
224 	tl->olines_size = olines.size;
226 	tl->txt_size = txt.size;
227 	tl->tlines = tlines.item;
228 	tl->tlines_size = tlines.size;
/*
 * Demote a keyword token to an identifier when it is a preprocessor-
 * directive-only keyword (or __VA_ARGS__) appearing outside the context
 * where it has keyword meaning.
 */
231 static void normal_keyword(struct token *tok) {
232 	if (tok->type==TOK_KEYWORD &&
233 			(opkw_is_directive_only(tok->opkw) || tok->opkw==VA_ARGS))
234 		tok->type = TOK_IDENTIFIER;
/*
 * Return nonzero iff the last non-ignored token in [start,end) is the
 * "..." (ELLIPSIS) operator — i.e. the #define parameter list is variadic.
 */
237 static int define_parmlist_has_ellipsis(struct token *start, struct token *end) {
238 	while (end>start && token_is_ignored(end-1)) end--;
239 	return (end-->start && end->type==TOK_OPERATOR && end->opkw==ELLIPSIS);
242 //Used to label __VA_ARGS__ as keywords within applicable macro expansions
243 //Start should follow the DEFINE directive keyword
244 static void this_is_a_define(struct token *start, struct token *end) {
245 	struct token *i = start, *pl_start;
247 	//skip past the identifier that is defined
248 	while (i<end && token_is_ignored(i)) i++;
251 	//TODO: check i->type to make sure it's an identifier, throw error otherwise
254 	//see if this is actually a variadic macro
	// '(' must immediately follow the macro name for a function-like macro
255 	if (!(i<end && i->type==TOK_OPERATOR && i->opkw=='('))
	//scan to the closing ')' of the parameter list
258 	while (i<end && !(i->type==TOK_OPERATOR && i->opkw==')'))
260 	if (!define_parmlist_has_ellipsis(pl_start, i++))
263 	//We have arrived at the macro expansion and know there is a ... argument
264 	//Thus, we'll only change directive-only keywords to identifiers
	// (leaving __VA_ARGS__ tokens marked as keywords in the expansion)
266 		if (i->type==TOK_KEYWORD && opkw_is_directive_only(i->opkw))
267 			i->type = TOK_IDENTIFIER;
275 //fill the flags field of each token and untangle keywords and such
276 static void finalize_line(struct token *start, struct token *end) {
277 	struct token *i = start, *j;
	//every line must begin with the TOK_STARTLINE sentinel
279 	assert(start<end && start->type==TOK_STARTLINE);
	//skip whitespace/comment tokens to find the first significant token
282 	while (i<end && token_is_ignored(i)) i++;
	//a leading '#' makes this a preprocessor line
284 	if (i<end && i->type==TOK_OPERATOR && i->opkw=='#') {
286 		i->type = TOK_LEADING_POUND;
288 		//set pp on all tokens in this line
289 		for (j=start; j<end; j++)
292 		//find the relevant token after the '#'
293 		for (i++; i<end; i++) {
294 			if (!token_is_ignored(i)) {
295 				i->flags.pp_directive = 1;
				//non-directive keywords after '#' (e.g. "#if if") demote to identifiers
296 				if (i->type==TOK_KEYWORD && !opkw_is_directive(i->opkw))
297 					i->type = TOK_IDENTIFIER;
298 				//TODO: Handle invalid preprocessor directives (e.g. #+ )
			//for #define lines, fix up __VA_ARGS__ handling in the expansion
300 		if (i->type==TOK_KEYWORD && i->opkw==DEFINE) {
301 			for (j=i+1; j<end; j++)
302 			this_is_a_define(i+1, end);
317 //fill the list, flags, line, col, orig, and orig_size fields of each token
318 //convert identifiers mistaken for preprocessor keywords (e.g. ifdef) to identifiers
319 static void finalize(struct token_list *tl, struct token *start, struct token *end) {
320 	const char * const *lss = tl->tlines;
321 	const char * const *lse = lss + tl->tlines_size;
323 	struct token *startline = NULL;
330 	for (i=start; ; i++) {
331 		//perform a second pass on each line
		//(run finalize_line on the just-completed line whenever a new line
		// starts or the token array ends)
332 		if (i >= end || i->type == TOK_STARTLINE) {
334 				finalize_line(startline, i);
			//last token's orig_size extends to the end of the original text
339 				end[-1].orig_size = tl->orig+tl->orig_size - end[-1].orig;
343 		//set up the list links
344 		i->prev = i>start ? i-1 : NULL;
345 		i->next = i+1<end ? i+1 : NULL;
347 		//if i->txt starts on a later line, advance to it
348 		while (lss+1<lse && i->txt >= lss[1] && i->txt > lss[0])
351 		//set up line, col, orig, and orig_size
352 		i->line = lss - tl->tlines;
353 		i->col = i->txt - *lss;
354 		i->orig = tl->olines[i->line] + i->col;
		//previous token's orig span ends where this one's begins
356 			i[-1].orig_size = i->orig - i[-1].orig;
358 		assert(i->line < tl->olines_size);
		//flags start zeroed; finalize_line sets pp/pp_directive afterward
361 		memset(&i->flags, 0, sizeof(i->flags));
/*
 * add(...): build a token with designated initializers, set its txt_size
 * from the current scan position, and append it to the local `array`.
 * Relies on the surrounding tokenize() locals `s`, `orig`, and `array`.
 */
365 #define add(...) do { \
366 	struct token tok = {__VA_ARGS__}; \
368 	tok.txt_size = s-orig; \
369 	array_append(array, tok); \
/* cstray: characters that can never start a valid C token */
372 #define cstray(c) (ccontrol(c) || cextended(c) || (c)=='@' || (c)=='`' || (c)=='\\')
/* cident: characters valid inside an identifier (incl. '$', see below) */
373 #define cident(c) (cletter(c) || cdigit(c) || c=='_' || c=='$')
374 //believe it or not, $ is a valid character in an identifier
/* Process-wide keyword dictionary, built lazily by tokenize() and released
 * at process exit via the atexit handler below. */
376 struct dict *tokenizer_dict = NULL;
/* atexit hook: free the lazily-built global dictionary. */
378 static void free_tokenizer_dict(void) {
379 	talloc_free(tokenizer_dict);
/*
 * Main lexer: convert the text [orig, orig+orig_size) into a token_list.
 * The list is talloc'd under `orig`.  Steps visible here:
 *   1. lazily build the global keyword dictionary (freed at exit),
 *   2. splice backslash-broken lines into tl->txt,
 *   3. scan tl->txt emitting tokens (TOK_STARTLINE sentinels per line,
 *      whitespace, numbers, strings/chars, #include strings, comments,
 *      operators, keywords, identifiers, and stray bytes),
 *   4. report stray characters / non-standard newlines into mq,
 *   5. run finalize() to fill per-token bookkeeping.
 * NOTE(review): many interior lines are elided in this extraction; the
 * comments below describe only what the visible code establishes.
 */
382 struct token_list *tokenize(const char *orig, size_t orig_size,
383 		tok_message_queue *mq) {
384 	struct token_list *tl = talloc(orig, struct token_list);
386 	size_t stray_count=0, cr_count=0;
387 	array(struct token) array = array_new(tl);
388 	int only_pound_include = 0;
390 	if (!tokenizer_dict) {
391 		tokenizer_dict = dict_build(NULL, c_dictionary,
392 			sizeof(c_dictionary)/sizeof(*c_dictionary));
393 		atexit(free_tokenizer_dict);
397 	tl->orig_size = orig_size;
398 	unbreak_backslash_broken_lines(tl, mq);
402 	e = s + tl->txt_size;
	//every document begins with a TOK_STARTLINE sentinel
404 	array_appends(array, {
405 		.type = TOK_STARTLINE,
411 		const char *orig = s;
413 		int added_something = 1;
		//run of stray (invalid) characters -> one TOK_STRAY token
417 			while (s<e && cstray(*s)) {
421 			add(.type = TOK_STRAY);
423 			/* This has the potential to be very noisy on binary
424 			   files, but it really is quite useful. */
425 			tok_msg_error(stray_segment, orig,
426 				"%zu stray characters", s-orig);
428 		} else if (creturn(c)) {
429 			//check for non-standard newlines (i.e. "\r", "\r\n", or "\n\r")
			//('\n'+'\r'-c is the *other* newline char; a pair means a 2-char
			// terminator)
430 			if (s<e && *s=='\n'+'\r'-c) {
436 			add(.type = TOK_WHITE);
439 			//add a TOK_STARTLINE for the next line unless this is the end of the document
441 				add(.type = TOK_STARTLINE);
			//a newline ends any pending "#include" string context
443 			only_pound_include = 0;
445 		} else if (cspace(c)) {
446 			//skip over the remaining whitespace
447 			while (s<e && cspace(*s)) s++;
448 			add(.type = TOK_WHITE);
		//number: digit, or '.' followed by a digit (e.g. ".5")
451 		} else if (cdigit(c) || (c=='.' && s<e && cdigit(*s))) {
453 			s = read_cnumber(&tok, s-1, e, mq);
455 			tok.txt_size = s-orig;
456 			array_append(array, tok);
458 		} else if (csymbol(c) || cident(c)) {
459 			if (only_pound_include && (c=='"' || c=='<')) { //include string
461 				char end = c=='"' ? '"' : '>';
462 				short type = c=='"' ? TOK_STRING_IQUOTE : TOK_STRING_IANGLE;
464 				while (s<e && !creturn(*s) && *s!=end) s++;
465 				include = talloc_strndup(tl, orig+1, s-(orig+1));
467 				if (s<e && *s==end) {
470 					tok_msg_error(include_missing_terminator, orig,
471 						"Missing terminating %c character", end);
475 					{.include = include});
477 			} else if (c=='\'' || c=='\"') { //character or string literal
478 				array_char string = array_new(tl);
479 				s = read_cstring(&string, s, e, c, mq);
480 				if (s<e) s++; //advance past endquote (if available)
481 				add(.type = c=='\'' ? TOK_CHAR : TOK_STRING,
484 				if (c=='\'' && string.size==0) {
485 					tok_msg_error(empty_char_constant, orig,
486 						"Empty character constant");
489 			} else if (c=='/' && s<e && (*s=='*' || *s=='/')) { //comment
490 				if (*s++ == '*') { /* C-style comment */
491 					const char *comment_start = s-2;
495 						tok_msg_error(unterminated_comment, comment_start,
496 							"Unterminated comment");
499 						if (s[0]=='*' && s[1]=='/') {
504 					add(.type = TOK_CCOMMENT);
505 				} else { // C++-style comment
506 					while (s<e && !creturn(*s)) s++;
507 					add(.type = TOK_CPPCOMMENT);
511 			} else { //operator, keyword, or identifier
512 				struct dict_entry *ent;
				//ident_e: end of the longest identifier starting here
513 				const char *ident_e = --s;
514 				while (ident_e<e && cident(*ident_e) ) ident_e++;
516 				ent = dict_lookup(tokenizer_dict, &s, e);
517 				if (cident(c)) { //keyword or identifier
					//only a keyword if the dict match consumed the whole identifier
518 					if (ent && s==ident_e) {
519 						add(.type = TOK_KEYWORD,
521 						if (ent->id == INCLUDE) {
522 							//hacky way to lex #include string properly
							//walk back over ignored tokens to verify the
							//pattern: STARTLINE [ws] '#' [ws] "include"
523 							struct token *ts = array.item;
524 							struct token *tp = ts+array.size-1;
525 							while (tp>ts && token_is_ignored(tp-1))
527 							if (tp>ts && token_is_op(tp-1, '#')) {
529 								while (tp>ts && token_is_ignored(tp-1))
531 								if (tp>ts && tp[-1].type==TOK_STARTLINE) {
532 									only_pound_include = 1;
539 						add(.type = TOK_IDENTIFIER);
541 				} else if (ent) { //operator
542 					add(.type = TOK_OPERATOR,
544 				} else { //invalid symbol (shouldn't happen)
545 					tok_msg_bug(unrecognized_symbol, s,
546 						"Unrecognized symbol \'%c\'", c);
548 					add(.type = TOK_STRAY);
			//any non-whitespace token cancels the pending #include context
554 			only_pound_include = 0;
558 		tok_msg_error(stray_characters, NULL,
559 			"%lu stray characters in text", (unsigned long)stray_count);
562 		tok_msg_warn(nonstandard_newlines, NULL,
563 			"Text contains non-standard line terminators");
566 	finalize(tl, array.item, array.item+array.size);
/* Count the tokens in tl by walking the linked list from tl->first. */
571 size_t token_list_count(const struct token_list *tl) {
573 	const struct token *i;
575 	for (i=tl->first; i; i=i->next)
/*
 * Binary-search `lines` (an array of line-start pointers, line_count long)
 * for the line containing ptr; returns its index.  When several consecutive
 * entries point at the same position (possible for empty spliced lines),
 * the *last* equivalent line is chosen.
 */
581 static size_t find_line(const char *ptr, const char * const *lines, size_t line_count) {
582 	const char * const *orig = lines;
583 	const char * const *orig_e = lines+line_count;
585 	while (line_count > 1) {
586 		size_t middle = line_count>>1;
587 		if (ptr < lines[middle])
591 			line_count -= middle;
595 	//select the *last* of equivalent lines
596 	while (lines+1 < orig_e && lines[0]==lines[1])
599 	// (don't) select the *first* of equivalent lines
600 	//while (lines>orig && lines<orig_e && lines[-1]==lines[0])
/*
 * Resolve ptr — which may point into either tl->txt (unbroken text) or
 * tl->orig (original text) — to a tok_point carrying line, col, and the
 * corresponding positions in both buffers.  out is zeroed first.
 * Return value: nonzero on success (the elided tail presumably returns 0
 * when ptr is in neither buffer — TODO confirm against the full source).
 */
606 int tok_point_lookup(struct tok_point *out, const char *ptr,
607 		const struct token_list *tl) {
608 	size_t line_count = tl->olines_size;
610 	memset(out, 0, sizeof(*out));
	//case 1: ptr lies within the unbroken (txt) buffer
614 	if (ptr >= tl->txt && ptr <= tl->txt+tl->txt_size) {
616 		out->line = find_line(ptr, tl->tlines, line_count);
617 		if (out->line < line_count) {
618 			out->col = ptr - tl->tlines[out->line];
619 			out->orig = tl->olines[out->line] + out->col;
			//past the last line: map to end of original text
622 			out->orig = tl->orig + tl->orig_size;
	//case 2: ptr lies within the original buffer
625 	} else if (ptr >= tl->orig && ptr <= tl->orig+tl->orig_size) {
627 		out->line = find_line(ptr, tl->olines, line_count);
628 		if (out->line < line_count) {
629 			const char *tline_start = tl->tlines[out->line];
630 			const char *tline_end = out->line+1 < line_count ?
631 				tl->tlines[out->line+1] :
632 				tl->txt + tl->txt_size;
634 			out->col = ptr - tl->olines[out->line];
635 			out->txt = tline_start + out->col;
			//original lines can be longer than their unbroken counterparts
			//(backslash splices); clamp txt to the line's end
637 			if (out->txt > tline_end)
638 				out->txt = tline_end;
641 			out->txt = tl->txt + tl->txt_size;
/*
 * Render str[0..size) into *buf as a C-style escaped string (reusing and
 * resetting the caller's buffer) and return its contents.  Common escapes
 * get their short forms; other non-printable bytes are emitted as \xNN.
 */
649 static char *escape_string(array_char *buf, const char *str, size_t size) {
650 	const char *s = str, *e = s+size;
651 	array_from_lit(*buf, "");
655 		const char *esc = buffer;
656 		unsigned char c = (unsigned char)*s;
		//default rendering: hexadecimal escape into the scratch buffer
658 			sprintf(buffer, "\\x%02X", c);
660 			case '\t': esc = "\\t"; break;
661 			case '\n': esc = "\\n"; break;
662 			case '\v': esc = "\\v"; break;
663 			case '\f': esc = "\\f"; break;
664 			case '\r': esc = "\\r"; break;
665 			case '"': esc = "\\\""; break;
666 			case '\\': esc = "\\\\"; break;
671 		array_append_string(*buf, esc);
/*
 * Verify that txt (backslash-splice-free text) and orig (raw text) describe
 * the same content: compare them chunk by chunk, skipping over
 * backslash-newline sequences (with optional trailing spaces) in orig.
 * Returns nonzero iff both are fully consumed and all chunks matched.
 */
677 static int txt_orig_matches(const char *txt, size_t txt_size, const char *orig, size_t orig_size) {
678 	const char *ts = txt, *te = ts+txt_size;
679 	const char *os = orig, *oe = os+orig_size;
682 		const char *ob = os; //start of next backslash break
683 		const char *obe = os; //end of next backslash break
684 		size_t size; //amount of text to compare for this round
686 		while (ob<oe && *ob!='\\') ob++;
688 		if (obe < oe) { //there's a backslash
690 			while (obe<oe && cspace(*obe)) obe++;
691 			if (obe<oe && creturn(*obe)) { //there's a backslash-broken line
				//consume a possible 2-character newline pair
693 				if (obe<oe && *obe == '\n'+'\r'-obe[-1])
695 			} else //this is just a plain old backslash
		//compare this chunk; mismatch or overrun of txt means failure
701 		if (ts+size > te || memcmp(ts, os, size))
	//both sides must be exactly exhausted
707 	if (ts != te || os != oe)
/*
 * If s points at a backslash line splice (backslash, optional spaces, then
 * a newline of any style), report it — *end presumably receives the first
 * character after the splice (assignment elided here; TODO confirm).
 */
713 static int is_backslash_break(const char **end, const char *s, const char *e) {
714 	if (s<e && *s == '\\') {
716 		while (s<e && cspace(*s)) s++;
717 		if (s<e && creturn(*s)) {
			//consume the second char of "\r\n" / "\n\r" pairs
719 			if (s<e && *s=='\n'+'\r'-s[-1])
/* failed(): print a diagnostic to `err` and make the enclosing check fail. */
729 #define failed(fmt, ...) do {fprintf(err, fmt "\n", ##__VA_ARGS__); return 0; } while(0)
731 //tests that should pass on an untainted token list out of the tokenize() function
732 static int token_list_sanity_check_initial(const struct token_list *tl, FILE *err) {
733 	struct token *first = tl->first;
734 	struct token *last = tl->last;
736 	const char *txt=tl->txt, *orig=tl->orig;
737 	const char *txt_e = txt+tl->txt_size, *orig_e = orig+tl->orig_size;
	//first..last must be a contiguous, properly aligned array of tokens
739 	if ((char*)first > (char*)last ||
740 			(size_t)((char*)last - (char*)first) % sizeof(struct token))
741 		failed("Token list pointers don't look right");
743 	//token list should not end with TOK_STARTLINE unless
744 	//  the document is empty
745 	if (last!=first && last->type==TOK_STARTLINE)
748 	for (i=first; i; i=i->next) {
		//prev/next links must mirror array adjacency
750 		if (i != first && i->prev != i-1)
751 			failed("list.prev is incorrect");
752 		if (i != last && i->next != i+1)
753 			failed("list.next is incorrect");
755 		//Make sure txt segments fill the entire tl->txt
757 			failed("txt does not fill the token list");
760 			failed("txt is out of bounds");
762 		//Make sure orig segments fill the entire tl->orig
764 			failed("orig does not fill the token list");
765 		orig += i->orig_size;
767 			failed("orig is out of bounds");
/*
 * Full consistency check of a token list: structural invariants, per-token
 * line/col/txt/orig agreement, txt-vs-orig equivalence modulo backslash
 * splices, tok_point_lookup round-trips, and olines/tlines validity.
 * Writes a description of the first failure to `err` and returns 0; returns
 * nonzero when all checks pass.  NOTE(review): many interior lines are
 * elided in this extraction (including where `initial` is computed).
 */
778 int token_list_sanity_check(const struct token_list *tl, FILE *err) {
779 	struct token *first = tl->first;
780 	struct token *last = tl->last;
784 	if (tl->first == NULL || tl->last == NULL)
785 		failed("Token list is completely empty");
	//the list must open with an empty TOK_STARTLINE at position (0,0)
787 	if (first->type!=TOK_STARTLINE ||
788 			first->txt!=tl->txt || first->txt_size!=0 ||
789 			first->orig!=tl->orig || first->orig_size!=0 ||
790 			first->line!=0 || first->col!=0)
791 		failed("Token list does not start with a valid TOK_STARTLINE");
793 	if (first->prev!=NULL || last->next!=NULL)
794 		failed("Token edge links are not NULL");
796 	for (i=first; i; i=i->next) {
		//line/col must agree with both text buffers
798 		if (tl->tlines[i->line] + i->col != i->txt)
799 			failed("line,col is wrong against txt");
800 		if (tl->olines[i->line] + i->col != i->orig)
801 			failed("line,col is wrong against orig");
803 		//Make sure tokens have proper sizes
804 		if (i->type!=TOK_STARTLINE && (i->txt_size==0 || i->orig_size==0 || i->txt_size > i->orig_size) )
805 			failed("Token is empty");
806 		if (i->type==TOK_STARTLINE && (i->txt_size!=0 || i->orig_size!=0) )
807 			failed("TOK_STARTLINE is non-empty");
809 		//Make sure TOK_WHITE actually contains white tokens
810 		if (i->type==TOK_WHITE) {
811 			const char *s = i->txt, *e = s+i->txt_size;
812 			while (s<e && cwhite(*s)) s++;
814 				failed("TOK_WHITE does not contain only white characters");
817 		//Make sure txt and orig match exactly except for backslash line breaks
818 		if (!txt_orig_matches(i->txt, i->txt_size, i->orig, i->orig_size)) {
819 			array_char buf = array_new(NULL);
821 				"txt and orig do not match:\n"
823 				escape_string(&buf, i->txt, i->txt_size) );
824 			fprintf(err, "\torig = \"%s\"\n",
825 				escape_string(&buf, i->orig, i->orig_size) );
831 		//Make sure tok_point_lookup returns correct point
833 			struct tok_point tok_point;
834 			const char *t=i->txt, *o=i->orig, *e=o+i->orig_size, *p;
835 			size_t line=i->line, col=i->col;
			/* check(ptr): look up ptr and verify it maps back to the expected
			   txt/orig/line/col; on an initial (unmodified) list the lookup
			   itself must also succeed. */
837 #define check(ptr) do { \
838 	if (tok_point_lookup(&tok_point, ptr, tl)) { \
839 		if (tok_point.txt != t || tok_point.orig != o) \
840 			failed("tok_point_lookup on txt reported incorrect txt/orig (orig is %d, should be %d)", \
841 				(int)(tok_point.orig-i->orig), (int)(o-i->orig)); \
842 		if (tok_point.line != line || tok_point.col != col) \
843 			failed("tok_point_lookup on txt reported incorrect line/col (off by %d, %d)", \
844 				(int)(tok_point.line-line), (int)(tok_point.col-col)); \
845 	} else if (initial) {\
846 		failed("tok_point_lookup failed on initial token list"); \
		//walk orig, skipping backslash splices, checking each position
851 			while (is_backslash_break(&p, o, e)) {
			//skip the second character of a 2-char newline pair
865 				if (p<e && *p=='\n'+'\r'-p[-1])
879 			} while (o<e && *o!='\\');
886 	//Verify olines and tlines
888 		const char *s = tl->orig, *e = s+tl->orig_size;
889 		size_t i, line_count = tl->olines_size;
891 		//both line arrays should be exactly the same size
892 		if (tl->olines_size != tl->tlines_size)
895 		for (i=0; s<e; i++) {
896 			const char *line_start = s, *line_end;
897 			size_t tline_size, oline_size;
			//size of unbroken line i, bounded by the next line or end of txt
900 			if (i+1 < line_count)
901 				tline_size = tl->tlines[i+1] - tl->tlines[i];
903 				tline_size = tl->txt+tl->txt_size - tl->tlines[i];
			//scan to the end of original line i (any newline style)
905 			while (s<e && !creturn(*s)) s++;
909 				if (s<e && *s=='\n'+'\r'-s[-1])
913 			oline_size = s-line_start;
915 			//verify that olines elements are correct
916 			if (line_start != tl->olines[i])
919 			//verify that tlines elements are in range
921 			if (p < tl->txt || p+tline_size > tl->txt+tl->txt_size)
924 			//verify that original lines have sizes >= the unbroken lines
925 			if (oline_size < tline_size)
928 			//if sizes are inconsistent, make sure it is due to a backslash escape
929 			if (oline_size > tline_size) {
930 				p = line_start+tline_size;
933 				while (p<e && cspace(*p)) p++;
938 			//make sure the text of both copies match
	//re-run the stricter initial-list checks when applicable
947 	if (initial && !token_list_sanity_check_initial(tl, err))
948 		failed("Initial sanity checks failed. Has the list been modified after it was returned from tokenize() ?");
/* Render token flags into buf as a short code: 'p' = preprocessor line,
 * 'D' = preprocessor directive token, '-' otherwise. */
955 static char *sprint_token_flags(char buf[3], struct token_flags flags) {
956 	buf[0] = flags.pp ? 'p' : '-';
957 	buf[1] = flags.pp_directive ? 'D' : '-';
/* Debug dump: print every token in tl to f as
 * index, type name, flag codes, escaped txt (and, when enabled, escaped orig). */
962 void token_list_dump(const struct token_list *tl, FILE *f) {
964 	array_char buf = array_new(NULL);
	//names indexed by token type for the dump
967 	const char *token_type_str[] = {
985 	for (tok=tl->first; tok; tok=tok->next) {
986 		fprintf(f, "%lu\t%s\t%s\t\"%s\"", (unsigned long)(i++),
987 			token_type_str[tok->type],
988 			sprint_token_flags(buf2, tok->flags),
989 			escape_string(&buf, tok->txt, tok->txt_size));
990 		#if 1 //print tok->orig
991 		fprintf(f, "\t\"%s\"\n", escape_string(&buf, tok->orig, tok->orig_size));
/*
 * Print one tokenizer message to stdout.  When the message's location can
 * be resolved against tl, the output includes filename:line:col; otherwise
 * a location-less form is used (elided branch).  The severity label is
 * derived from m->level in both branches.
 */
1000 void tok_message_print(struct tok_message *m, struct token_list *tl) {
1001 	struct tok_point pt;
1002 	int resolved = tok_point_lookup(&pt, m->location, tl);
1005 		printf("%s:%s", tl->filename, resolved ? "" : " ");
	//resolved location: 1-based line:col prefix
1009 		printf("%zu:%zu %s: %s\n",
1010 			pt.line+1, pt.col+1,
1011 			m->level==TM_DEBUG ? "debug" :
1012 			m->level==TM_INFO ? "info" :
1013 			m->level==TM_WARN ? "warning" :
1014 			m->level==TM_ERROR ? "error" :
1015 			m->level==TM_BUG ? "BUG" :
	//unresolved location: same severity mapping, no line:col
1020 			m->level==TM_DEBUG ? "debug" :
1021 			m->level==TM_INFO ? "info" :
1022 			m->level==TM_WARN ? "warning" :
1023 			m->level==TM_ERROR ? "error" :
1024 			m->level==TM_BUG ? "BUG" :
/* Print one message to stdout as "level: path: message" without resolving
 * its location. */
1030 void tok_message_dump(struct tok_message *m) {
1031 	printf("%s: %s: %s\n",
1032 		m->level==TM_DEBUG ? "debug" :
1033 		m->level==TM_INFO ? "info" :
1034 		m->level==TM_WARN ? "warning" :
1035 		m->level==TM_ERROR ? "error" :
1036 		m->level==TM_BUG ? "BUG" :
1037 		"???", m->path, m->message);
/*
 * Append a printf-formatted message to mq.  The message text is allocated
 * with talloc under mq->item, so it lives as long as the queue's storage.
 */
1040 void tok_message_add(tok_message_queue *mq, enum tok_message_level level,
1041 		const char *path, const char *loc, const char *fmt, ...) {
1042 	struct tok_message msg = {.level=level, .path=path, .location=loc};
1049 	msg.message = talloc_vasprintf(mq->item, fmt, ap);
/* Dump every queued message to stdout via tok_message_dump. */
1055 void tok_message_queue_dump(const tok_message_queue *mq) {
1057 	for (i=0; i<queue_count(*mq); i++)
1058 		tok_message_dump(&queue_item(*mq, i));