2 Copyright (c) 2009 Joseph A. Adams
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
8 1. Redistributions of source code must retain the above copyright
9 notice, this list of conditions and the following disclaimer.
10 2. Redistributions in binary form must reproduce the above copyright
11 notice, this list of conditions and the following disclaimer in the
12 documentation and/or other materials provided with the distribution.
13 3. The name of the author may not be used to endorse or promote products
14 derived from this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "ccan_tokenizer.h"
30 #include <ccan/talloc/talloc.h>
34 //Shown by operator precedence; based on
35 // http://tigcc.ticalc.org/doc/opers.html#precedence .
// NOTE(review): this view of the file appears to have lines elided, and each
// line is prefixed with a leftover line number from an extraction. Comments
// below describe only what is visible; many table entries are missing here.
//
// Dictionary mapping token ids (operator chars / *_OP / keyword enums) to
// their C source spellings. Built into a dict via dict_build() and used by
// dict_lookup() in tokenize() to recognize operators and keywords.
37 static struct dict_entry c_dictionary[] = {
// --- operators, grouped roughly by precedence (see header comment above) ---
46 {'!',"!"}, {'~',"~"}, //prefix
47 {INC_OP,"++"}, {DEC_OP,"--"}, //prefix or postfix
58 {LEFT_OP,"<<"}, {RIGHT_OP,">>"},
62 {LE_OP,"<="}, {GE_OP,">="},
65 {EQ_OP,"=="}, {NE_OP,"!="},
84 {MUL_ASSIGN,"*="}, {DIV_ASSIGN,"/="}, {MOD_ASSIGN,"%="},
85 {ADD_ASSIGN,"+="}, {SUB_ASSIGN,"-="},
86 {AND_ASSIGN,"&="}, {XOR_ASSIGN,"^="}, {OR_ASSIGN,"|="},
87 {LEFT_ASSIGN,"<<="}, {RIGHT_ASSIGN,">>="},
102 {'+',"+"}, {'-',"-"},
103 {'&',"&"}, {'*',"*"},
// --- C language keywords (partial list visible) ---
107 {_COMPLEX, "_Complex"},
108 {_IMAGINARY, "_Imaginary"},
113 {CONTINUE, "continue"},
114 {DEFAULT, "default"},
127 {REGISTER, "register"},
128 {RESTRICT, "restrict"},
136 {TYPEDEF, "typedef"},
138 {UNSIGNED, "unsigned"},
140 {VOLATILE, "volatile"},
143 //Preprocessor keywords (except those already defined)
144 {VA_ARGS, "__VA_ARGS__"},
153 {INCLUDE, "include"},
157 {WARNING, "warning"},
// Allocate a tokenizer under talloc context `ctx`, initialize its message
// queue (owned by `t`), and build its keyword/operator dictionary from
// c_dictionary. NOTE(review): the return statement and closing brace are
// not visible in this elided view — presumably returns `t`; confirm.
162 struct tokenizer *tokenizer_new(void *ctx) {
163 struct tokenizer *t = talloc(ctx, struct tokenizer);
165 queue_init(t->mq, t);
166 t->dict = dict_build(t, c_dictionary, sizeof(c_dictionary)/sizeof(*c_dictionary));
// Prefix used for message paths emitted by the tok_msg_* macros below.
173 #define MESSAGE_PATH "tokenize/"
// Build tl->txt: a copy of tl->orig with backslash-newline line splices
// removed, plus a trailing NUL. Also records, per line, the start pointer in
// the original text (tl->olines) and in the unbroken text (tl->tlines).
// Warns via `mq` when spaces appear between a backslash and the newline.
// NOTE(review): interior loop scaffolding is elided in this view.
175 static void unbreak_backslash_broken_lines(struct token_list *tl, tok_message_queue *mq) {
176 const char *s = tl->orig, *e = s+tl->orig_size;
177 array_char txt = array_new(tl);
178 array(const char*) olines = array_new(tl);
179 array(const char*) tlines = array_new(tl);
182 const char *line_start = s, *line_end;
183 const char *lnw; //last non-white
184 size_t start_offset = txt.size;
186 //scan to the next line and find the last non-white character in the line
187 while (s<e && !creturn(*s)) s++;
190 while (lnw>line_start && cspace(lnw[-1])) lnw--;
191 if (s<e && creturn(*s)) {
193 //check for non-standard newlines (i.e. "\r", "\r\n", or "\n\r")
// The arithmetic '\n'+'\r'-s[-1] matches the opposite CR/LF character,
// so a two-character mixed terminator is consumed as one newline.
194 if (s<e && *s=='\n'+'\r'-s[-1])
198 //add the backslash-break-free version of the text
199 if (lnw>line_start && lnw[-1]=='\\' && line_end<e) {
// Drop the backslash itself (lnw-1) when appending the spliced line.
200 array_append_items(txt, line_start, lnw-1-line_start);
201 if (lnw<e && cspace(*lnw)) {
202 tok_msg_warn(spaces_after_backslash_break, lnw,
203 "Trailing spaces after backslash-broken line");
206 array_append_items(txt, line_start, s-line_start);
208 //add the line starts for this line
209 array_append(olines, line_start);
210 array_append(tlines, (const char*)start_offset);
211 //Since the txt buffer moves when expanded, we're storing offsets
212 // for now. Once we're done building txt, we can add the base
213 // of it to all the offsets to make them pointers.
216 //stick a null terminator at the end of the text
217 array_realloc(txt, txt.size+1);
218 txt.item[txt.size] = 0;
220 //convert the line start offsets to pointers
221 array_for(i, tlines, *i = txt.item + (size_t)*i);
// Publish the built arrays into the token list (tl owns them via talloc).
223 tl->olines = olines.item;
224 tl->olines_size = olines.size;
226 tl->txt_size = txt.size;
227 tl->tlines = tlines.item;
228 tl->tlines_size = tlines.size;
// Demote a keyword token to an identifier when the keyword is only
// meaningful inside a preprocessor directive (or is __VA_ARGS__), since
// outside that context such words are ordinary identifiers.
231 static void normal_keyword(struct token *tok) {
232 if (tok->type==TOK_KEYWORD &&
233 (opkw_is_directive_only(tok->opkw) || tok->opkw==VA_ARGS))
234 tok->type = TOK_IDENTIFIER;
// Return nonzero if the last non-ignored token in [start, end) is the
// "..." operator — i.e. the macro parameter list ends with an ellipsis.
237 static int define_parmlist_has_ellipsis(struct token *start, struct token *end) {
// Skip trailing ignored tokens (whitespace/comments), then test the last one.
238 while (end>start && token_is_ignored(end-1)) end--;
239 return (end-->start && end->type==TOK_OPERATOR && end->opkw==ELLIPSIS);
242 //Used to label __VA_ARGS__ as keywords within applicable macro expansions
243 //Start should follow the DEFINE directive keyword
// Scans a #define line: skips to the macro name, detects a variadic
// parameter list "(... )", and within the expansion of a variadic macro
// demotes directive-only keywords to identifiers (leaving __VA_ARGS__ as
// a keyword). NOTE(review): some interior lines are elided in this view.
244 static void this_is_a_define(struct token *start, struct token *end) {
245 struct token *i = start, *pl_start;
247 //skip past the identifier that is defined
248 while (i<end && token_is_ignored(i)) i++;
251 //TODO: check i->type to make sure it's an identifier, throw error otherwise
254 //see if this is actually a variadic macro
255 if (!(i<end && i->type==TOK_OPERATOR && i->opkw=='('))
// Advance to the closing ')' of the parameter list.
258 while (i<end && !(i->type==TOK_OPERATOR && i->opkw==')'))
260 if (!define_parmlist_has_ellipsis(pl_start, i++))
263 //We have arrived at the macro expansion and know there is a ... argument
264 //Thus, we'll only change directive-only keywords to identifiers
266 if (i->type==TOK_KEYWORD && opkw_is_directive_only(i->opkw))
267 i->type = TOK_IDENTIFIER;
275 //fill the flags field of each token and untangle keywords and such
// Second pass over one line's tokens [start, end). If the line begins with
// '#', marks it as a preprocessor line (TOK_LEADING_POUND, pp flags on all
// tokens), validates the directive keyword after the '#', and for #define
// hands the rest of the line to this_is_a_define().
// NOTE(review): the non-directive path and closing braces are elided here.
276 static void finalize_line(struct token *start, struct token *end) {
277 struct token *i = start, *j;
279 assert(start<end && start->type==TOK_STARTLINE);
282 while (i<end && token_is_ignored(i)) i++;
284 if (i<end && i->type==TOK_OPERATOR && i->opkw=='#') {
286 i->type = TOK_LEADING_POUND;
288 //set pp on all tokens in this line
289 for (j=start; j<end; j++)
292 //find the relevant token after the '#'
293 for (i++; i<end; i++) {
294 if (!token_is_ignored(i)) {
295 i->flags.pp_directive = 1;
// A keyword that is not a valid directive name becomes an identifier.
296 if (i->type==TOK_KEYWORD && !opkw_is_directive(i->opkw))
297 i->type = TOK_IDENTIFIER;
298 //TODO: Handle invalid preprocessor directives (e.g. #+ )
300 if (i->type==TOK_KEYWORD && i->opkw==DEFINE) {
301 for (j=i+1; j<end; j++)
302 this_is_a_define(i+1, end);
317 //fill the list, flags, line, col, orig, and orig_size fields of each token
318 //convert identifiers mistaken for preprocessor keywords (e.g. ifdef) to identifiers
// Walks tokens [start, end): links prev/next, maps each token's txt pointer
// back to a (line, col) via the tlines array, derives its orig pointer from
// olines, sizes each token's orig span from its successor, zeroes flags,
// and runs finalize_line() on each completed line.
// NOTE(review): loop scaffolding is partially elided in this view.
319 static void finalize(struct token_list *tl, struct token *start, struct token *end) {
320 const char * const *lss = tl->tlines;
321 const char * const *lse = lss + tl->tlines_size;
323 struct token *startline = NULL;
330 for (i=start; ; i++) {
331 //perform a second pass on each line
332 if (i >= end || i->type == TOK_STARTLINE) {
334 finalize_line(startline, i);
// The final token's orig span runs to the end of the original text.
339 end[-1].orig_size = tl->orig+tl->orig_size - end[-1].orig;
343 //set up the list links
344 i->prev = i>start ? i-1 : NULL;
345 i->next = i+1<end ? i+1 : NULL;
347 //if i->txt starts on a later line, advance to it
348 while (lss+1<lse && i->txt >= lss[1] && i->txt > lss[0])
351 //set up line, col, orig, and orig_size
352 i->line = lss - tl->tlines;
353 i->col = i->txt - *lss;
354 i->orig = tl->olines[i->line] + i->col;
// Previous token's orig span ends where this one's begins.
356 i[-1].orig_size = i->orig - i[-1].orig;
358 assert(i->line < tl->olines_size);
361 memset(&i->flags, 0, sizeof(i->flags));
// add(...) — append a token to `array` using designated initializers for
// the fields given, then fill in txt_size from the cursor (s) and token
// start (orig) locals of the enclosing tokenize() loop.
// NOTE(review): relies on `s`, `orig`, and `array` being in scope at the
// expansion site; some macro lines appear elided in this view.
365 #define add(...) do { \
366 struct token tok = {__VA_ARGS__}; \
368 tok.txt_size = s-orig; \
369 array_append(array, tok); \
// cstray: characters that are never valid in C source outside strings.
// cident: characters valid inside an identifier.
372 #define cstray(c) (ccontrol(c) || cextended(c) || (c)=='@' || (c)=='`' || (c)=='\\')
373 #define cident(c) (cletter(c) || cdigit(c) || c=='_' || c=='$')
374 //believe it or not, $ is a valid character in an identifier
// Lazily-built global dictionary shared by all tokenize() calls; freed at
// process exit via the atexit() handler registered in tokenize().
376 struct dict *tokenizer_dict = NULL;
378 static void free_tokenizer_dict(void) {
379 talloc_free(tokenizer_dict);
// Main lexer. Splices backslash-broken lines into tl->txt, then scans it
// producing a flat token array: TOK_STARTLINE markers, whitespace, numbers,
// strings/chars, comments, operators/keywords/identifiers, #include paths,
// and stray bytes. Emits diagnostics to `mq`. Finishes by calling
// finalize() to link tokens and fill line/col/orig info.
// NOTE(review): many interior lines (loop header, branch bodies, return)
// are elided in this view; comments cover only what is visible.
382 struct token_list *tokenize(const char *orig, size_t orig_size,
383 tok_message_queue *mq) {
384 struct token_list *tl = talloc(orig, struct token_list);
386 size_t stray_count=0, cr_count=0;
387 array(struct token) array = array_new(tl);
388 int only_pound_include = 0;
// Build the shared keyword/operator dictionary on first use.
390 if (!tokenizer_dict) {
391 tokenizer_dict = dict_build(NULL, c_dictionary,
392 sizeof(c_dictionary)/sizeof(*c_dictionary));
393 atexit(free_tokenizer_dict);
397 tl->orig_size = orig_size;
398 unbreak_backslash_broken_lines(tl, mq);
402 e = s + tl->txt_size;
// Every document starts with a TOK_STARTLINE marker.
404 array_appends(array, {
405 .type = TOK_STARTLINE,
411 const char *orig = s;
413 int added_something = 1;
// --- stray (invalid) characters ---
417 while (s<e && cstray(*s)) {
421 add(.type = TOK_STRAY);
423 /* This has the potential to be very noisy on binary
424 files, but it really is quite useful. */
425 tok_msg_error(stray_segment, orig,
426 "%zu stray characters", s-orig);
// --- newline: emit TOK_WHITE plus a TOK_STARTLINE for the next line ---
428 } else if (creturn(c)) {
429 //check for non-standard newlines (i.e. "\r", "\r\n", or "\n\r")
430 if (s<e && *s=='\n'+'\r'-c) {
436 add(.type = TOK_WHITE);
439 //add a TOK_STARTLINE for the next line unless this is the end of the document
441 add(.type = TOK_STARTLINE);
443 only_pound_include = 0;
// --- run of non-newline whitespace ---
445 } else if (cspace(c)) {
446 //skip over the remaining whitespace
447 while (s<e && cspace(*s)) s++;
448 add(.type = TOK_WHITE);
// --- numeric literal (digit, or '.' followed by a digit) ---
451 } else if (cdigit(c) || (c=='.' && s<e && cdigit(*s))) {
453 s = read_cnumber(&tok, s-1, e, mq);
455 tok.txt_size = s-orig;
456 array_append(array, tok);
458 } else if (csymbol(c) || cident(c)) {
// Special case: after "#include", lex "..." or <...> as an include string.
459 if (only_pound_include && (c=='"' || c=='<')) { //include string
461 char end = c=='"' ? '"' : '>';
462 short type = c=='"' ? TOK_STRING_IQUOTE : TOK_STRING_IANGLE;
464 while (s<e && !creturn(*s) && *s!=end) s++;
465 include = talloc_strndup(tl, orig+1, s-(orig+1));
467 if (s<e && *s==end) {
470 tok_msg_error(include_missing_terminator, orig,
471 "Missing terminating %c character", end);
475 {.include = include});
476 } else if (c=='\'' || c=='\"') { //character or string literal
477 array_char string = array_new(tl);
478 s = read_cstring(&string, s, e, c, mq);
479 if (s<e) s++; //advance past endquote (if available)
480 add(.type = c=='\'' ? TOK_CHAR : TOK_STRING,
482 } else if (c=='/' && s<e && (*s=='*' || *s=='/')) { //comment
483 if (*s++ == '*') { /* C-style comment */
484 const char *comment_start = s-2;
488 tok_msg_error(unterminated_comment, comment_start,
489 "Unterminated comment");
492 if (s[0]=='*' && s[1]=='/') {
497 add(.type = TOK_CCOMMENT);
498 } else { // C++-style comment
499 while (s<e && !creturn(*s)) s++;
500 add(.type = TOK_CPPCOMMENT);
504 } else { //operator, keyword, or identifier
505 struct dict_entry *ent;
506 const char *ident_e = --s;
507 while (ident_e<e && cident(*ident_e) ) ident_e++;
509 ent = dict_lookup(tokenizer_dict, &s, e);
510 if (cident(c)) { //keyword or identifier
// A dictionary hit only counts as a keyword if it consumed the
// whole identifier (s==ident_e); otherwise it's an identifier.
511 if (ent && s==ident_e) {
512 add(.type = TOK_KEYWORD,
514 if (ent->id == INCLUDE) {
515 //hacky way to lex #include string properly
// Walk back over ignored tokens to verify the pattern
// STARTLINE '#' "include" before arming only_pound_include.
516 struct token *ts = array.item;
517 struct token *tp = ts+array.size-1;
518 while (tp>ts && token_is_ignored(tp-1))
520 if (tp>ts && token_is_op(tp-1, '#')) {
522 while (tp>ts && token_is_ignored(tp-1))
524 if (tp>ts && tp[-1].type==TOK_STARTLINE) {
525 only_pound_include = 1;
532 add(.type = TOK_IDENTIFIER);
534 } else if (ent) { //operator
535 add(.type = TOK_OPERATOR,
537 } else { //invalid symbol (shouldn't happen)
538 tok_msg_bug(unrecognized_symbol, s,
539 "Unrecognized symbol \'%c\'", c);
541 add(.type = TOK_STRAY);
547 only_pound_include = 0;
// Summary diagnostics emitted once at the end of the scan.
551 tok_msg_error(stray_characters, NULL,
552 "%lu stray characters in text", (unsigned long)stray_count);
555 tok_msg_warn(nonstandard_newlines, NULL,
556 "Text contains non-standard line terminators");
559 finalize(tl, array.item, array.item+array.size);
// Count the tokens in a list by walking the next links.
// NOTE(review): the counter increment and return are elided in this view.
564 size_t token_list_count(const struct token_list *tl) {
566 const struct token *i;
568 for (i=tl->first; i; i=i->next)
// Binary-search `lines` (an ascending array of line-start pointers) for the
// line containing `ptr`, returning its index. When several consecutive
// entries share the same start pointer (empty spliced lines), the last of
// the equivalent lines is chosen.
574 static size_t find_line(const char *ptr, const char * const *lines, size_t line_count) {
575 const char * const *orig = lines;
576 const char * const *orig_e = lines+line_count;
578 while (line_count > 1) {
579 size_t middle = line_count>>1;
580 if (ptr < lines[middle])
584 line_count -= middle;
588 //select the *last* of equivalent lines
589 while (lines+1 < orig_e && lines[0]==lines[1])
592 // (don't) select the *first* of equivalent lines
593 //while (lines>orig && lines<orig_e && lines[-1]==lines[0])
// Resolve a pointer into either the unbroken text (tl->txt) or the original
// text (tl->orig) to a full tok_point: line, column, and matching txt/orig
// pointers. Returns via `out`; the int return value's success convention is
// not visible in this elided view — confirm against the header.
599 int tok_point_lookup(struct tok_point *out, const char *ptr,
600 const struct token_list *tl) {
601 size_t line_count = tl->olines_size;
603 memset(out, 0, sizeof(*out));
// Case 1: ptr lies within the unbroken (spliced) text buffer.
607 if (ptr >= tl->txt && ptr <= tl->txt+tl->txt_size) {
609 out->line = find_line(ptr, tl->tlines, line_count);
610 if (out->line < line_count) {
611 out->col = ptr - tl->tlines[out->line];
612 out->orig = tl->olines[out->line] + out->col;
615 out->orig = tl->orig + tl->orig_size;
// Case 2: ptr lies within the original text buffer.
618 } else if (ptr >= tl->orig && ptr <= tl->orig+tl->orig_size) {
620 out->line = find_line(ptr, tl->olines, line_count);
621 if (out->line < line_count) {
622 const char *tline_start = tl->tlines[out->line];
623 const char *tline_end = out->line+1 < line_count ?
624 tl->tlines[out->line+1] :
625 tl->txt + tl->txt_size;
627 out->col = ptr - tl->olines[out->line];
628 out->txt = tline_start + out->col;
// Clamp: an orig position past a backslash splice maps to line end.
630 if (out->txt > tline_end)
631 out->txt = tline_end;
634 out->txt = tl->txt + tl->txt_size;
// Render `str` (size bytes, may contain NULs) into `buf` as a C-style
// escaped string: named escapes for common control characters and quotes,
// \xNN for other non-printables. NOTE(review): the surrounding loop, the
// printable-character path, and the return are elided in this view.
642 static char *escape_string(array_char *buf, const char *str, size_t size) {
643 const char *s = str, *e = s+size;
644 array_from_lit(*buf, "");
648 const char *esc = buffer;
649 unsigned char c = (unsigned char)*s;
// Default: hex escape; overridden below for characters with named escapes.
651 sprintf(buffer, "\\x%02X", c);
653 case '\t': esc = "\\t"; break;
654 case '\n': esc = "\\n"; break;
655 case '\v': esc = "\\v"; break;
656 case '\f': esc = "\\f"; break;
657 case '\r': esc = "\\r"; break;
658 case '"': esc = "\\\""; break;
659 case '\\': esc = "\\\\"; break;
664 array_append_string(*buf, esc);
// Verify that a token's unbroken text (txt) equals its original text (orig)
// once backslash-newline splices in orig are skipped. Returns nonzero on a
// full match, zero otherwise. NOTE(review): the loop structure and several
// advancing statements are elided in this view.
670 static int txt_orig_matches(const char *txt, size_t txt_size, const char *orig, size_t orig_size) {
671 const char *ts = txt, *te = ts+txt_size;
672 const char *os = orig, *oe = os+orig_size;
675 const char *ob = os; //start of next backslash break
676 const char *obe = os; //end of next backslash break
677 size_t size; //amount of text to compare for this round
679 while (ob<oe && *ob!='\\') ob++;
681 if (obe < oe) { //there's a backslash
683 while (obe<oe && cspace(*obe)) obe++;
684 if (obe<oe && creturn(*obe)) { //there's a backslash-broken line
// Consume a two-character mixed CR/LF terminator as one newline.
686 if (obe<oe && *obe == '\n'+'\r'-obe[-1])
688 } else //this is just a plain old backslash
// Compare this chunk of txt against the corresponding chunk of orig.
694 if (ts+size > te || memcmp(ts, os, size))
// Both cursors must be exhausted simultaneously for a match.
700 if (ts != te || os != oe)
// Test whether `s` begins a backslash line splice (backslash, optional
// spaces, newline). On success *end is presumably set past the break —
// the assignment and returns are elided in this view; confirm.
706 static int is_backslash_break(const char **end, const char *s, const char *e) {
707 if (s<e && *s == '\\') {
709 while (s<e && cspace(*s)) s++;
710 if (s<e && creturn(*s)) {
// Consume a two-character mixed CR/LF terminator as one newline.
712 if (s<e && *s=='\n'+'\r'-s[-1])
// failed(fmt, ...) — print a diagnostic to the sanity-check's `err` stream
// and return 0 (failure) from the enclosing function.
722 #define failed(fmt, ...) do {fprintf(err, fmt "\n", ##__VA_ARGS__); return 0; } while(0)
724 //tests that should pass on an untainted token list out of the tokenize() function
// Structural invariants that hold only for a freshly-tokenized list: tokens
// are laid out contiguously in one array (prev/next are i-1/i+1), and the
// txt/orig spans of successive tokens exactly tile tl->txt and tl->orig.
// Returns 0 (via failed()) on the first violation.
// NOTE(review): several advancing/return lines are elided in this view.
725 static int token_list_sanity_check_initial(const struct token_list *tl, FILE *err) {
726 struct token *first = tl->first;
727 struct token *last = tl->last;
729 const char *txt=tl->txt, *orig=tl->orig;
730 const char *txt_e = txt+tl->txt_size, *orig_e = orig+tl->orig_size;
// first/last must be a valid span within one array of struct token.
732 if ((char*)first > (char*)last ||
733 (size_t)((char*)last - (char*)first) % sizeof(struct token))
734 failed("Token list pointers don't look right");
736 //token list should not end with TOK_STARTLINE unless
737 // the document is empty
738 if (last!=first && last->type==TOK_STARTLINE)
741 for (i=first; i; i=i->next) {
743 if (i != first && i->prev != i-1)
744 failed("list.prev is incorrect");
745 if (i != last && i->next != i+1)
746 failed("list.next is incorrect");
748 //Make sure txt segments fill the entire tl->txt
750 failed("txt does not fill the token list");
753 failed("txt is out of bounds");
755 //Make sure orig segments fill the entire tl->orig
757 failed("orig does not fill the token list");
758 orig += i->orig_size;
760 failed("orig is out of bounds");
// Full consistency audit of a token list: link integrity, line/col vs.
// txt/orig agreement, token sizing rules, TOK_WHITE content, txt/orig
// equivalence modulo backslash splices, tok_point_lookup round-trips, and
// olines/tlines structure. Returns 0 via failed() on the first violation.
// NOTE(review): substantial interior lines (including the `initial` flag's
// definition and the final return) are elided in this view.
771 int token_list_sanity_check(const struct token_list *tl, FILE *err) {
772 struct token *first = tl->first;
773 struct token *last = tl->last;
777 if (tl->first == NULL || tl->last == NULL)
778 failed("Token list is completely empty");
// The list must open with an empty TOK_STARTLINE at position (0,0).
780 if (first->type!=TOK_STARTLINE ||
781 first->txt!=tl->txt || first->txt_size!=0 ||
782 first->orig!=tl->orig || first->orig_size!=0 ||
783 first->line!=0 || first->col!=0)
784 failed("Token list does not start with a valid TOK_STARTLINE");
786 if (first->prev!=NULL || last->next!=NULL)
787 failed("Token edge links are not NULL");
789 for (i=first; i; i=i->next) {
// line/col must be consistent with both the txt and orig pointers.
791 if (tl->tlines[i->line] + i->col != i->txt)
792 failed("line,col is wrong against txt");
793 if (tl->olines[i->line] + i->col != i->orig)
794 failed("line,col is wrong against orig");
796 //Make sure tokens have proper sizes
797 if (i->type!=TOK_STARTLINE && (i->txt_size==0 || i->orig_size==0 || i->txt_size > i->orig_size) )
798 failed("Token is empty");
799 if (i->type==TOK_STARTLINE && (i->txt_size!=0 || i->orig_size!=0) )
800 failed("TOK_STARTLINE is non-empty");
802 //Make sure TOK_WHITE actually contains white tokens
803 if (i->type==TOK_WHITE) {
804 const char *s = i->txt, *e = s+i->txt_size;
805 while (s<e && cwhite(*s)) s++;
807 failed("TOK_WHITE does not contain only white characters");
810 //Make sure txt and orig match exactly except for backslash line breaks
811 if (!txt_orig_matches(i->txt, i->txt_size, i->orig, i->orig_size)) {
812 array_char buf = array_new(NULL);
814 "txt and orig do not match:\n"
816 escape_string(&buf, i->txt, i->txt_size) );
817 fprintf(err, "\torig = \"%s\"\n",
818 escape_string(&buf, i->orig, i->orig_size) );
824 //Make sure tok_point_lookup returns correct point
826 struct tok_point tok_point;
827 const char *t=i->txt, *o=i->orig, *e=o+i->orig_size, *p;
828 size_t line=i->line, col=i->col;
// check(ptr): assert tok_point_lookup resolves ptr to the expected
// txt/orig/line/col; lookup failure is only fatal on an initial list.
830 #define check(ptr) do { \
831 if (tok_point_lookup(&tok_point, ptr, tl)) { \
832 if (tok_point.txt != t || tok_point.orig != o) \
833 failed("tok_point_lookup on txt reported incorrect txt/orig (orig is %d, should be %d)", \
834 (int)(tok_point.orig-i->orig), (int)(o-i->orig)); \
835 if (tok_point.line != line || tok_point.col != col) \
836 failed("tok_point_lookup on txt reported incorrect line/col (off by %d, %d)", \
837 (int)(tok_point.line-line), (int)(tok_point.col-col)); \
838 } else if (initial) {\
839 failed("tok_point_lookup failed on initial token list"); \
844 while (is_backslash_break(&p, o, e)) {
858 if (p<e && *p=='\n'+'\r'-p[-1])
872 } while (o<e && *o!='\\');
879 //Verify olines and tlines
881 const char *s = tl->orig, *e = s+tl->orig_size;
882 size_t i, line_count = tl->olines_size;
884 //both line arrays should be exactly the same size
885 if (tl->olines_size != tl->tlines_size)
888 for (i=0; s<e; i++) {
889 const char *line_start = s, *line_end;
890 size_t tline_size, oline_size;
// tline size: distance to next tline start, or to end of txt for last.
893 if (i+1 < line_count)
894 tline_size = tl->tlines[i+1] - tl->tlines[i];
896 tline_size = tl->txt+tl->txt_size - tl->tlines[i];
898 while (s<e && !creturn(*s)) s++;
902 if (s<e && *s=='\n'+'\r'-s[-1])
906 oline_size = s-line_start;
908 //verify that olines elements are correct
909 if (line_start != tl->olines[i])
912 //verify that tlines elements are in range
914 if (p < tl->txt || p+tline_size > tl->txt+tl->txt_size)
917 //verify that original lines have sizes >= the unbroken lines
918 if (oline_size < tline_size)
921 //if sizes are inconsistent, make sure it is due to a backslash escape
922 if (oline_size > tline_size) {
923 p = line_start+tline_size;
926 while (p<e && cspace(*p)) p++;
931 //make sure the text of both copies match
940 if (initial && !token_list_sanity_check_initial(tl, err))
941 failed("Initial sanity checks failed. Has the list been modified after it was returned from tokenize() ?");
// Format token flags into a short code: 'p' = preprocessor line,
// 'D' = preprocessor directive token, '-' otherwise. NOTE(review): the
// NUL terminator and return are elided in this view.
948 static char *sprint_token_flags(char buf[3], struct token_flags flags) {
949 buf[0] = flags.pp ? 'p' : '-';
950 buf[1] = flags.pp_directive ? 'D' : '-';
// Debug dump: print each token's index, type name, flags, and escaped txt
// (and orig, under the #if 1) to stream `f`, one token per line.
// NOTE(review): token_type_str's entries and cleanup are elided here.
955 void token_list_dump(const struct token_list *tl, FILE *f) {
957 array_char buf = array_new(NULL);
960 const char *token_type_str[] = {
978 for (tok=tl->first; tok; tok=tok->next) {
979 fprintf(f, "%lu\t%s\t%s\t\"%s\"", (unsigned long)(i++),
980 token_type_str[tok->type],
981 sprint_token_flags(buf2, tok->flags),
982 escape_string(&buf, tok->txt, tok->txt_size));
983 #if 1 //print tok->orig
984 fprintf(f, "\t\"%s\"\n", escape_string(&buf, tok->orig, tok->orig_size));
// Print one diagnostic message: resolves its location pointer to line/col
// via tok_point_lookup, prefixes the filename when present, and renders the
// severity level as a word. NOTE(review): the branch structure between the
// resolved and unresolved paths is partially elided in this view.
993 void tok_message_print(struct tok_message *m, struct token_list *tl) {
995 int resolved = tok_point_lookup(&pt, m->location, tl);
998 printf("%s:%s", tl->filename, resolved ? "" : " ");
// Resolved location: report 1-based line:col with the severity label.
1002 printf("%zu:%zu %s: %s\n",
1003 pt.line+1, pt.col+1,
1004 m->level==TM_DEBUG ? "debug" :
1005 m->level==TM_INFO ? "info" :
1006 m->level==TM_WARN ? "warning" :
1007 m->level==TM_ERROR ? "error" :
1008 m->level==TM_BUG ? "BUG" :
// Unresolved path: same severity rendering without a line:col prefix.
1013 m->level==TM_DEBUG ? "debug" :
1014 m->level==TM_INFO ? "info" :
1015 m->level==TM_WARN ? "warning" :
1016 m->level==TM_ERROR ? "error" :
1017 m->level==TM_BUG ? "BUG" :
// Print a message without location resolution: "level: path: message".
1023 void tok_message_dump(struct tok_message *m) {
1024 printf("%s: %s: %s\n",
1025 m->level==TM_DEBUG ? "debug" :
1026 m->level==TM_INFO ? "info" :
1027 m->level==TM_WARN ? "warning" :
1028 m->level==TM_ERROR ? "error" :
1029 m->level==TM_BUG ? "BUG" :
1030 "???", m->path, m->message);
// Append a printf-formatted message to the queue. The formatted string is
// talloc'd against the queue's storage. NOTE(review): the va_start/va_end
// and enqueue lines are elided in this view.
1033 void tok_message_add(tok_message_queue *mq, enum tok_message_level level,
1034 const char *path, const char *loc, const char *fmt, ...) {
1035 struct tok_message msg = {.level=level, .path=path, .location=loc};
1042 msg.message = talloc_vasprintf(mq->item, fmt, ap);
// Dump every queued message via tok_message_dump (no location resolution).
1048 void tok_message_queue_dump(const tok_message_queue *mq) {
1050 for (i=0; i<queue_count(*mq); i++)
1051 tok_message_dump(&queue_item(*mq, i));