忽略flex和bison中的空白

忽略flex和bison中的空白,bison,space,flex-lexer,ignore,bisonc++,Bison,Space,Flex Lexer,Ignore,Bisonc++,我正在尝试分析以下行: BEGIN WRAP WIO3 NAME lgCell_prg160_io CONDITION UNI_PLACE_GLOBAL && compBits ENDS WIO3 我用来解析上面几行的语法一直解析到UNI\u PLACE\u GLOBAL,然后给出解析错误 请帮我找出我犯的错误 我认为它不接受空格,所以我如何

我正在尝试分析以下行:

BEGIN WRAP WIO3     
    NAME                            lgCell_prg160_io
    CONDITION                       UNI_PLACE_GLOBAL && compBits
ENDS WIO3       
我用来解析上面几行的语法一直解析到UNI\u PLACE\u GLOBAL,然后给出解析错误

请帮我找出我犯的错误

我认为它不接受空格,所以我如何允许解析这些行

我已经做了

法律

%{
#包括
#包括
常量字符s[2]=“”;
#包括“yacca.tab.h”
字符*令牌;
#定义YY_DECL外部“C”int yylex()
int line_num=1;
#ifdef调试
#定义RETURN(x)cerr“;}/*忽略行继续*/
^{CRLF}{return TOK_EMPTY_LINE;}
{CRLF}{}
.{}/*忽略未知字符*/
\n{++line_num;/*返回(ENDL);*/}
yacc.y

%{
#include <cstdio> 
#include <cstring>
#include <iostream>
#include <stdio.h>

#define YYDEBUG 1

using namespace std;

extern "C" int yylex();
extern "C" FILE *yyin;
extern int line_num;


void yyerror(const char* s);
%}

// Symbols.
%union
{
    char* sval;
};

%token <sval> TOK_NAME
%token <sval> TOK_SIZE
%token <sval> TOK_STRING
%token <sval> TOK_ITERATE
%token <sval> TOK_DIRECTION
%token <sval> TOK_STRAP
%token <sval> TOK_WRAP
%token <sval> TOK_VIA
%token <sval> TOK_EMPTY_LINE 
%token <sval> TOK_BLOCK
%token <sval> TOK_LINE
%token <sval> TOK_BEGIN
%token <sval> TOK_END
%token <sval> TOK_VERSION
%token <sval> TOK_STRUCT
%token <sval> TOK_UNIQUE
%token <sval> TOK_REF
%token <sval> TOK_POS
%token <sval> TOK_CON
%token <sval> TOK_ORI
%token END ENDL




%%

language : program ;

program : block
| program block
;


block   : TOK_BEGIN TOK_BLOCK TOK_STRING blockcontents TOK_END TOK_STRING 
  {
    if (strcmp($3,$6) == 0 )
    {
        printf("\nHEADER %s ",$2);
        printf("\nID %s ",$3);
    }
    else
    {
        printf("Block %s is not able to find its END\n" , $3);
    }

  }
  | TOK_BEGIN TOK_BLOCK TOK_STRING blockcontents TOK_END  { printf("Block %s is not able to find its END\n" , $3); }

  | TOK_BEGIN TOK_STRING blockcontents TOK_END TOK_STRING {}
  | TOK_BEGIN TOK_STRUCT TOK_STRING blockcontents TOK_END TOK_STRING
  {
    if (strcmp($3,$6) == 0 )
    {
        printf("\nHEADER %s ",$2);
        printf("\nID %s \n",$3);
    }
    else
    {
        printf("Block %s is not able to find its END\n" , $3);
    }

  }
  | TOK_BEGIN TOK_STRAP TOK_STRING blockcontents TOK_END TOK_STRING
  {
    if (strcmp($3,$6) == 0 )
    {
        printf("\nHEADER %s ",$2);
        printf("\nID %s \n",$3);
    }
    else
    {
        printf("Block %s is not able to find its END\n" , $3);
    }
  }
  | TOK_BEGIN TOK_WRAP TOK_STRING blockcontents TOK_END TOK_STRING
;


blockcontents : item
      | blockcontents item
      ;


item    : TOK_NAME TOK_STRING        { cout << endl << $1 << "->" << $2 << "  "; }
| TOK_SIZE TOK_STRING        { cout << $1 << "->" << $2 << "  "; }
| TOK_ITERATE TOK_STRING     { cout << $1 << "->" << $2 << "  ";  }
| TOK_DIRECTION TOK_STRING   { cout << endl << $1 << "->" << $2 << "  " << endl; }
| TOK_STRAP TOK_STRING       { cout  << $1 << "->" << $2 << "  "; }
| TOK_WRAP TOK_STRING        { cout << $1 << "->" << $2 << "  "; }
| TOK_VIA TOK_STRING         { cout << $1 << "->" << $2 << "  " << endl; }
| TOK_VERSION TOK_STRING     {}
| TOK_UNIQUE TOK_STRING      { cout << endl << $1 << "->" << $2 << "  " << endl; }
| TOK_REF TOK_STRING         { cout << endl << $1 << "->" << $2 << "  " << endl; }
| TOK_POS TOK_STRING         { cout << endl << $1 << "->" << $2 << "  " << endl; }
| TOK_CON TOK_STRING         { cout << endl << $1 << "->" << $2 << "  " << endl; }
| TOK_ORI TOK_STRING         { cout << endl << $1 << "->" << $2 << "  " << endl; }  
| block
;




%%



int main(void) {
FILE * pt = fopen("LG.txt", "r" );
if(!pt)
{
cout << "Bad Input.Noexistant file" << endl;
return -1;
}
yyin = pt;
do
{
//yydebug = 1;
    yyparse();
}while (!feof(yyin));      
}
void yyerror(const char *s) {
    cout << "parse error on line " << line_num << "!  Message: " << s << endl;
    exit(-1);
}

extern "C" int yywrap()
{
        return (1 == 1);
}



#include "lex.yy.c"
%{
#包括
#包括
#包括
#包括
#定义调试1
使用名称空间std;
外部“C”int yylex();
外部“C”文件*yyin;
外部内部行数;
无效错误(常量字符*s);
%}
//符号。
%联合
{
char*sval;
};
%token TOK_名称
%令牌托库大小
%令牌TOK_串
%令牌TOK_迭代
%token TOK_方向
%token TOK_带
%token TOK_WRAP
%token TOK_VIA
%token TOK_空_线
%token TOK_区块
%令牌托库线
%token TOK_BEGIN
%token TOK_END
%token TOK_版本
%token TOK_结构
%token TOK_UNIQUE
%TOK_REF令牌
%token TOK_POS
%token TOK_CON
%token TOK_ORI
%令牌端
%%
语言:程序;
节目:block
|程序块
;
块:TOK_开始TOK_块TOK_串块内容TOK_结束TOK_串
{
如果(strcmp($3,$6)==0)
{
printf(“\n标题%s,$2”);
printf(“\nID%s”,3美元);
}
其他的
{
printf(“块%s无法找到其结尾\n”,$3);
}
}
|TOK_BEGIN TOK_BLOCK TOK_STRING blockcontents TOK_END{printf(“块%s找不到它的结尾”,$3);}
|TOK_开始TOK_串块内容TOK_结束TOK_串{}
|TOK_开始TOK_结构TOK_字符串块内容TOK_结束TOK_字符串
{
如果(strcmp($3,$6)==0)
{
printf(“\n标题%s,$2”);
printf(“\nID%s\n”,3美元);
}
其他的
{
printf(“块%s无法找到其结尾\n”,$3);
}
}
|TOK\ U BEGIN TOK\ U STRAP TOK\ U串块内容TOK\ U END TOK\ U串
{
如果(strcmp($3,$6)==0)
{
printf(“\n标题%s,$2”);
printf(“\nID%s\n”,3美元);
}
其他的
{
printf(“块%s无法找到其结尾\n”,$3);
}
}
|TOK\ U BEGIN TOK\ U WRAP TOK\ U串块内容TOK\ U END TOK\ U串
;
区块内容:项目
|块内容项
;

条目:TOK_NAME TOK_STRING{cout您需要做什么使用
-d
选项标志启用lexer的调试模式,也使用
-t
标志启用解析跟踪模式。您还必须向主程序添加几行以启用解析跟踪:

intmain(void){
之后,将这些行添加到ayacc.y中:

    extern int yydebug;
    yydebug = 1;
现在使用调试和跟踪构建:

flex -d lex.l
bison -t -d ayacc.y
现在,当您跑步时,您会看到:

Starting parse
Entering state 0
Reading a token: --(end of buffer or a NUL)
--accepting rule at line 80 ("BEGIN")
Next token is token TOK_BEGIN ()
Shifting token TOK_BEGIN ()
Entering state 1
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 71 ("WRAP")
Next token is token TOK_WRAP ()
Shifting token TOK_WRAP ()
Entering state 7
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 109 ("WIO3")
Next token is token TOK_STRING ()
Shifting token TOK_STRING ()
Entering state 29
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 119 ("
")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 56 ("NAME")
Next token is token TOK_NAME ()
Shifting token TOK_NAME ()
Entering state 12
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 109 ("lgCell_prg160_io")
Next token is token TOK_STRING ()
Shifting token TOK_STRING ()
Entering state 32
Reducing stack by rule 12 (line 109):
   $1 = token TOK_NAME ()
   $2 = token TOK_STRING ()

-> $$ = nterm item ()
Stack now 0 1 7 29
Entering state 27
Reducing stack by rule 10 (line 104):
   $1 = nterm item ()
-> $$ = nterm blockcontents ()
Stack now 0 1 7 29
Entering state 48
Reading a token: --accepting rule at line 119 ("
")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 103 ("CONDITION")
Next token is token TOK_CON ()
Shifting token TOK_CON ()
Entering state 23
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 109 ("UNI_PLACE_GLOBAL")
Next token is token TOK_STRING ()
Shifting token TOK_STRING ()
Entering state 43
Reducing stack by rule 23 (line 120):
   $1 = token TOK_CON ()
   $2 = token TOK_STRING ()
NAME->lgCell_prg160_io  
CONDITION->UNI_PLACE_GLOBAL  
-> $$ = nterm item ()
Stack now 0 1 7 29 48
Entering state 46
Reducing stack by rule 11 (line 105):
   $1 = nterm blockcontents ()
   $2 = nterm item ()
-> $$ = nterm blockcontents ()
Stack now 0 1 7 29
Entering state 48
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 109 ("&&")
Next token is token TOK_STRING ()
parse error on line 3!  Message: syntax error
您可以看到它使用lexer规则正确地忽略了空格。您的问题是
&&
符号被识别为TOK_字符串,这不是语法规则所期望的


不知道你的语言的正确语法应该是什么(你没有说)如果没有更多信息,我无法为您解决此问题。您可能希望修复TOK_字符串规则以仅匹配字符串!

好的。我将在回答中发布我迄今为止所做的事情@BrianI发布了我的解决方案请帮助我@Briany您有几个冲突的空格lexer规则;例如,一个跳过空格,另一个返回令牌.您可能想检查一下…我已经删除了。但我在UNI_PLACE_GLOBAL附近发现了相同的错误。
Starting parse
Entering state 0
Reading a token: --(end of buffer or a NUL)
--accepting rule at line 80 ("BEGIN")
Next token is token TOK_BEGIN ()
Shifting token TOK_BEGIN ()
Entering state 1
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 71 ("WRAP")
Next token is token TOK_WRAP ()
Shifting token TOK_WRAP ()
Entering state 7
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 109 ("WIO3")
Next token is token TOK_STRING ()
Shifting token TOK_STRING ()
Entering state 29
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 119 ("
")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 56 ("NAME")
Next token is token TOK_NAME ()
Shifting token TOK_NAME ()
Entering state 12
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 109 ("lgCell_prg160_io")
Next token is token TOK_STRING ()
Shifting token TOK_STRING ()
Entering state 32
Reducing stack by rule 12 (line 109):
   $1 = token TOK_NAME ()
   $2 = token TOK_STRING ()

-> $$ = nterm item ()
Stack now 0 1 7 29
Entering state 27
Reducing stack by rule 10 (line 104):
   $1 = nterm item ()
-> $$ = nterm blockcontents ()
Stack now 0 1 7 29
Entering state 48
Reading a token: --accepting rule at line 119 ("
")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 103 ("CONDITION")
Next token is token TOK_CON ()
Shifting token TOK_CON ()
Entering state 23
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 52 (" ")
--accepting rule at line 109 ("UNI_PLACE_GLOBAL")
Next token is token TOK_STRING ()
Shifting token TOK_STRING ()
Entering state 43
Reducing stack by rule 23 (line 120):
   $1 = token TOK_CON ()
   $2 = token TOK_STRING ()
NAME->lgCell_prg160_io  
CONDITION->UNI_PLACE_GLOBAL  
-> $$ = nterm item ()
Stack now 0 1 7 29 48
Entering state 46
Reducing stack by rule 11 (line 105):
   $1 = nterm blockcontents ()
   $2 = nterm item ()
-> $$ = nterm blockcontents ()
Stack now 0 1 7 29
Entering state 48
Reading a token: --accepting rule at line 52 (" ")
--accepting rule at line 109 ("&&")
Next token is token TOK_STRING ()
parse error on line 3!  Message: syntax error