package gudusoft.gsqlparser.parser;

import gudusoft.gsqlparser.EDbVendor;
import gudusoft.gsqlparser.TBaseType;
import gudusoft.gsqlparser.TGSqlParser;
import gudusoft.gsqlparser.TCustomLexer;
import gudusoft.gsqlparser.TCustomParser;
import gudusoft.gsqlparser.TCustomSqlStatement;
import gudusoft.gsqlparser.TLexerSparksql;
import gudusoft.gsqlparser.TParserSparksql;
import gudusoft.gsqlparser.TSourceToken;
import gudusoft.gsqlparser.TSourceTokenList;
import gudusoft.gsqlparser.TStatementList;
import gudusoft.gsqlparser.TSyntaxError;
import gudusoft.gsqlparser.TLog;
import gudusoft.gsqlparser.EFindSqlStateType;
import gudusoft.gsqlparser.ETokenType;
import gudusoft.gsqlparser.ETokenStatus;
import gudusoft.gsqlparser.ESqlStatementType;
import gudusoft.gsqlparser.EErrorType;
import gudusoft.gsqlparser.stmt.TUnknownSqlStatement;
import gudusoft.gsqlparser.stmt.mysql.TMySQLSource;
import gudusoft.gsqlparser.sqlcmds.SqlCmdsFactory;
import gudusoft.gsqlparser.compiler.TContext;
import gudusoft.gsqlparser.compiler.TGlobalScope;
import gudusoft.gsqlparser.compiler.TASTEvaluator;
import gudusoft.gsqlparser.sqlenv.TSQLEnv;
import gudusoft.gsqlparser.resolver.TSQLResolver;
import gudusoft.gsqlparser.resolver2.TSQLResolver2;
import gudusoft.gsqlparser.resolver2.TSQLResolverConfig;

import static gudusoft.gsqlparser.ESqlStatementType.*;

/**
 * Apache Spark SQL parser implementation.
 *
 * <p>This parser handles SparkSQL-specific SQL syntax, including:
 * <ul>
 * <li>SparkSQL SELECT statements</li>
 * <li>SparkSQL-specific DATE, TIME, TIMESTAMP, and INTERVAL constructs</li>
 * <li>Stored procedure, function, and trigger statements (extracted MySQL-style)</li>
 * <li>Custom statement delimiters</li>
 * </ul>
 *
 * <p><b>Design Notes:</b>
 * <ul>
 * <li>Extends {@link AbstractSqlParser} using the template method pattern</li>
 * <li>Uses {@link TLexerSparksql} for tokenization</li>
 * <li>Uses {@link TParserSparksql} for parsing</li>
 * <li>Tokenization is simple and similar to MySQL</li>
 * <li>The default statement delimiter is {@code ';'}</li>
 * </ul>
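 *
 * <p><b>Usage</b> (an illustrative sketch; applications normally go through
 * {@link TGSqlParser} with {@link EDbVendor#dbvsparksql} rather than
 * instantiating this class directly, so treat the exact calls below as an
 * assumption rather than a contract):
 * <pre>{@code
 * TGSqlParser parser = new TGSqlParser(EDbVendor.dbvsparksql);
 * parser.sqltext = "SELECT id, name FROM users WHERE age > 21";
 * if (parser.parse() == 0) {
 *     System.out.println(parser.sqlstatements.get(0).sqlstatementtype);
 * } else {
 *     System.out.println(parser.getErrormessage());
 * }
 * }</pre>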
 *
 * @see SqlParser
 * @see AbstractSqlParser
 * @see TLexerSparksql
 * @see TParserSparksql
 * @since 3.2.0.0
 */
public class SparksqlSqlParser extends AbstractSqlParser {

    /**
     * Constructs a SparkSQL parser.
     * <p>
     * Configures the parser for Apache Spark SQL with the default delimiter ({@code ;}).
     */
    public SparksqlSqlParser() {
        super(EDbVendor.dbvsparksql);
        this.delimiterChar = ';';
        this.defaultDelimiterStr = ";";

        // Create the lexer once; it is reused for all parsing operations
        this.flexer = new TLexerSparksql();
        this.flexer.delimiterchar = this.delimiterChar;
        this.flexer.defaultDelimiterStr = this.defaultDelimiterStr;

        // Share the lexer with the parent class's tokenization logic
        this.lexer = this.flexer;

        // Create the parser once; it is reused for all parsing operations
        this.fparser = new TParserSparksql(null);
        this.fparser.lexer = this.flexer;
    }

    // ========== Parser Components ==========

    /** The SparkSQL lexer used for tokenization. */
    public TLexerSparksql flexer;

    /** The SparkSQL statement parser. */
    private TParserSparksql fparser;

    /** The current statement being built during extraction. */
    private TCustomSqlStatement gcurrentsqlstatement;

    /** The user-defined delimiter string. */
    private String userDelimiterStr = ";";

    /** The current delimiter character. */
    private char curdelimiterchar = ';';

    // ========== AbstractSqlParser Abstract Methods Implementation ==========

    /**
     * Returns the SparkSQL lexer instance.
     */
    @Override
    protected TCustomLexer getLexer(ParserContext context) {
        return this.flexer;
    }

    /**
     * Returns the SparkSQL parser instance with an updated token list.
     */
    @Override
    protected TCustomParser getParser(ParserContext context, TSourceTokenList tokens) {
        this.fparser.sourcetokenlist = tokens;
        return this.fparser;
    }

    /**
     * Invokes the SparkSQL-specific tokenization logic.
     */
    @Override
    protected void tokenizeVendorSql() {
        dosparksqltexttotokenlist();
    }

    /**
     * Prepares the SparkSQL parser for raw statement extraction.
     */
    @Override
    protected void setupVendorParsersForExtraction() {
        this.fparser.sqlcmds = this.sqlcmds;
        this.fparser.sourcetokenlist = this.sourcetokenlist;
    }

    /**
     * Invokes the SparkSQL-specific raw statement extraction logic.
     */
    @Override
    protected void extractVendorRawStatements(SqlParseResult.Builder builder) {
        dosparksqlgetrawsqlstatements(builder);
    }
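    // Template-method flow: a sketch inferred from the hook names and the class
    // javadoc, not a guarantee of AbstractSqlParser's exact call order.
    //
    //   tokenizeVendorSql()           -> build the source token list
    //   extractVendorRawStatements()  -> split the token list into raw statements
    //   performParsing()              -> parse each raw statement (syntax check)
    //   performSemanticAnalysis()     -> resolve names when no syntax errors exist
    //   performInterpreter()          -> optionally evaluate the parsed statements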
    /**
     * Performs full parsing of statements with syntax checking.
     */
    @Override
    protected TStatementList performParsing(ParserContext context,
                                            TCustomParser parser,
                                            TCustomParser secondaryParser,
                                            TSourceTokenList tokens,
                                            TStatementList rawStatements) {
        this.fparser = (TParserSparksql) parser;
        this.sourcetokenlist = tokens;
        this.parserContext = context;
        this.sqlstatements = rawStatements;

        this.sqlcmds = SqlCmdsFactory.get(vendor);
        this.fparser.sqlcmds = this.sqlcmds;

        if (context != null && context.getGsqlparser() != null) {
            TGSqlParser gsqlparser = (TGSqlParser) context.getGsqlparser();
            this.frameStack = gsqlparser.getFrameStack();
            this.fparser.getNf().setGsqlParser(gsqlparser);
            this.globalContext = new TContext();
            this.sqlEnv = new TSQLEnv(this.vendor) {
                @Override
                public void initSQLEnv() {
                }
            };
            this.globalContext.setSqlEnv(this.sqlEnv, this.sqlstatements);
        } else {
            initializeGlobalContext();
        }

        for (int i = 0; i < sqlstatements.size(); i++) {
            TCustomSqlStatement stmt = sqlstatements.getRawSql(i);

            try {
                stmt.setFrameStack(frameStack);
                // Tolerate a null context here, consistent with the branch above
                boolean onlyRawParseTree = (context != null) && context.isOnlyNeedRawParseTree();
                int parseResult = stmt.parsestatement(null, false, onlyRawParseTree);

                boolean doRecover = TBaseType.ENABLE_ERROR_RECOVER_IN_CREATE_TABLE;
                if (doRecover && ((parseResult != 0) || (stmt.getErrorCount() > 0))) {
                    handleCreateTableErrorRecovery(stmt);
                }

                if ((parseResult != 0) || (stmt.getErrorCount() > 0)) {
                    copyErrorsFromStatement(stmt);
                }

            } catch (Exception ex) {
                handleStatementParsingException(stmt, i, ex);
            }
        }

        if (globalFrame != null) {
            globalFrame.popMeFromStack(frameStack);
        }

        return this.sqlstatements;
    }

    /**
     * Error recovery for CREATE TABLE and CREATE INDEX statements.
     * <p>
     * When strict parsing is disabled, tokens that follow the balanced closing
     * parenthesis (and are not part of an {@code AS (SELECT ...)} clause) are
     * retagged so the parser ignores them, and the statement is reparsed.
     */
    private void handleCreateTableErrorRecovery(TCustomSqlStatement stmt) {
        if (((stmt.sqlstatementtype == ESqlStatementType.sstcreatetable)
                || (stmt.sqlstatementtype == ESqlStatementType.sstcreateindex))
                && (!TBaseType.c_createTableStrictParsing)) {

            int nested = 0;
            boolean isIgnore = false, isFoundIgnoreToken = false;
            TSourceToken firstIgnoreToken = null;

            for (int k = 0; k < stmt.sourcetokenlist.size(); k++) {
                TSourceToken st = stmt.sourcetokenlist.get(k);
                if (isIgnore) {
                    if (st.issolidtoken() && (st.tokencode != ';')) {
                        isFoundIgnoreToken = true;
                        if (firstIgnoreToken == null) {
                            firstIgnoreToken = st;
                        }
                    }
                    if (st.tokencode != ';') {
                        st.tokencode = TBaseType.sqlpluscmd;
                    }
                    continue;
                }
                if (st.tokencode == (int) ')') {
                    nested--;
                    if (nested == 0) {
                        // CREATE TABLE ... AS (SELECT ...) is legitimate, so its tail is kept
                        boolean isSelect = false;
                        TSourceToken st1 = st.searchToken(TBaseType.rrw_as, 1);
                        if (st1 != null) {
                            TSourceToken st2 = st.searchToken((int) '(', 2);
                            if (st2 != null) {
                                TSourceToken st3 = st.searchToken(TBaseType.rrw_select, 3);
                                isSelect = (st3 != null);
                            }
                        }
                        if (!isSelect) isIgnore = true;
                    }
                } else if (st.tokencode == (int) '(') {
                    nested++;
                }
            }

            if (isFoundIgnoreToken) {
                stmt.clearError();
                stmt.parsestatement(null, false);
            }
        }
    }
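    // An illustrative sketch of the recovery above (the SQL is hypothetical; the
    // token-code names come from the code itself): given a statement such as
    //
    //   CREATE TABLE t (id INT) SOME_UNPARSEABLE_TRAILING_CLAUSE x y
    //
    // every token after the balanced ')' that is not part of an AS (SELECT ...)
    // clause is retagged as TBaseType.sqlpluscmd, and the statement is reparsed
    // without those tokens.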
    @Override
    protected void performSemanticAnalysis(ParserContext context, TStatementList statements) {
        if (context != null && context.getGsqlparser() != null) {
            // In this mode semantic analysis is driven by TGSqlParser itself
            return;
        }

        if (getSyntaxErrors().isEmpty()) {
            if (TBaseType.isEnableResolver2()) {
                TSQLResolverConfig config = new TSQLResolverConfig();
                config.setVendor(vendor);
                TSQLResolver2 resolver2 = new TSQLResolver2(null, statements, config);
                if (this.sqlEnv != null) {
                    resolver2.setSqlEnv(this.sqlEnv);
                }
                resolver2.resolve();
            } else if (TBaseType.isEnableResolver()) {
                TSQLResolver resolver = new TSQLResolver(globalContext, statements);
                resolver.resolve();
            }
        }
    }

    @Override
    protected void performInterpreter(ParserContext context, TStatementList statements) {
        if (TBaseType.ENABLE_INTERPRETER && getSyntaxErrors().isEmpty()) {
            TLog.clearLogs();
            TGlobalScope interpreterScope = new TGlobalScope(sqlEnv);
            TLog.enableInterpreterLogOnly();
            TASTEvaluator astEvaluator = new TASTEvaluator(statements, interpreterScope);
            astEvaluator.eval();
        }
    }

    // ========== Helper Methods ==========

    /**
     * Adds a token to the current statement with proper statement linkage.
     * Replicates the behavior of {@code TCustomSqlStatement.addtokentolist()},
     * which sets {@code st.stmt = this} before adding.
     */
    private void addTokenToStatement(TSourceToken st) {
        st.stmt = gcurrentsqlstatement;
        gcurrentsqlstatement.sourcetokenlist.add(st);
    }

    // ========== SparkSQL-Specific Tokenization ==========

    /**
     * SparkSQL-specific tokenization logic.
     * <p>
     * SparkSQL uses simple tokenization, similar to MySQL, with special
     * handling for the ROLLUP keyword.
     * Migrated from {@code TGSqlParser.dosparksqltexttotokenlist()}.
     */
    private void dosparksqltexttotokenlist() {
        TSourceToken asourcetoken, lcprevst;
        int yychar;
        boolean startDelimiter = false;

        flexer.tmpDelimiter = "";

        asourcetoken = getanewsourcetoken();
        if (asourcetoken == null) return;
        yychar = asourcetoken.tokencode;

        while (yychar > 0) {
            sourcetokenlist.add(asourcetoken);
            asourcetoken = getanewsourcetoken();
            if (asourcetoken == null) break;
            checkMySQLCommentToken(asourcetoken);

            // Note: startDelimiter is never set to true in this method, so this
            // branch is currently inert
            if ((asourcetoken.tokencode == TBaseType.lexnewline) && (startDelimiter)) {
                startDelimiter = false;
                flexer.tmpDelimiter = sourcetokenlist.get(sourcetokenlist.size() - 1).getAstext();
            }

            if (asourcetoken.tokencode == TBaseType.rrw_rollup) {
                // WITH ROLLUP: retag the preceding WITH so the grammar can treat
                // the two keywords as one construct
                lcprevst = getprevsolidtoken(asourcetoken);
                if (lcprevst != null) {
                    if (lcprevst.tokencode == TBaseType.rrw_with)
                        lcprevst.tokencode = TBaseType.with_rollup;
                }
            }

            yychar = asourcetoken.tokencode;
        }
    }
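    // An illustrative sketch of the ROLLUP retagging above (hypothetical SQL):
    //
    //   SELECT dept, SUM(salary) FROM emp GROUP BY dept WITH ROLLUP;
    //
    // When the lexer emits rrw_rollup, the preceding solid token WITH is retagged
    // from rrw_with to with_rollup so the grammar sees "WITH ROLLUP" as one unit.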
    /** Returns the previous solid (non-whitespace, non-comment) token, or null. */
    private TSourceToken getprevsolidtoken(TSourceToken ptoken) {
        if (ptoken == null) return null;
        TSourceTokenList lcstlist = ptoken.container;
        if (TBaseType.assigned(lcstlist)) {
            return lcstlist.nextsolidtoken(ptoken.posinlist - 1, -1, false);
        }
        return null;
    }

    /**
     * Checks and processes MySQL-style comment tokens.
     * <p>
     * Migrated from {@code TGSqlParser.checkMySQLCommentToken()}.
     */
    private void checkMySQLCommentToken(TSourceToken asourcetoken) {
        if (asourcetoken.tokencode == TBaseType.cmtslashstar) {
            String cmtText = asourcetoken.toString();
            if (cmtText.startsWith("/*+") || cmtText.startsWith("/*-") || cmtText.startsWith("/*!")
                    || cmtText.startsWith("/*%")) {
                // Optimizer hint comment; intentionally left as-is
            }
        }
    }

    // ========== SparkSQL-Specific Raw Statement Extraction ==========

    /**
     * SparkSQL-specific raw statement extraction logic.
     * <p>
     * SparkSQL uses MySQL-like raw statement extraction with special handling
     * for the DATE, TIME, TIMESTAMP, and INTERVAL keywords.
     * Migrated from {@code TGSqlParser.dosparksqlgetrawsqlstatements()}.
     */
    private void dosparksqlgetrawsqlstatements(SqlParseResult.Builder builder) {
        int errorcount = 0;
        gcurrentsqlstatement = null;
        EFindSqlStateType gst = EFindSqlStateType.stnormal;
        int i;
        TSourceToken ast;
        boolean waitingDelimiter = false;

        // Reset the delimiter
        userDelimiterStr = defaultDelimiterStr;

        for (i = 0; i < sourcetokenlist.size(); i++) {
            ast = sourcetokenlist.get(i);
            sourcetokenlist.curpos = i;

            // Handle SparkSQL-specific keywords
            if (ast.tokencode == TBaseType.rrw_date) {
                TSourceToken st1 = ast.nextSolidToken();
                if (st1 != null) {
                    if (st1.tokencode == '(') {
                        ast.tokencode = TBaseType.rrw_spark_date_function;
                    } else if (st1.tokencode == TBaseType.sconst) {
                        ast.tokencode = TBaseType.rrw_spark_date_const;
                    }
                }
            } else if (ast.tokencode == TBaseType.rrw_time) {
                TSourceToken st1 = ast.nextSolidToken();
                if (st1 != null) {
                    if (st1.tokencode == TBaseType.sconst) {
                        ast.tokencode = TBaseType.rrw_spark_time_const;
                    }
                }
            } else if (ast.tokencode == TBaseType.rrw_timestamp) {
                TSourceToken st1 = ast.nextSolidToken();
                if (st1 != null) {
                    if (st1.tokencode == TBaseType.sconst) {
                        ast.tokencode = TBaseType.rrw_spark_timestamp_constant;
                    } else if (st1.tokencode == TBaseType.ident) {
                        if (st1.toString().startsWith("\"")) {
                            ast.tokencode = TBaseType.rrw_spark_timestamp_constant;
                            st1.tokencode = TBaseType.sconst;
                        }
                    }
                }
            } else if (ast.tokencode == TBaseType.rrw_interval) {
                // INTERVAL followed by a parenthesized, comma-separated argument
                // list is the MySQL-style INTERVAL() function
                TSourceToken leftParen = ast.searchToken('(', 1);
                if (leftParen != null) {
                    int k = leftParen.posinlist + 1;
                    boolean commaToken = false;
                    while (k < ast.container.size()) {
                        if (ast.container.get(k).tokencode == ')') break;
                        if (ast.container.get(k).tokencode == ',') {
                            commaToken = true;
                            break;
                        }
                        k++;
                    }
                    if (commaToken) {
                        ast.tokencode = TBaseType.rrw_mysql_interval_func;
                    }
                }
            } else if (ast.tokencode == TBaseType.rrw_spark_position) {
                TSourceToken leftParen = ast.searchToken('(', 1);
                if (leftParen == null) {
                    ast.tokencode = TBaseType.ident; // treat it as an identifier
                }
            }
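            // Illustrative effect of the retagging above (hypothetical SQL; the
            // token-code names come from the code itself):
            //   DATE('2020-01-01')      -> DATE retagged to rrw_spark_date_function
            //   DATE '2020-01-01'       -> DATE retagged to rrw_spark_date_const
            //   TIME '12:00:00'         -> TIME retagged to rrw_spark_time_const
            //   TIMESTAMP '2020-01-01'  -> TIMESTAMP retagged to rrw_spark_timestamp_constant
            //   INTERVAL(1, 2, 3)       -> INTERVAL retagged to rrw_mysql_interval_func
            //   POSITION with no '('    -> downgraded to a plain identifier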
            switch (gst) {
                case sterror: {
                    // Accumulate tokens until a terminating semicolon, then emit
                    // the invalid statement
                    addTokenToStatement(ast);
                    if (ast.tokentype == ETokenType.ttsemicolon) {
                        onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                        gst = EFindSqlStateType.stnormal;
                    }
                    break;
                }
                case stnormal: {
                    if ((ast.tokencode == TBaseType.cmtdoublehyphen)
                            || (ast.tokencode == TBaseType.cmtslashstar)
                            || (ast.tokencode == TBaseType.lexspace)
                            || (ast.tokencode == TBaseType.lexnewline)
                            || (ast.tokentype == ETokenType.ttsemicolon)) {
                        if (TBaseType.assigned(gcurrentsqlstatement)) {
                            addTokenToStatement(ast);
                        }
                        continue;
                    }

                    if ((ast.isFirstTokenOfLine()) && ((ast.tokencode == TBaseType.rrw_mysql_source) || (ast.tokencode == TBaseType.slash_dot))) {
                        gst = EFindSqlStateType.stsqlplus;
                        gcurrentsqlstatement = new TMySQLSource(vendor);
                        addTokenToStatement(ast);
                        continue;
                    }

                    // Find a token that starts SQL or stored-procedure mode
                    gcurrentsqlstatement = sqlcmds.issql(ast, gst, gcurrentsqlstatement);

                    if (TBaseType.assigned(gcurrentsqlstatement)) {
                        ESqlStatementType[] ses = {ESqlStatementType.sstmysqlcreateprocedure,
                                ESqlStatementType.sstmysqlcreatefunction,
                                sstcreateprocedure,
                                ESqlStatementType.sstcreatefunction,
                                ESqlStatementType.sstcreatetrigger};
                        if (includesqlstatementtype(gcurrentsqlstatement.sqlstatementtype, ses)) {
                            gst = EFindSqlStateType.ststoredprocedure;
                            waitingDelimiter = false;
                            addTokenToStatement(ast);
                            curdelimiterchar = ';';
                        } else {
                            gst = EFindSqlStateType.stsql;
                            addTokenToStatement(ast);
                        }
                    }

                    if (!TBaseType.assigned(gcurrentsqlstatement)) { // an unrecognized token was found
                        this.syntaxErrors.add(new TSyntaxError(ast.getAstext(), ast.lineNo, (ast.columnNo < 0 ? 0 : ast.columnNo),
                                "Error when tokenlize", EErrorType.spwarning, TBaseType.MSG_WARNING_ERROR_WHEN_TOKENIZE, null, ast.posinlist));

                        ast.tokentype = ETokenType.tttokenlizererrortoken;
                        gst = EFindSqlStateType.sterror;

                        gcurrentsqlstatement = new TUnknownSqlStatement(vendor);
                        gcurrentsqlstatement.sqlstatementtype = ESqlStatementType.sstinvalid;
                        addTokenToStatement(ast);
                    }
                    break;
                }
                case stsqlplus: {
                    // A SOURCE command runs to the end of the line; the newline
                    // itself belongs to the statement, so add it before completing
                    addTokenToStatement(ast);
                    if (ast.tokencode == TBaseType.lexnewline) {
                        gst = EFindSqlStateType.stnormal;
                        onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                    }
                    break;
                }
                case stsql: {
                    if ((ast.tokentype == ETokenType.ttsemicolon) && (gcurrentsqlstatement.sqlstatementtype != ESqlStatementType.sstmysqldelimiter)) {
                        gst = EFindSqlStateType.stnormal;
                        addTokenToStatement(ast);
                        gcurrentsqlstatement.semicolonended = ast;
                        onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                        continue;
                    }
                    if (ast.toString().equalsIgnoreCase(userDelimiterStr)) {
                        gst = EFindSqlStateType.stnormal;
                        ast.tokencode = ';'; // treat it as a semicolon
                        addTokenToStatement(ast);
                        gcurrentsqlstatement.semicolonended = ast;
                        onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                        continue;
                    }
                    addTokenToStatement(ast);

                    if ((ast.tokencode == TBaseType.lexnewline)
                            && (gcurrentsqlstatement.sqlstatementtype == ESqlStatementType.sstmysqldelimiter)) {
                        // A DELIMITER statement ends at the newline; rebuild the
                        // user delimiter from its remaining solid tokens
                        gst = EFindSqlStateType.stnormal;
                        userDelimiterStr = "";
                        for (int k = 0; k < gcurrentsqlstatement.sourcetokenlist.size(); k++) {
                            TSourceToken st = gcurrentsqlstatement.sourcetokenlist.get(k);
                            if ((st.tokencode == TBaseType.rrw_mysql_delimiter)
                                    || (st.tokencode == TBaseType.lexnewline)
                                    || (st.tokencode == TBaseType.lexspace)
                                    || (st.tokencode == TBaseType.rrw_set)) {
                                continue;
                            }
                            userDelimiterStr += st.toString();
                        }
                        onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                        continue;
                    }
                    break;
                }
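                // An illustrative sketch of custom-delimiter handling (hypothetical
                // MySQL-style script; this parser inherits the mechanism):
                //
                //   DELIMITER //
                //   CREATE PROCEDURE p() BEGIN SELECT 1; END //
                //   DELIMITER ;
                //
                // The DELIMITER line rebuilds userDelimiterStr as "//", so the ';'
                // inside the procedure body no longer ends the statement; the
                // ststoredprocedure case below then waits for "//" instead.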
                case ststoredprocedure: {
                    if (waitingDelimiter) {
                        if (userDelimiterStr.equalsIgnoreCase(ast.toString())) {
                            gst = EFindSqlStateType.stnormal;
                            gcurrentsqlstatement.semicolonended = ast;
                            onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                            continue;
                        } else if (userDelimiterStr.startsWith(ast.toString())) {
                            // The delimiter may have been split across several
                            // tokens; join the following tokens and compare again
                            String lcstr = ast.toString();
                            for (int k = ast.posinlist + 1; k < ast.container.size(); k++) {
                                TSourceToken st = ast.container.get(k);
                                if ((st.tokencode == TBaseType.rrw_mysql_delimiter) || (st.tokencode == TBaseType.lexnewline) || (st.tokencode == TBaseType.lexspace)) {
                                    break;
                                }
                                lcstr = lcstr + st.toString();
                            }

                            if (userDelimiterStr.equalsIgnoreCase(lcstr)) {
                                for (int k = ast.posinlist; k < ast.container.size(); k++) {
                                    TSourceToken st = ast.container.get(k);
                                    if ((st.tokencode == TBaseType.rrw_mysql_delimiter) || (st.tokencode == TBaseType.lexnewline) || (st.tokencode == TBaseType.lexspace)) {
                                        break;
                                    }
                                    st.tokenstatus = ETokenStatus.tsignorebyyacc; // mark every token that makes up the delimiter
                                }
                                gst = EFindSqlStateType.stnormal;
                                gcurrentsqlstatement.semicolonended = ast;
                                onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                                continue;
                            }
                        }
                    }
                    if (ast.tokencode == TBaseType.rrw_begin)
                        waitingDelimiter = true;

                    if (userDelimiterStr.equals(";") || (!waitingDelimiter)) {
                        addTokenToStatement(ast);
                        if (ast.tokentype == ETokenType.ttsemicolon) {
                            gst = EFindSqlStateType.stnormal;
                            gcurrentsqlstatement.semicolonended = ast;
                            onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                            continue;
                        }
                    } else {
                        if (ast.toString().equals(userDelimiterStr)) {
                            ast.tokenstatus = ETokenStatus.tsignorebyyacc;
                            addTokenToStatement(ast);
                            gst = EFindSqlStateType.stnormal;
                            onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                        } else {
                            // Note: userDelimiterStr is never ";" in this branch, so
                            // the condition below cannot hold; kept as migrated
                            if ((ast.tokentype == ETokenType.ttsemicolon) && (userDelimiterStr.equals(";"))) {
                                TSourceToken lcprevtoken = ast.container.nextsolidtoken(ast, -1, false);
                                if (lcprevtoken != null) {
                                    if (lcprevtoken.tokencode == TBaseType.rrw_end) {
                                        gst = EFindSqlStateType.stnormal;
                                        gcurrentsqlstatement.semicolonended = ast;
                                        addTokenToStatement(ast);
                                        onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, false, builder);
                                        continue;
                                    }
                                }
                            }

                            addTokenToStatement(ast);
                        }
                    }
                    break;
                }
            }
        }

        // Flush the last statement if the input ended without a trailing delimiter
        if (TBaseType.assigned(gcurrentsqlstatement) && ((gst == EFindSqlStateType.stsql) || (gst == EFindSqlStateType.ststoredprocedure) || (gst == EFindSqlStateType.sterror))) {
            onRawStatementComplete(parserContext, gcurrentsqlstatement, fparser, null, sqlstatements, true, builder);
        }

        builder.sqlStatements(this.sqlstatements);
        builder.errorCode(errorcount);
        builder.errorMessage(errorcount == 0 ? "" : String.format("Extraction completed with %d error(s)", errorcount));
    }

    /**
     * Checks whether a SQL statement type is included in the given array.
     */
    private boolean includesqlstatementtype(ESqlStatementType type, ESqlStatementType[] types) {
        for (ESqlStatementType t : types) {
            if (t == type) return true;
        }
        return false;
    }
}