|
@@ -28,6 +28,7 @@ cmLexErrorRecd cmLexErrorArray[] =
|
28
|
28
|
{ kFileCloseErrLexRC, "File close failed on cmLexSetFile()"},
|
29
|
29
|
{ kMemAllocErrLexRC, "An attempted memory allocation failed"},
|
30
|
30
|
{ kEofRC, "The end of the input text was encountered (this is a normal condition not an error)"},
|
|
31
|
+ { kInvalidLexTIdLexRC, "An invalid token id was encountered."},
|
31
|
32
|
{ kInvalidLexRC, "Unknown lexer error code." }
|
32
|
33
|
};
|
33
|
34
|
|
|
@@ -42,6 +43,7 @@ typedef struct
|
42
|
43
|
cmLexMatcherFuncPtr_t funcPtr; // recognizer function (only used if userPtr==NULL)
|
43
|
44
|
cmChar_t* tokenStr; // fixed string data used by the recognizer (only used if userPtr==NULL)
|
44
|
45
|
cmLexUserMatcherPtr_t userPtr; // user defined recognizer function (only used if funcPtr==NULL)
|
|
46
|
+ bool enableFl; // true if this matcher is enabled
|
45
|
47
|
} cmLexMatcher;
|
46
|
48
|
|
47
|
49
|
|
|
@@ -325,6 +327,7 @@ cmRC_t _cmLexInstallMatcher( cmLex* p, unsigned typeId, cmLexMatcherFuncPtr_t f
|
325
|
327
|
p->mfp[p->mfi].typeId = typeId;
|
326
|
328
|
p->mfp[p->mfi].funcPtr = funcPtr;
|
327
|
329
|
p->mfp[p->mfi].userPtr = userPtr;
|
|
330
|
+ p->mfp[p->mfi].enableFl = true;
|
328
|
331
|
|
329
|
332
|
if( keyStr != NULL )
|
330
|
333
|
{
|
|
@@ -637,6 +640,21 @@ cmRC_t cmLexRegisterMatcher( cmLexH h, unsigned id, cmLexUserMatcher
|
637
|
640
|
return _cmLexInstallMatcher( p, id, NULL, NULL, userPtr );
|
638
|
641
|
}
|
639
|
642
|
|
|
643
|
+// Enable or disable the matcher associated with the token type 'id'.
+// Disabled matchers are skipped during token recognition (see the
+// enableFl test in cmLexGetNextToken()).
+// h        : lexer handle.
+// id       : token type id of the matcher to enable/disable.
+// enableFl : true to enable the matcher, false to disable it.
+// Returns cmOkRC on success, or kInvalidLexTIdLexRC if no installed
+// matcher has the given token type id.
+cmRC_t cmLexEnableToken( cmLexH h, unsigned id, bool enableFl )
+{
+  cmLex* p = _cmLexHandleToPtr(h);
+
+  // linear scan of the installed matcher array for a matching type id
+  unsigned mi = 0;
+  for(; mi<p->mfi; ++mi)
+    if( p->mfp[mi].typeId == id )
+    {
+      p->mfp[mi].enableFl = enableFl;
+      return cmOkRC;
+    }
+
+  // 'id' did not match any installed matcher.
+  // NOTE: 'id' is unsigned, so the format conversion must be %u (using
+  // %i with an unsigned argument is a format/argument type mismatch).
+  return _cmLexError( p, kInvalidLexTIdLexRC, "%u is not a valid token type id.",id);
+}
|
|
657
|
+
|
640
|
658
|
unsigned cmLexFilterFlags( cmLexH h )
|
641
|
659
|
{
|
642
|
660
|
cmLex* p = _cmLexHandleToPtr(h);
|
|
@@ -669,32 +687,44 @@ unsigned cmLexGetNextToken( cmLexH h )
|
669
|
687
|
p->curTokenCharCnt = 0;
|
670
|
688
|
|
671
|
689
|
|
|
690
|
+ // try each matcher
|
672
|
691
|
for(; mi<p->mfi; ++mi)
|
673
|
|
- {
|
674
|
|
- unsigned charCnt = 0;
|
675
|
|
- if( p->mfp[mi].funcPtr != NULL )
|
676
|
|
- charCnt = p->mfp[mi].funcPtr(p, p->cp + p->ci, p->cn - p->ci, p->mfp[mi].tokenStr );
|
677
|
|
- else
|
678
|
|
- charCnt = p->mfp[mi].userPtr( p->cp + p->ci, p->cn - p->ci);
|
679
|
|
-
|
680
|
|
- if( cmErrLastRC(&p->err) != kOkLexRC )
|
681
|
|
- return kErrorLexTId;
|
682
|
|
-
|
683
|
|
- // if this matched token is longer then the prev. matched token or
|
684
|
|
- // if the prev matched token was an identifier and this matched token is an equal length user defined token
|
685
|
|
- if( (charCnt > maxCharCnt) || (charCnt>0 && charCnt==maxCharCnt && p->mfp[maxIdx].typeId==kIdentLexTId && p->mfp[mi].typeId >=kUserLexTId ) )
|
|
692
|
+ if( p->mfp[mi].enableFl )
|
686
|
693
|
{
|
687
|
|
- maxCharCnt = charCnt;
|
688
|
|
- maxIdx = mi;
|
689
|
|
- }
|
|
694
|
+ unsigned charCnt = 0;
|
|
695
|
+ if( p->mfp[mi].funcPtr != NULL )
|
|
696
|
+ charCnt = p->mfp[mi].funcPtr(p, p->cp + p->ci, p->cn - p->ci, p->mfp[mi].tokenStr );
|
|
697
|
+ else
|
|
698
|
+ charCnt = p->mfp[mi].userPtr( p->cp + p->ci, p->cn - p->ci);
|
|
699
|
+
|
|
700
|
+ if( cmErrLastRC(&p->err) != kOkLexRC )
|
|
701
|
+ return kErrorLexTId;
|
|
702
|
+
|
|
703
|
+ // if this matched token is longer than the prev. matched token or
|
|
704
|
+ // if the prev matched token was an identifier and this matched token is an equal length user defined token
|
|
705
|
+ if( (charCnt > maxCharCnt)
|
|
706
|
+ || (charCnt>0 && charCnt==maxCharCnt && p->mfp[maxIdx].typeId==kIdentLexTId && p->mfp[mi].typeId >=kUserLexTId )
|
|
707
|
+ || (charCnt>0 && charCnt<maxCharCnt && p->mfp[maxIdx].typeId==kIdentLexTId && p->mfp[mi].typeId >=kUserLexTId && cmIsFlag(p->flags,kUserDefPriorityLexFl))
|
|
708
|
+ )
|
|
709
|
+ {
|
|
710
|
+ maxCharCnt = charCnt;
|
|
711
|
+ maxIdx = mi;
|
|
712
|
+ }
|
690
|
713
|
|
691
|
|
- }
|
|
714
|
+ }
|
692
|
715
|
|
693
|
716
|
// no token was matched
|
694
|
717
|
if( maxIdx == cmInvalidIdx )
|
695
|
718
|
{
|
696
|
|
- _cmLexError( p, kNoMatchLexRC, "Unable to recognize token:'%c'.",*(p->cp+p->ci));
|
697
|
|
- return kErrorLexTId;
|
|
719
|
+ if( cmIsFlag(p->flags,kReturnUnknownLexFl) )
|
|
720
|
+ {
|
|
721
|
+ maxCharCnt = 1;
|
|
722
|
+ }
|
|
723
|
+ else
|
|
724
|
+ {
|
|
725
|
+ _cmLexError( p, kNoMatchLexRC, "Unable to recognize token:'%c'.",*(p->cp+p->ci));
|
|
726
|
+ return kErrorLexTId;
|
|
727
|
+ }
|
698
|
728
|
}
|
699
|
729
|
|
700
|
730
|
// update the current line and column position
|
|
@@ -716,16 +746,19 @@ unsigned cmLexGetNextToken( cmLexH h )
|
716
|
746
|
|
717
|
747
|
bool returnFl = true;
|
718
|
748
|
|
719
|
|
- // check the space token filter
|
720
|
|
- if( (p->mfp[ maxIdx ].typeId == kSpaceLexTId) && (cmIsFlag(p->flags,kReturnSpaceLexFl)==0) )
|
721
|
|
- returnFl = false;
|
|
749
|
+ if( maxIdx != cmInvalidIdx )
|
|
750
|
+ {
|
|
751
|
+ // check the space token filter
|
|
752
|
+ if( (p->mfp[ maxIdx ].typeId == kSpaceLexTId) && (cmIsFlag(p->flags,kReturnSpaceLexFl)==0) )
|
|
753
|
+ returnFl = false;
|
722
|
754
|
|
723
|
|
- // check the comment token filter
|
724
|
|
- if( _cmLexIsCommentTypeId(p->mfp[ maxIdx ].typeId) && (cmIsFlag(p->flags,kReturnCommentsLexFl)==0) )
|
725
|
|
- returnFl = false;
|
|
755
|
+ // check the comment token filter
|
|
756
|
+ if( _cmLexIsCommentTypeId(p->mfp[ maxIdx ].typeId) && (cmIsFlag(p->flags,kReturnCommentsLexFl)==0) )
|
|
757
|
+ returnFl = false;
|
|
758
|
+ }
|
726
|
759
|
|
727
|
760
|
// update the lexer state
|
728
|
|
- p->curTokenId = p->mfp[ maxIdx ].typeId;
|
|
761
|
+ p->curTokenId = maxIdx==cmInvalidIdx ? kUnknownLexTId : p->mfp[ maxIdx ].typeId;
|
729
|
762
|
p->curTokenCharIdx = p->ci;
|
730
|
763
|
p->curTokenCharCnt = maxCharCnt;
|
731
|
764
|
|