eclipse plugin: Reorganize the tests a bit

Timotei Dolean 2011-07-26 15:42:32 +00:00
parent 19791b6850
commit e11276bce8
3 changed files with 128 additions and 116 deletions

org/wesnoth/tests/WMLTests.java
@@ -1,30 +1,37 @@
 package org.wesnoth.tests;
+import java.io.StringReader;
 import java.util.List;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.CharStream;
 import org.antlr.runtime.Token;
+import org.eclipse.xtext.ParserRule;
 import org.eclipse.xtext.junit.AbstractXtextTests;
+import org.eclipse.xtext.nodemodel.INode;
 import org.eclipse.xtext.parser.IParseResult;
 import org.eclipse.xtext.parser.IParser;
 import org.eclipse.xtext.parser.antlr.ITokenDefProvider;
 import org.eclipse.xtext.parser.antlr.Lexer;
 import org.eclipse.xtext.parser.antlr.XtextTokenStream;
+import org.wesnoth.WMLStandaloneSetup;
+import org.wesnoth.services.WMLGrammarAccess;
-import sun.reflect.generics.reflectiveObjects.NotImplementedException;
+@SuppressWarnings( "all" )
 public abstract class WMLTests extends AbstractXtextTests
 {
+    protected WMLGrammarAccess grammar;
     private Lexer lexer;
     private ITokenDefProvider tokenDefProvider;
     private IParser parser;
-    protected Lexer getLexer() {
+    protected Lexer getLexer()
+    {
         return lexer;
     }
-    protected ITokenDefProvider getTokenDefProvider() {
+    protected ITokenDefProvider getTokenDefProvider()
+    {
         return tokenDefProvider;
     }
@@ -34,115 +41,126 @@ public abstract class WMLTests extends AbstractXtextTests
         return parser;
     }
-    @SuppressWarnings("rawtypes")
-    abstract Class getStandaloneSetupClass();
+    Class getStandaloneSetupClass() {
+        return WMLStandaloneSetup.class;
+    }
-    @SuppressWarnings("unchecked")
     @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-        with(getStandaloneSetupClass());
-        lexer = get(Lexer.class);
-        tokenDefProvider = get(ITokenDefProvider.class);
-        parser = get(IParser.class);
+    protected void setUp() throws Exception
+    {
+        super.setUp( );
+        with( getStandaloneSetupClass( ) );
+        lexer = get( Lexer.class );
+        tokenDefProvider = get( ITokenDefProvider.class );
+        parser = get( IParser.class );
+        grammar = ( WMLGrammarAccess ) getGrammarAccess( );
     }
     /**
      * return the list of tokens created by the lexer from the given input
-     * */
+     */
-    protected List<Token> getTokens(String input) {
-        CharStream stream = new ANTLRStringStream(input);
-        getLexer().setCharStream(stream);
-        XtextTokenStream tokenStream = new XtextTokenStream(getLexer(),
-                getTokenDefProvider());
-        @SuppressWarnings("unchecked")
-        List<Token> tokens = tokenStream.getTokens();
+    protected List<Token> getTokens( String input )
+    {
+        CharStream stream = new ANTLRStringStream( input );
+        getLexer( ).setCharStream( stream );
+        XtextTokenStream tokenStream = new XtextTokenStream( getLexer( ), getTokenDefProvider( ) );
+        List<Token> tokens = tokenStream.getTokens( );
         return tokens;
     }
     /**
      * return the name of the terminal rule for a given token
-     * */
+     */
-    protected String getTokenType(Token token) {
-        return getTokenDefProvider().getTokenDefMap().get(token.getType());
+    protected String getTokenType( Token token )
+    {
+        return getTokenDefProvider( ).getTokenDefMap( ).get( token.getType( ) );
     }
     /**
      * check whether an input is chopped into a list of expected token types
-     * */
+     */
-    protected void checkTokenisation(String input, String... expectedTokenTypes) {
-        List<Token> tokens = getTokens(input);
-        assertEquals(input, expectedTokenTypes.length, tokens.size());
-        for (int i = 0; i < tokens.size(); i++) {
-            Token token = tokens.get(i);
-            assertEquals(input, expectedTokenTypes[i], getTokenType(token));
+    protected void checkTokenisation( String input, String... expectedTokenTypes )
+    {
+        List<Token> tokens = getTokens( input );
+        assertEquals( input, expectedTokenTypes.length, tokens.size( ) );
+        for ( int i = 0; i < tokens.size( ); i++ ) {
+            Token token = tokens.get( i );
+            assertEquals( input, expectedTokenTypes[i], getTokenType( token ) );
         }
     }
-    protected void showTokenisation(String input) {
-        List<Token> tokens = getTokens(input);
-        for (int i = 0; i < tokens.size(); i++) {
-            Token token = tokens.get(i);
-            System.out.println(getTokenType(token));
+    protected void showTokenisation( String input )
+    {
+        List<Token> tokens = getTokens( input );
+        for ( int i = 0; i < tokens.size( ); i++ ) {
+            Token token = tokens.get( i );
+            System.out.println( getTokenType( token ) );
        }
    }
    /**
     * check that an input is not tokenised using a particular terminal rule
-     * */
+     */
-    protected void failTokenisation(String input, String unExpectedTokenType) {
-        List<Token> tokens = getTokens(input);
-        assertEquals(input, 1, tokens.size());
-        Token token = tokens.get(0);
-        assertNotSame(input, unExpectedTokenType, getTokenType(token));
+    protected void failTokenisation( String input, String unExpectedTokenType )
+    {
+        List<Token> tokens = getTokens( input );
+        assertEquals( input, 1, tokens.size( ) );
+        Token token = tokens.get( 0 );
+        assertNotSame( input, unExpectedTokenType, getTokenType( token ) );
    }
    /**
     * return the parse result for an input given a specific entry rule of the
     * grammar
-     * */
+     */
-    protected IParseResult getParseResult(String input, String entryRule) {
-        //return getParser().parse(entryRule, new StringReader(input));
-        throw new NotImplementedException();
+    protected IParseResult getParseResult( String input, ParserRule entryRule )
+    {
+        return getParser( ).parse( entryRule, new StringReader( input ) );
    }
    /**
     * check that the input can be successfully parsed given a specific entry
     * rule of the grammar
-     * */
+     */
-    protected void checkParsing(String input, String entryRule) {
-        IParseResult la = getParseResult(input, entryRule);
-        System.out.println(la.hasSyntaxErrors( ));
-        assertEquals(input, false, la.hasSyntaxErrors( ));
+    protected void checkParsing( String input, ParserRule entryRule )
+    {
+        IParseResult la = getParseResult( input, entryRule );
+        for ( INode node : la.getSyntaxErrors( ) ) {
+            System.out.println( node.getSyntaxErrorMessage( ) );
+        }
+        assertEquals( input, false, la.hasSyntaxErrors( ) );
    }
    /**
     * check that the input cannot be successfully parsed given a specific entry
     * rule of the grammar
-     * */
+     */
-    protected void failParsing(String input, String entryRule) {
-        IParseResult la = getParseResult(input, entryRule);
-        assertNotSame(input, false, la.hasSyntaxErrors( ));
+    protected void failParsing( String input, ParserRule entryRule )
+    {
+        IParseResult la = getParseResult( input, entryRule );
+        assertNotSame( input, false, la.hasSyntaxErrors( ) );
    }
    /**
     * check that input is treated as a keyword by the grammar
-     * */
+     */
-    protected void checkKeyword(String input) {
+    protected void checkKeyword( String input )
+    {
        // the rule name for a keyword is usually
        // the keyword enclosed in single quotes
-        String rule = new StringBuilder("'").append(input).append("'") //$NON-NLS-1$ //$NON-NLS-2$
-                .toString();
-        checkTokenisation(input, rule);
+        String rule = new StringBuilder( "'" ).append( input ).append( "'" ) //$NON-NLS-1$ //$NON-NLS-2$
+            .toString( );
+        checkTokenisation( input, rule );
    }
    /**
     * check that input is not treated as a keyword by the grammar
-     * */
+     */
-    protected void failKeyword(String keyword) {
-        List<Token> tokens = getTokens(keyword);
-        assertEquals(keyword, 1, tokens.size());
-        String type = getTokenType(tokens.get(0));
-        assertFalse(keyword, type.charAt(0) == '\'');
+    protected void failKeyword( String keyword )
+    {
+        List<Token> tokens = getTokens( keyword );
+        assertEquals( keyword, 1, tokens.size( ) );
+        String type = getTokenType( tokens.get( 0 ) );
+        assertFalse( keyword, type.charAt( 0 ) == '\'' );
    }
 }

org/wesnoth/tests/WMLTestsImpl.java
@@ -1,49 +0,0 @@
-package org.wesnoth.tests;
-import org.wesnoth.WMLStandaloneSetup;
-public class WMLTestsImpl extends WMLTests
-{
-    @SuppressWarnings("rawtypes")
-    @Override
-    Class getStandaloneSetupClass() {
-        return WMLStandaloneSetup.class;
-    }
-    //for convenience, define constants for the
-    //rule names in your grammar
-    //the names of terminal rules will be capitalised
-    //and "RULE_" will be appended to the front
-    private static final String ID="RULE_ID"; //$NON-NLS-1$
-    private static final String INT="RULE_INT"; //$NON-NLS-1$
-    private static final String WS="RULE_WS"; //$NON-NLS-1$
-    private static final String SL_COMMENT="RULE_SL_COMMENT"; //$NON-NLS-1$
-    public void testINT()
-    {
-        checkTokenisation("1", INT); //$NON-NLS-1$
-    }
-    public void testID(){
-        checkTokenisation("a", ID); //$NON-NLS-1$
-        checkTokenisation("abc", ID); //$NON-NLS-1$
-        checkTokenisation("abc123", ID); //$NON-NLS-1$
-        checkTokenisation("abc_123", ID); //$NON-NLS-1$
-    }
-    public void testSLCOMMENT(){
-        checkTokenisation("#comment", SL_COMMENT); //$NON-NLS-1$
-        checkTokenisation("#comment\n", SL_COMMENT); //$NON-NLS-1$
-        checkTokenisation("# comment \t\t comment\r\n", SL_COMMENT); //$NON-NLS-1$
-    }
-    public void testTokenSequences(){
-        showTokenisation("amount=+$random\n"); //$NON-NLS-1$
-        checkParsing("amount=+$random", "WMLKey"); //$NON-NLS-1$ //$NON-NLS-2$
-        checkTokenisation("123 abc", ID, WS, ID); //$NON-NLS-1$
-        checkTokenisation("123 \t#comment\n abc", ID, WS, SL_COMMENT,WS,ID); //$NON-NLS-1$
-        //note that no white space is necessary!
-        checkTokenisation("123abc", ID); //$NON-NLS-1$
-    }
-}

org/wesnoth/tests/grammar/WMLGrammarTokens.java
@@ -0,0 +1,43 @@
+package org.wesnoth.tests.grammar;
+import org.wesnoth.tests.WMLTests;
+/**
+ * Tests parts of the grammar
+ */
+public class WMLGrammarTokens extends WMLTests
+{
+    // for convenience, define constants for the
+    // rule names in your grammar
+    // the names of terminal rules will be capitalised
+    // and "RULE_" will be appended to the front
+    private static final String ID = "RULE_ID";
+    private static final String WS = "RULE_WS";
+    private static final String SL_COMMENT = "RULE_SL_COMMENT";
+    public void testID()
+    {
+        checkTokenisation( "1", ID );
+        checkTokenisation( "a", ID );
+        checkTokenisation( "abc", ID );
+        checkTokenisation( "abc123", ID );
+        checkTokenisation( "abc_123", ID );
+    }
+    public void testSLCOMMENT()
+    {
+        checkTokenisation( "#comment", SL_COMMENT );
+        checkTokenisation( "#comment\n", SL_COMMENT );
+        checkTokenisation( "# comment \t\t comment\r\n", SL_COMMENT );
+    }
+    public void testTokenSequences()
+    {
+        checkParsing( "amount=+$random\r\n", grammar.getWMLKeyRule( ) );
+        checkTokenisation( "123 abc", ID, WS, ID );
+        checkTokenisation( "123 \t#comment\n abc", ID, WS, SL_COMMENT, WS, ID );
+        // note that no white space is necessary!
+        checkTokenisation( "123abc", ID );
+    }
+}