diff --git a/examples/error-report.rs b/examples/error-report.rs
index 56eee822..e14bc9dc 100644
--- a/examples/error-report.rs
+++ b/examples/error-report.rs
@@ -1,4 +1,4 @@
-use rnix::parser::ParseError;
+use rnix::ParseError;
 use std::{env, fs};
 
 fn main() {
diff --git a/src/lib.rs b/src/lib.rs
index 342ca21c..39e1b8aa 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,23 +2,22 @@
 mod macros;
 
 pub mod ast;
 mod kinds;
-pub mod parser;
+mod parser;
 #[cfg(test)]
 mod tests;
 mod token_set;
-pub mod tokenizer;
+mod tokenizer;
 
 use std::marker::PhantomData;
 
 pub use self::{kinds::SyntaxKind, tokenizer::tokenize};
 use ast::AstNode;
-use parser::ParseError;
+pub use parser::ParseError;
 use rowan::GreenNode;
 pub use rowan::{NodeOrToken, TextRange, TextSize, TokenAtOffset, WalkEvent};
 pub(crate) use token_set::TokenSet;
-
-use self::tokenizer::Tokenizer;
+pub use tokenizer::Token;
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub enum NixLanguage {}
@@ -45,7 +44,7 @@ pub use ast::Root;
 
 impl Root {
     pub fn parse(s: &str) -> Parse {
-        let (green, errors) = parser::parse(Tokenizer::new(s));
+        let (green, errors) = parser::parse(tokenize(s));
         Parse { green, errors, _ty: PhantomData }
     }
 }
diff --git a/src/tokenizer.rs b/src/tokenizer.rs
index 4cc734cb..9a30d988 100644
--- a/src/tokenizer.rs
+++ b/src/tokenizer.rs
@@ -46,13 +46,12 @@ impl Eq for State<'_> {}
 
 pub type Token<'a> = (SyntaxKind, &'a str);
 
-/// A convenience function for tokenizing the given input
-pub fn tokenize(input: &str) -> Vec<Token<'_>> {
-    Tokenizer::new(input).collect()
+/// Tokenize the given input
+pub fn tokenize(input: &str) -> impl Iterator<Item = Token<'_>> + '_ {
+    Tokenizer::new(input)
 }
 
-/// The tokenizer. You may want to use the `tokenize` convenience function from this module instead.
-pub struct Tokenizer<'a> {
+struct Tokenizer<'a> {
     ctx: Vec<Context>,
     state: State<'a>,
 }
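
For downstream users, the visible change is the crate's public surface: the `parser` and `tokenizer` modules become private, `ParseError` and `Token` are re-exported from the crate root, and `tokenize` now returns a lazy iterator rather than a `Vec`. Below is a minimal caller-side sketch, not part of this diff; the sample input and printed output are illustrative assumptions.

```rust
use rnix::{tokenize, Token};

fn main() {
    let input = "1 + 2";

    // `tokenize` now returns a lazy iterator of `Token<'_>`, i.e.
    // `(SyntaxKind, &str)` pairs, so tokens can be streamed as they
    // are produced...
    for (kind, text) in tokenize(input) {
        println!("{:?} {:?}", kind, text);
    }

    // ...while callers that depended on the old `Vec`-returning
    // signature can recover it with a `collect()`.
    let tokens: Vec<Token<'_>> = tokenize(input).collect();
    println!("{} tokens", tokens.len());

    // Parsing is unchanged for callers; internally `Root::parse` now
    // feeds the parser from the same public `tokenize` entry point
    // instead of constructing the (now private) `Tokenizer` directly.
    let _parse = rnix::Root::parse(input);

    // Error types that used to be imported as `rnix::parser::ParseError`
    // now come from the root re-export, as in examples/error-report.rs:
    // `use rnix::ParseError;`
}
```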