Fixed grammar, cleaned up comments, and added a greetings banner

Tiago Sousa 2023-05-28 18:06:12 +01:00
parent acd8a6a8c7
commit f04cd34656
4 changed files with 45 additions and 27 deletions


@@ -4,16 +4,20 @@
  / / / /_/ / / / / /___/ ____/ ___ |/ _, _/___/ / /___/ _, _/
 /_/ \____/_/ /_/_____/_/ /_/ |_/_/ |_|/____/_____/_/ |_|
-toml : content
+toml : newlines content
+     | content
 content : content tomlEntries
         | tomlEntries
-tomlEntries : table
-            | object
+tomlEntries : table newlines
+            | object newlines
-table : '[' ID ']'
-      | '[' '[' ID ']' ']'
+table : TABLE
+      | ARRTABLE
+newlines : newlines NEWLINE
+         | NEWLINE
 object : key '=' value
        | key '=' array
@@ -48,6 +52,7 @@ key : ID
     | BIN
     | OCT
     | INF
+    | BOOL
     | NAN
 value : STR
@@ -59,5 +64,6 @@ value : STR
       | BIN
       | OCT
       | INF
+      | BOOL
       | NAN
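
Editor's note: the revised grammar now requires each table/object entry to end in at least one line break, via the new `newlines` non-terminal. As a sketch of how these productions translate into ply.yacc rules (not part of the commit; the grammar text matches the diff above, but the actions and function names here are assumed):

    # Editor's sketch: the revised productions as ply.yacc rule stubs.
    def p_toml(p):
        """toml : newlines content
                | content"""
        p[0] = p[len(p) - 1]  # content is always the last symbol

    def p_tomlEntries(p):
        """tomlEntries : table newlines
                       | object newlines"""
        p[0] = p[1]  # the trailing newlines only delimit the entry

    def p_newlines(p):
        """newlines : newlines NEWLINE
                    | NEWLINE"""
        pass  # NEWLINE tokens carry no value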


@@ -13,7 +13,7 @@ tokens = [
     "HEX",
     "BIN",
     "OCT",
-    "FLOAT", # need to implement exponents check https://toml.io/en/
+    "FLOAT",
     "BOOL",
     "INF",
     "NAN",
@@ -65,25 +65,21 @@ def t_dict_close_dict(t):
     t.lexer.pop_state()
     return t
-# needs to check if datetime is valid
 def t_DATETIME(t):
     r"\d{4}-\d{2}-\d{2}[T ]\d{2}:\d{2}:\d{2}(\.\d{1,6})?(Z|[+-]\d{2}:\d{2})"
     return t
-# needs to check if date is valid
 def t_DATE(t):
     r"\d{4}-\d{2}-\d{2}"
     return t
-# needs to check if time is valid
 def t_TIME(t):
     r"\d{2}:\d{2}:\d{2}(\.\d{1,6})?"
     return t
-# needs number grouping (example : flt8 = 224_617.445_991_228)
 def t_FLOAT(t):
     r"[+-]?\d+(_\d+)*\s*\.\s*\d+(_\d+)*([eE][-+]?\d+(_\d+)*)?"
     #case where float appears on the left side with spaces in between
@@ -93,13 +89,11 @@ def t_FLOAT(t):
     return t
-# needs number grouping (example : int6 = 5_349_221)
 def t_INT(t):
     r"[-+]?(\d+(_\d+)*)"
     return t
-# needs number grouping (example : hex3 = 0xdead_beef)
 def t_HEX(t):
     r"0x[0-9a-fA-F]+(_[0-9a-fA-F]+)*"
     return t
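
Editor's note: the number-grouping TODO comments removed above were already satisfied by the `(_\d+)*`-style groups in these regexes, which is presumably why they were deleted. A quick sanity check (not part of the commit), with the patterns copied verbatim from the diff:

    import re

    FLOAT = r"[+-]?\d+(_\d+)*\s*\.\s*\d+(_\d+)*([eE][-+]?\d+(_\d+)*)?"
    INT = r"[-+]?(\d+(_\d+)*)"
    HEX = r"0x[0-9a-fA-F]+(_[0-9a-fA-F]+)*"

    assert re.fullmatch(FLOAT, "224_617.445_991_228")  # grouped float from the old TODO
    assert re.fullmatch(INT, "5_349_221")              # grouped int
    assert re.fullmatch(HEX, "0xdead_beef")            # grouped hex
    assert not re.fullmatch(INT, "5__349")             # consecutive underscores rejected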
@@ -131,7 +125,6 @@ def t_BOOL(t):
     return t
-# ID needs to be the last so it doesnt catch everything (literally)
 def t_ID(t):
     r"(([\w_]+)|(\"[\w_]+\"|\'[\w_]+\')\s*\.\s*([\w_]+|\"[\w_]+\"|\'[\w_]+\'))(\s*\.\s*([\w_]+|\"[\w_]+\"|\'[\w_]+\'))*"
     t.value = [s.strip(" \"'") for s in t.value.split('.')]
@@ -144,7 +137,6 @@ def t_MLSTR(t):
     return t
-# STR needs to be the first one to catch
 def t_STR(t):
     r"(\"(?:[^\"\\]|\\.)*\")|(\'[^\']*\')"
     t.value = t.value.strip("\"'")
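
Editor's note: the removed ordering comments reflected how ply.lex works: token functions are tried in definition order, so t_STR comes early and the catch-all t_ID comes last. The t_ID action splits a dotted key into a key path; an illustration of that one line (the input here is made up):

    # What `t.value = [s.strip(" \"'") for s in t.value.split('.')]` yields:
    value = 'servers . "alpha" . ip'
    assert [s.strip(" \"'") for s in value.split('.')] == ["servers", "alpha", "ip"]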


@@ -158,7 +158,6 @@ def p_dict_empty(p):
 def p_dictCont_multiple(p):
     """dictCont : dictCont ',' dictElem"""
-    # check that the dicts have no repeated keys
     duplicate_list = [k for k in p[1] if k in p[3]]
     for dup in duplicate_list:
         print(f"Duplicate inline-table key {dup}")
@@ -192,7 +191,7 @@ def p_key_id(p):
     p[0] = p[1]
-# the rest of the cases are the specific cases where the key as the same format as a float/int/etc
+# the rest of the cases are the specific cases where the key is the same format as a float/int/etc
 # so we need make them a singleton list.
 def p_key_rest(p):
     """key : STR


@@ -4,20 +4,41 @@ from tokenizer import tokenizer
 import sys
 import argparse
+def greetings():
+    print(r"    ____  __  __________  __  _____ ")
+    print(r"   / __ \/ /  /_  __/ __ \/  |/  / /")
+    print(r"  / /_/ / /    / / / / / / /|_/ / / ")
+    print(r" / ____/ /___/ /  / /_/ / /  / / /___")
+    print(r"/_/   /_____/_/   \____/_/  /_/_____/")
+    print(r"--------------------------------------")
+    print(r"Authors: Tiago Sousa and Afonso Franco")
+    print(r"--------------------------------------")
 def main():
-    sys.argv
-    argv_parser = argparse.ArgumentParser(prog="PLTOML",description="A command line tool to convert toml files into json using ply.")
-    argv_parser.add_argument("-i","--input",help="The filepath to the target input file")
-    argv_parser.add_argument("-o","--output",help="The filepath to the target output file")
-    argv_parser.add_argument("-t","--tokenizer",help="This feature allows you to inspect all the tokens captured by the lexer (should only be used for debugging)")
+    if sys.stdout.isatty():
+        greetings()
+    argv_parser = argparse.ArgumentParser(
+        prog="PLTOML",
+        description="A command line tool to convert toml files into json using ply.",
+    )
+    argv_parser.add_argument(
+        "-i", "--input", help="The filepath to the target input file"
+    )
+    argv_parser.add_argument(
+        "-o", "--output", help="The filepath to the target output file"
+    )
+    argv_parser.add_argument(
+        "-t",
+        "--tokenizer",
+        help="This feature allows you to inspect all the tokens captured by the lexer (should only be used for debugging)",
+    )
     args = argv_parser.parse_args()
     if args.tokenizer is not None:
-        tokenizer(args.input,args.output)
+        tokenizer(args.input, args.output)
     else:
-        parse(args.input,args.output)
+        parse(args.input, args.output)
 if __name__ == "__main__":
     main()
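
Editor's note: the new `sys.stdout.isatty()` guard prints the banner only in an interactive terminal, so piped or redirected JSON output stays clean. How the arguments come out for a typical invocation, as a standalone sketch (file names are made up for illustration):

    import argparse

    parser = argparse.ArgumentParser(prog="PLTOML")
    parser.add_argument("-i", "--input")
    parser.add_argument("-o", "--output")
    parser.add_argument("-t", "--tokenizer")

    # equivalent to: python main.py -i in.toml -o out.json
    args = parser.parse_args(["-i", "in.toml", "-o", "out.json"])
    assert args.input == "in.toml"
    assert args.tokenizer is None  # so main() takes the parse() branch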