diff --git a/generators/x86_64.py b/generators/x86_64.py
index b8c2291..c4f3039 100644
--- a/generators/x86_64.py
+++ b/generators/x86_64.py
@@ -704,7 +704,12 @@ class X86_64Generator(Generator):
             value_type = type(value)
             if value_type == str:
                 if not const["no_string"]:
-                    final = f'db "' + value.replace("\\n", "\", 10, \"") + "\", 0"
+                    value = value.replace("\"", "\", 34, \"")
+                    value = value.replace("\r", "\", 13, \"")
+                    value = value.replace("\n", "\", 10, \"")
+                    value = value.replace("\a", "\", 7, \"")
+
+                    final = f'db "' + value + "\", 0"
                     final = final.replace(", \"\", ", ", ")
                     f.write(final)
                 else:
diff --git a/out b/out
index f148d25..d1f28de 100644
Binary files a/out and b/out differ
diff --git a/test.grnd b/test.grnd
index e69de29..c961dad 100644
--- a/test.grnd
+++ b/test.grnd
@@ -0,0 +1,6 @@
+@loop
+stdout "/\r"
+stdout "-\r"
+stdout "\\r"
+stdout "|\r"
+jump %loop
\ No newline at end of file
diff --git a/tokenizer.py b/tokenizer.py
index 173f21e..9adc167 100644
--- a/tokenizer.py
+++ b/tokenizer.py
@@ -379,7 +379,8 @@ def tokenize(input_string: str):
             current_char = input_string[pos]
 
             while current_char != '"':
-                current_token += current_char
+                if current_char != "\\":
+                    current_token += current_char
                 pos += 1
                 column += 1
                 if pos > len(input_string)-1:
@@ -393,12 +394,11 @@
                     column += 1
                 while pos <= len(input_string)-1:
                     escape += input_string[pos]
-                    print(escape)
 
-                    valid_escapes = ['"', 'n', 't', 'a', 'r']
+                    valid_escapes = ['"', 'n', 't', 'a', 'r', '\\']
 
                     if escape == '"':
-                        current_token += 'aaaaaaa'
+                        current_token += '"'
                    elif escape == "n":
                         current_token += '\n'
                     elif escape == "t":
@@ -407,6 +407,8 @@
                         current_token += "\a"
                     elif escape == "r":
                         current_token += "\r"
+                    elif escape == "\\":
+                        current_token += "\\"
 
                     if escape in valid_escapes:
                         break
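
For reference, a minimal standalone sketch of the string-to-db conversion performed by the changed hunk in generators/x86_64.py, with the escape handling pulled out of the class so it can be run on its own. The helper name to_db_directive is hypothetical and not part of the repository; it only mirrors the replace() chain shown in the diff.

# Minimal sketch (assumption: reproduces only the replace() chain from the
# diff, not the rest of X86_64Generator). The helper name is hypothetical.
def to_db_directive(value: str) -> str:
    # Split characters that cannot sit inside a NASM quoted string out of the
    # literal and emit them as numeric byte values, like the new code does.
    value = value.replace("\"", "\", 34, \"")  # double quote    -> byte 34
    value = value.replace("\r", "\", 13, \"")  # carriage return -> byte 13
    value = value.replace("\n", "\", 10, \"")  # line feed       -> byte 10
    value = value.replace("\a", "\", 7, \"")   # bell            -> byte 7

    final = 'db "' + value + '", 0'            # NUL-terminate the constant
    # Drop the empty "" fragments left when an escape lands at a boundary.
    final = final.replace(", \"\", ", ", ")
    return final


if __name__ == "__main__":
    print(to_db_directive("/\r"))   # db "/", 13, 0
    print(to_db_directive("hi\n"))  # db "hi", 10, 0

Given the tokenizer change, a line such as stdout "/\r" in test.grnd should reach the generator as "/" followed by a carriage return, so the emitted constant becomes db "/", 13, 0 and each spinner frame overwrites the previous one.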