diff --git a/patches/patches/103_tokenization.patch b/patches/patches/103_tokenization.patch
index 98f645b..5fab36d 100644
--- a/patches/patches/103_tokenization.patch
+++ b/patches/patches/103_tokenization.patch
@@ -1,10 +1,19 @@
---- exercises/103_tokenization.zig	2023-10-03 22:15:22.125574535 +0200
-+++ answers/103_tokenization.zig	2023-10-05 20:04:07.309438291 +0200
+--- exercises/103_tokenization.zig	2023-10-05 21:29:56.965283604 +0200
++++ answers/103_tokenization.zig	2023-10-05 21:30:19.815708910 +0200
+@@ -62,7 +62,7 @@
+ // // A standard tokenizer is called (Zig has several) and
+ // // used to locate the positions of the respective separators
+ // // (we remember, space and comma) and pass them to an iterator.
+-// var it = std.mem.tokenizeAny(u8, input, " ,");
++// var it = std.mem.tokenize(u8, input, " ,");
+ //
+ // // The iterator can now be processed in a loop and the
+ // // individual numbers can be transferred.
 @@ -136,7 +136,7 @@
      ;
  
      // now the tokenizer, but what do we need here?
--    var it = std.mem.tokenize(u8, poem, ???);
+-    var it = std.mem.tokenizeAny(u8, poem, ???);
 +    var it = std.mem.tokenize(u8, poem, " ,;!\n");
  
      // print all words and count them
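
Note (not part of the patch): a minimal Zig sketch of what the patched answer line does, assuming a hypothetical sample string in place of the exercise's poem. std.mem.tokenize treats every byte of " ,;!\n" as a separator, and the iterator yields one word per call to next().

const std = @import("std");

pub fn main() void {
    // Hypothetical sample text standing in for the exercise's poem.
    const poem = "one, two; three!\nfour";

    // Same call as in the patched answer: every byte in " ,;!\n"
    // acts as a delimiter between tokens.
    var it = std.mem.tokenize(u8, poem, " ,;!\n");

    // Print all words and count them, as the exercise asks.
    var cnt: usize = 0;
    while (it.next()) |word| {
        std.debug.print("{s} ", .{word});
        cnt += 1;
    }
    std.debug.print("\n{d} words\n", .{cnt});
}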