diff --git a/core/odin/tokenizer/token.odin b/core/odin/tokenizer/token.odin index 35cb566be..23808cf44 100644 --- a/core/odin/tokenizer/token.odin +++ b/core/odin/tokenizer/token.odin @@ -147,6 +147,8 @@ Token_Kind :: enum u32 { Context, // context Or_Else, // or_else Or_Return, // or_return + Or_Break, // or_break + Or_Continue, // or_continue Asm, // asm Inline, // inline No_Inline, // no_inline @@ -278,6 +280,8 @@ tokens := [Token_Kind.COUNT]string { "context", "or_else", "or_return", + "or_break", + "or_continue", "asm", "inline", "no_inline", diff --git a/examples/demo/demo.odin b/examples/demo/demo.odin index a320749d6..00dd8a171 100644 --- a/examples/demo/demo.odin +++ b/examples/demo/demo.odin @@ -1786,19 +1786,7 @@ range_statements_with_multiple_return_values :: proc() { data[i] = i32(i*i) } - { - it := make_my_iterator(data) - for val in my_iterator(&it) { - fmt.println(val) - } - } - { - it := make_my_iterator(data) - for val, idx in my_iterator(&it) { - fmt.println(val, idx) - } - } - { + { // Manual Style it := make_my_iterator(data) for { val, _, cond := my_iterator(&it) @@ -1808,6 +1796,25 @@ range_statements_with_multiple_return_values :: proc() { fmt.println(val) } } + { // or_break + it := make_my_iterator(data) + loop: for { + val, _ := my_iterator(&it) or_break loop + fmt.println(val) + } + } + { // first value + it := make_my_iterator(data) + for val in my_iterator(&it) { + fmt.println(val) + } + } + { // first and second value + it := make_my_iterator(data) + for val, idx in my_iterator(&it) { + fmt.println(val, idx) + } + } } @@ -2072,7 +2079,7 @@ or_else_operator :: proc() { // have optional ok semantics v: union{int, f64} i: int - i = v.(int) or_else 123 + i = v.(int) or_else 123 i = v.? or_else 123 // Type inference magic assert(i == 123) @@ -2178,6 +2185,70 @@ or_return_operator :: proc() { foo_2() } + +or_break_and_or_continue_operators :: proc() { + fmt.println("\n#'or_break' and 'or_continue'") + // The concept of 'or_break' and 'or_continue' is very similar to that of 'or_return'. + // The difference is that unlike 'or_return', the value does not get returned from + // the current procedure but rather discarded if it is 'false' or not 'nil', and then + // the specified branch (i.e. break or_continue). + // The or branch expression can be labelled if a specific statement needs to be used. + + Error :: enum { + None, + Something_Bad, + Something_Worse, + The_Worst, + Your_Mum, + } + + caller_1 :: proc() -> Error { + return .Something_Bad + } + + caller_2 :: proc() -> (int, Error) { + return 123, .Something_Worse + } + caller_3 :: proc() -> (int, int, Error) { + return 123, 345, .None + } + + for { // common approach + err := caller_1() + if err != nil { + break + } + } + for { // or_break approach + caller_1() or_break + } + + for { // or_break approach with multiple values + n := caller_2() or_break + _ = n + } + + loop: for { // or_break approach with named label + n := caller_2() or_break loop + _ = n + } + + for { // or_continue + x, y := caller_3() or_continue + _, _ = x, y + + break + } + + continue_loop: for { // or_continue with named label + x, y := caller_3() or_continue continue_loop + _, _ = x, y + + break + } + +} + arbitrary_precision_mathematics :: proc() { fmt.println("\n# core:math/big") @@ -2258,98 +2329,98 @@ matrix_type :: proc() { fmt.println("\n# matrix type") // A matrix is a mathematical type built into Odin. 
It is a regular array of numbers, // arranged in rows and columns - + { // The following represents a matrix that has 2 rows and 3 columns m: matrix[2, 3]f32 - + m = matrix[2, 3]f32{ 1, 9, -13, 20, 5, -6, } - + // Element types of integers, float, and complex numbers are supported by matrices. // There is no support for booleans, quaternions, or any compound type. - + // Indexing a matrix can be used with the matrix indexing syntax // This mirrors othe type usages: type on the left, usage on the right - + elem := m[1, 2] // row 1, column 2 assert(elem == -6) - - + + // Scalars act as if they are scaled identity matrices // and can be assigned to matrices as them b := matrix[2, 2]f32{} f := f32(3) b = f - + fmt.println("b", b) fmt.println("b == f", b == f) - - } - + + } + { // Matrices support multiplication between matrices a := matrix[2, 3]f32{ 2, 3, 1, 4, 5, 0, } - + b := matrix[3, 2]f32{ 1, 2, 3, 4, 5, 6, } - + fmt.println("a", a) fmt.println("b", b) - + c := a * b #assert(type_of(c) == matrix[2, 2]f32) - fmt.tprintln("c = a * b", c) + fmt.tprintln("c = a * b", c) } - + { // Matrices support multiplication between matrices and arrays m := matrix[4, 4]f32{ - 1, 2, 3, 4, - 5, 5, 4, 2, - 0, 1, 3, 0, + 1, 2, 3, 4, + 5, 5, 4, 2, + 0, 1, 3, 0, 0, 1, 4, 1, } - + v := [4]f32{1, 5, 4, 3} - + // treating 'v' as a column vector fmt.println("m * v", m * v) - + // treating 'v' as a row vector fmt.println("v * m", v * m) - + // Support with non-square matrices s := matrix[2, 4]f32{ // [4][2]f32 - 2, 4, 3, 1, - 7, 8, 6, 5, + 2, 4, 3, 1, + 7, 8, 6, 5, } - + w := [2]f32{1, 2} r: [4]f32 = w * s fmt.println("r", r) } - - { // Component-wise operations + + { // Component-wise operations // if the element type supports it // Not support for '/', '%', or '%%' operations - + a := matrix[2, 2]i32{ 1, 2, 3, 4, } - + b := matrix[2, 2]i32{ -5, 1, 9, -7, } - + c0 := a + b c1 := a - b c2 := a & b @@ -2359,9 +2430,9 @@ matrix_type :: proc() { // component-wise multiplication // since a * b would be a standard matrix multiplication - c6 := hadamard_product(a, b) - - + c6 := hadamard_product(a, b) + + fmt.println("a + b", c0) fmt.println("a - b", c1) fmt.println("a & b", c2) @@ -2370,23 +2441,23 @@ matrix_type :: proc() { fmt.println("a &~ b", c5) fmt.println("hadamard_product(a, b)", c6) } - + { // Submatrix casting square matrices // Casting a square matrix to another square matrix with same element type - // is supported. + // is supported. // If the cast is to a smaller matrix type, the top-left submatrix is taken. // If the cast is to a larger matrix type, the matrix is extended with zeros - // everywhere and ones in the diagonal for the unfilled elements of the + // everywhere and ones in the diagonal for the unfilled elements of the // extended matrix. - + mat2 :: distinct matrix[2, 2]f32 mat4 :: distinct matrix[4, 4]f32 - + m2 := mat2{ 1, 3, 2, 4, } - + m4 := mat4(m2) assert(m4[2, 2] == 1) assert(m4[3, 3] == 1) @@ -2394,7 +2465,7 @@ matrix_type :: proc() { fmt.println("m4", m4) fmt.println("mat2(m4)", mat2(m4)) assert(mat2(m4) == m2) - + b4 := mat4{ 1, 2, 0, 0, 3, 4, 0, 0, @@ -2403,43 +2474,43 @@ matrix_type :: proc() { } fmt.println("b4", matrix_flatten(b4)) } - + { // Casting non-square matrices - // Casting a matrix to another matrix is allowed as long as they share + // Casting a matrix to another matrix is allowed as long as they share // the same element type and the number of elements (rows*columns). 
// Matrices in Odin are stored in column-major order, which means // the casts will preserve this element order. - + mat2x4 :: distinct matrix[2, 4]f32 mat4x2 :: distinct matrix[4, 2]f32 - + x := mat2x4{ - 1, 3, 5, 7, + 1, 3, 5, 7, 2, 4, 6, 8, } - + y := mat4x2(x) fmt.println("x", x) fmt.println("y", y) } - + // TECHNICAL INFORMATION: the internal representation of a matrix in Odin is stored // in column-major format // e.g. matrix[2, 3]f32 is internally [3][2]f32 (with different a alignment requirement) - // Column-major is used in order to utilize (SIMD) vector instructions effectively on + // Column-major is used in order to utilize (SIMD) vector instructions effectively on // modern hardware, if possible. // // Unlike normal arrays, matrices try to maximize alignment to allow for the (SIMD) vectorization // properties whilst keeping zero padding (either between columns or at the end of the type). - // + // // Zero padding is a compromise for use with third-party libraries, instead of optimizing for performance. - // Padding between columns was not taken even if that would have allowed each column to be loaded - // individually into a SIMD register with the correct alignment properties. - // + // Padding between columns was not taken even if that would have allowed each column to be loaded + // individually into a SIMD register with the correct alignment properties. + // // Currently, matrices are limited to a maximum of 16 elements (rows*columns), and a minimum of 1 element. // This is because matrices are stored as values (not a reference type), and thus operations on them will // be stored on the stack. Restricting the maximum element count minimizing the possibility of stack overflows. - + // Built-in Procedures (Compiler Level) // transpose(m) // transposes a matrix @@ -2454,13 +2525,13 @@ matrix_type :: proc() { // Example: // m := matrix[2, 2]f32{ // x0, x1, - // y0, y1, + // y0, y1, // } // array: [4]f32 = matrix_flatten(m) // assert(array == {x0, y0, x1, y1}) // conj(x) // conjugates the elements of a matrix for complex element types only - + // Built-in Procedures (Runtime Level) (all square matrix procedures) // determinant(m) // adjugate(m) @@ -2474,8 +2545,8 @@ matrix_type :: proc() { main :: proc() { /* For More Odin Examples - https://github.com/odin-lang/examples - This repository contains examples of how certain things can be accomplished - in idiomatic Odin, allowing you learn its semantics, as well as how to use + This repository contains examples of how certain things can be accomplished + in idiomatic Odin, allowing you learn its semantics, as well as how to use parts of the core and vendor package collections. 
*/ @@ -2513,7 +2584,8 @@ main :: proc() { relative_data_types() or_else_operator() or_return_operator() + or_break_and_or_continue_operators() arbitrary_precision_mathematics() matrix_type() } -} +} \ No newline at end of file diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 968b6ec1e..1401793a5 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -3826,6 +3826,15 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type, update_untyped_expr_type(c, ore->expr, type, final); case_end; + case_ast_node(obe, OrBranchExpr, e); + if (old->value.kind != ExactValue_Invalid) { + // See above note in UnaryExpr case + break; + } + + update_untyped_expr_type(c, obe->expr, type, final); + case_end; + case_ast_node(oee, OrElseExpr, e); if (old->value.kind != ExactValue_Invalid) { // See above note in UnaryExpr case @@ -8193,6 +8202,104 @@ gb_internal ExprKind check_or_return_expr(CheckerContext *c, Operand *o, Ast *no return Expr_Expr; } +gb_internal ExprKind check_or_branch_expr(CheckerContext *c, Operand *o, Ast *node, Type *type_hint) { + ast_node(be, OrBranchExpr, node); + + String name = be->token.string; + Operand x = {}; + check_multi_expr_with_type_hint(c, &x, be->expr, type_hint); + if (x.mode == Addressing_Invalid) { + o->mode = Addressing_Value; + o->type = t_invalid; + o->expr = node; + return Expr_Expr; + } + + Type *left_type = nullptr; + Type *right_type = nullptr; + check_or_return_split_types(c, &x, name, &left_type, &right_type); + add_type_and_value(c, be->expr, x.mode, x.type, x.value); + + if (right_type == nullptr) { + check_or_else_expr_no_value_error(c, name, x, type_hint); + } else { + if (is_type_boolean(right_type) || type_has_nil(right_type)) { + // okay + } else { + gbString s = type_to_string(right_type); + error(node, "'%.*s' requires a boolean or nil-able type, got %s", s); + gb_string_free(s); + } + } + + o->expr = node; + o->type = left_type; + if (left_type != nullptr) { + o->mode = Addressing_Value; + } else { + o->mode = Addressing_NoValue; + } + + if (c->curr_proc_sig == nullptr) { + error(node, "'%.*s' can only be used within a procedure", LIT(name)); + } + + Ast *label = be->label; + + switch (be->token.kind) { + case Token_or_break: + if ((c->stmt_flags & Stmt_BreakAllowed) == 0 && label == nullptr) { + error(be->token, "'%.*s' only allowed in non-inline loops or 'switch' statements", LIT(name)); + } + break; + case Token_or_continue: + if ((c->stmt_flags & Stmt_ContinueAllowed) == 0 && label == nullptr) { + error(be->token, "'%.*s' only allowed in non-inline loops", LIT(name)); + } + break; + } + + if (label != nullptr) { + if (label->kind != Ast_Ident) { + error(label, "A branch statement's label name must be an identifier"); + return Expr_Expr; + } + Ast *ident = label; + String name = ident->Ident.token.string; + Operand o = {}; + Entity *e = check_ident(c, &o, ident, nullptr, nullptr, false); + if (e == nullptr) { + error(ident, "Undeclared label name: %.*s", LIT(name)); + return Expr_Expr; + } + add_entity_use(c, ident, e); + if (e->kind != Entity_Label) { + error(ident, "'%.*s' is not a label", LIT(name)); + return Expr_Expr; + } + Ast *parent = e->Label.parent; + GB_ASSERT(parent != nullptr); + switch (parent->kind) { + case Ast_BlockStmt: + case Ast_IfStmt: + case Ast_SwitchStmt: + if (be->token.kind != Token_or_break) { + error(label, "Label '%.*s' can only be used with 'or_break'", LIT(e->token.string)); + } + break; + case Ast_RangeStmt: + case Ast_ForStmt: + if ((be->token.kind != Token_or_break) && 
(be->token.kind != Token_or_continue)) { + error(label, "Label '%.*s' can only be used with 'or_break' and 'or_continue'", LIT(e->token.string)); + } + break; + + } + } + + return Expr_Expr; +} + gb_internal void check_compound_literal_field_values(CheckerContext *c, Slice const &elems, Operand *o, Type *type, bool &is_constant) { Type *bt = base_type(type); @@ -9947,6 +10054,10 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast return check_or_return_expr(c, o, node, type_hint); case_end; + case_ast_node(re, OrBranchExpr, node); + return check_or_branch_expr(c, o, node, type_hint); + case_end; + case_ast_node(cl, CompoundLit, node); kind = check_compound_literal(c, o, node, type_hint); case_end; @@ -10513,6 +10624,16 @@ gb_internal gbString write_expr_to_string(gbString str, Ast *node, bool shorthan str = gb_string_appendc(str, " or_return"); case_end; + case_ast_node(oe, OrBranchExpr, node); + str = write_expr_to_string(str, oe->expr, shorthand); + str = gb_string_append_rune(str, ' '); + str = string_append_token(str, oe->token); + if (oe->label) { + str = gb_string_append_rune(str, ' '); + str = write_expr_to_string(str, oe->label, shorthand); + } + case_end; + case_ast_node(pe, ParenExpr, node); str = gb_string_append_rune(str, '('); str = write_expr_to_string(str, pe->expr, shorthand); diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 3f1b9611c..0fe44289c 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -102,8 +102,13 @@ gb_internal void check_stmt_list(CheckerContext *ctx, Slice const &stmts, new_flags |= Stmt_FallthroughAllowed; } + u32 prev_stmt_flags = ctx->stmt_flags; + ctx->stmt_flags = new_flags; + check_stmt(ctx, n, new_flags); + ctx->stmt_flags = prev_stmt_flags; + if (i+1 < max_non_constant_declaration) { switch (n->kind) { case Ast_ReturnStmt: diff --git a/src/checker.hpp b/src/checker.hpp index bf956393c..a6a5f6788 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -435,6 +435,7 @@ struct CheckerContext { #define MAX_INLINE_FOR_DEPTH 1024ll i64 inline_for_depth; + u32 stmt_flags; bool in_enum_type; bool collect_delayed_decls; bool allow_polymorphic_types; diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 13a250f00..abdeea4ba 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -535,6 +535,7 @@ gb_internal void lb_mem_zero_ptr(lbProcedure *p, LLVMValueRef ptr, Type *type, u gb_internal void lb_emit_init_context(lbProcedure *p, lbAddr addr); +gb_internal lbBranchBlocks lb_lookup_branch_blocks(lbProcedure *p, Ast *ident); gb_internal lbStructFieldRemapping lb_get_struct_remapping(lbModule *m, Type *t); gb_internal LLVMTypeRef lb_type_padding_filler(lbModule *m, i64 padding, i64 padding_align); @@ -556,6 +557,7 @@ gb_internal LLVMTypeRef OdinLLVMGetVectorElementType(LLVMTypeRef type); gb_internal String lb_filepath_ll_for_module(lbModule *m); + gb_internal LLVMTypeRef llvm_array_type(LLVMTypeRef ElementType, uint64_t ElementCount) { #if LB_USE_NEW_PASS_SYSTEM return LLVMArrayType2(ElementType, ElementCount); diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp index d6e8843fa..aba129ea4 100644 --- a/src/llvm_backend_expr.cpp +++ b/src/llvm_backend_expr.cpp @@ -3305,6 +3305,62 @@ gb_internal lbValue lb_build_expr_internal(lbProcedure *p, Ast *expr) { return lb_emit_or_return(p, oe->expr, tv); case_end; + case_ast_node(be, OrBranchExpr, expr); + lbBlock *block = nullptr; + + if (be->label != nullptr) { + lbBranchBlocks bb = lb_lookup_branch_blocks(p, be->label); + switch 
(be->token.kind) {
+			case Token_or_break: block = bb.break_; break;
+			case Token_or_continue: block = bb.continue_; break;
+			}
+		} else {
+			for (lbTargetList *t = p->target_list; t != nullptr && block == nullptr; t = t->prev) {
+				if (t->is_block) {
+					continue;
+				}
+
+				switch (be->token.kind) {
+				case Token_or_break: block = t->break_; break;
+				case Token_or_continue: block = t->continue_; break;
+				}
+			}
+		}
+
+		lbValue lhs = {};
+		lbValue rhs = {};
+		lb_emit_try_lhs_rhs(p, be->expr, tv, &lhs, &rhs);
+
+		Type *type = default_type(tv.type);
+
+		lbBlock *then = lb_create_block(p, "or_branch.then");
+		lbBlock *done = lb_create_block(p, "or_branch.done"); // NOTE(bill): Append later
+		lbBlock *else_ = lb_create_block(p, "or_branch.else");
+
+		lb_emit_if(p, lb_emit_try_has_value(p, rhs), then, else_);
+		lb_start_block(p, then);
+
+		lbValue res = {};
+		if (lhs.value) {
+			res = lb_emit_conv(p, lhs, type);
+		}
+
+		lb_emit_jump(p, done);
+		lb_start_block(p, else_);
+
+		if (lhs.value) {
+			res = lb_const_nil(p->module, type);
+		}
+
+		if (block != nullptr) {
+			lb_emit_defer_stmts(p, lbDeferExit_Branch, block);
+		}
+		lb_emit_jump(p, block);
+		lb_start_block(p, done);
+
+		return res;
+	case_end;
+
 	case_ast_node(ta, TypeAssertion, expr);
 		TokenPos pos = ast_token(expr).pos;
 		lbValue e = lb_build_expr(p, ta->expr);
diff --git a/src/parser.cpp b/src/parser.cpp
index a81594663..c0498b425 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -533,8 +533,13 @@ gb_internal Ast *ast_tag_expr(AstFile *f, Token token, Token name, Ast *expr) {
 
 gb_internal Ast *ast_unary_expr(AstFile *f, Token op, Ast *expr) {
 	Ast *result = alloc_ast_node(f, Ast_UnaryExpr);
-	if (expr && expr->kind == Ast_OrReturnExpr) {
+	if (expr) switch (expr->kind) {
+	case Ast_OrReturnExpr:
 		syntax_error_with_verbose(expr, "'or_return' within an unary expression not wrapped in parentheses (...)");
+		break;
+	case Ast_OrBranchExpr:
+		syntax_error_with_verbose(expr, "'%.*s' within an unary expression not wrapped in parentheses (...)", LIT(expr->OrBranchExpr.token.string));
+		break;
 	}
 
 	result->UnaryExpr.op = op;
@@ -555,11 +560,22 @@ gb_internal Ast *ast_binary_expr(AstFile *f, Token op, Ast *left, Ast *right) {
 		right = ast_bad_expr(f, op, op);
 	}
 
-	if (left->kind == Ast_OrReturnExpr) {
+
+	if (left) switch (left->kind) {
+	case Ast_OrReturnExpr:
 		syntax_error_with_verbose(left, "'or_return' within a binary expression not wrapped in parentheses (...)");
+		break;
+	case Ast_OrBranchExpr:
+		syntax_error_with_verbose(left, "'%.*s' within a binary expression not wrapped in parentheses (...)", LIT(left->OrBranchExpr.token.string));
+		break;
 	}
-	if (right->kind == Ast_OrReturnExpr) {
+	if (right) switch (right->kind) {
+	case Ast_OrReturnExpr:
 		syntax_error_with_verbose(right, "'or_return' within a binary expression not wrapped in parentheses (...)");
+		break;
+	case Ast_OrBranchExpr:
+		syntax_error_with_verbose(right, "'%.*s' within a binary expression not wrapped in parentheses (...)", LIT(right->OrBranchExpr.token.string));
+		break;
 	}
 
 	result->BinaryExpr.op = op;
@@ -800,6 +816,14 @@ gb_internal Ast *ast_or_return_expr(AstFile *f, Ast *expr, Token const &token) {
 	return result;
 }
 
+gb_internal Ast *ast_or_branch_expr(AstFile *f, Ast *expr, Token const &token, Ast *label) {
+	Ast *result = alloc_ast_node(f, Ast_OrBranchExpr);
+	result->OrBranchExpr.expr = expr;
+	result->OrBranchExpr.token = token;
+	result->OrBranchExpr.label = label;
+	return result;
+}
+
 gb_internal Ast *ast_type_assertion(AstFile *f, Ast *expr, Token dot, Ast *type) {
 	Ast *result = alloc_ast_node(f, Ast_TypeAssertion);
 	result->TypeAssertion.expr = expr;
@@ -1477,19 +1501,20 @@ gb_internal Token expect_operator(AstFile *f) {
 		// okay
 	} else if (prev.kind == Token_if || prev.kind == Token_when) {
 		// okay
-	} else if (prev.kind == Token_or_else || prev.kind == Token_or_return) {
+	} else if (prev.kind == Token_or_else || prev.kind == Token_or_return ||
+	           prev.kind == Token_or_break || prev.kind == Token_or_continue) {
 		// okay
 	} else if (!gb_is_between(prev.kind, Token__OperatorBegin+1, Token__OperatorEnd-1)) {
 		String p = token_to_string(prev);
-		syntax_error(f->curr_token, "Expected an operator, got '%.*s'",
+		syntax_error(prev, "Expected an operator, got '%.*s'",
 		             LIT(p));
 	} else if (!f->allow_range && is_token_range(prev)) {
 		String p = token_to_string(prev);
-		syntax_error(f->curr_token, "Expected an non-range operator, got '%.*s'",
+		syntax_error(prev, "Expected an non-range operator, got '%.*s'",
 		             LIT(p));
 	}
-	if (f->curr_token.kind == Token_Ellipsis) {
-		syntax_warning(f->curr_token, "'..' for ranges has now been deprecated, prefer '..='");
+	if (prev.kind == Token_Ellipsis) {
+		syntax_warning(prev, "'..' for ranges has now been deprecated, prefer '..='");
 		f->tokens[f->curr_token_index].flags |= TokenFlag_Replace;
 	}
 
@@ -1736,6 +1761,8 @@ gb_internal Ast *strip_or_return_expr(Ast *node) {
 		}
 		if (node->kind == Ast_OrReturnExpr) {
 			node = node->OrReturnExpr.expr;
+		} else if (node->kind == Ast_OrBranchExpr) {
+			node = node->OrBranchExpr.expr;
 		} else if (node->kind == Ast_ParenExpr) {
 			node = node->ParenExpr.expr;
 		} else {
@@ -2869,8 +2896,16 @@ gb_internal Ast *parse_call_expr(AstFile *f, Ast *operand) {
 }
 
 gb_internal void parse_check_or_return(Ast *operand, char const *msg) {
-	if (operand && operand->kind == Ast_OrReturnExpr) {
+	if (operand == nullptr) {
+		return;
+	}
+	switch (operand->kind) {
+	case Ast_OrReturnExpr:
 		syntax_error_with_verbose(operand, "'or_return' use within %s is not wrapped in parentheses (...)", msg);
+		break;
+	case Ast_OrBranchExpr:
+		syntax_error_with_verbose(operand, "'%.*s' use within %s is not wrapped in parentheses (...)", LIT(operand->OrBranchExpr.token.string), msg);
+		break;
 	}
 }
 
@@ -3004,6 +3039,18 @@ gb_internal Ast *parse_atom_expr(AstFile *f, Ast *operand, bool lhs) {
 			operand = ast_or_return_expr(f, operand, expect_token(f, Token_or_return));
 			break;
 
+		case Token_or_break:
+		case Token_or_continue:
+			{
+				Token token = advance_token(f);
+				Ast *label = nullptr;
+				if (f->curr_token.kind == Token_Ident) {
+					label = parse_ident(f);
+				}
+				operand = ast_or_branch_expr(f, operand, token, label);
+			}
+			break;
+
 		case Token_OpenBrace:
 			if (!lhs && is_literal_type(operand) && f->expr_level >= 0) {
 				operand = parse_literal_value(f, operand);
diff --git a/src/parser.hpp b/src/parser.hpp
index dd7bd0928..bce818652 100644
--- a/src/parser.hpp
+++ b/src/parser.hpp
@@ -464,6 +464,7 @@ AST_KIND(_ExprBegin, "", bool) \
 	AST_KIND(TernaryWhenExpr, "ternary when expression", struct { Ast *x, *cond, *y; }) \
 	AST_KIND(OrElseExpr, "or_else expression", struct { Ast *x; Token token; Ast *y; }) \
 	AST_KIND(OrReturnExpr, "or_return expression", struct { Ast *expr; Token token; }) \
+	AST_KIND(OrBranchExpr, "or branch expression", struct { Ast *expr; Token token; Ast *label; }) \
 	AST_KIND(TypeAssertion, "type assertion", struct { \
 		Ast *expr; \
 		Token dot; \
diff --git a/src/parser_pos.cpp b/src/parser_pos.cpp
index 3d2e8f27d..f49c40f16 100644
--- a/src/parser_pos.cpp
+++ b/src/parser_pos.cpp
@@ -52,6 +52,7 @@ gb_internal Token ast_token(Ast *node) {
 	case Ast_TernaryWhenExpr: return ast_token(node->TernaryWhenExpr.x);
 	case Ast_OrElseExpr: return ast_token(node->OrElseExpr.x);
 	case Ast_OrReturnExpr: return ast_token(node->OrReturnExpr.expr);
+	case Ast_OrBranchExpr: return ast_token(node->OrBranchExpr.expr);
 	case Ast_TypeAssertion: return ast_token(node->TypeAssertion.expr);
 	case Ast_TypeCast: return node->TypeCast.token;
 	case Ast_AutoCast: return node->AutoCast.token;
@@ -195,6 +196,11 @@ Token ast_end_token(Ast *node) {
 	case Ast_TernaryWhenExpr: return ast_end_token(node->TernaryWhenExpr.y);
 	case Ast_OrElseExpr: return ast_end_token(node->OrElseExpr.y);
 	case Ast_OrReturnExpr: return node->OrReturnExpr.token;
+	case Ast_OrBranchExpr:
+		if (node->OrBranchExpr.label != nullptr) {
+			return ast_end_token(node->OrBranchExpr.label);
+		}
+		return node->OrBranchExpr.token;
 	case Ast_TypeAssertion: return ast_end_token(node->TypeAssertion.type);
 	case Ast_TypeCast: return ast_end_token(node->TypeCast.expr);
 	case Ast_AutoCast: return ast_end_token(node->AutoCast.expr);
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index ad7aa81de..dd9908be5 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -116,6 +116,8 @@ TOKEN_KIND(Token__KeywordBegin, ""), \
 	TOKEN_KIND(Token_context, "context"), \
 	TOKEN_KIND(Token_or_else, "or_else"), \
 	TOKEN_KIND(Token_or_return, "or_return"), \
+	TOKEN_KIND(Token_or_break, "or_break"), \
+	TOKEN_KIND(Token_or_continue, "or_continue"), \
 	TOKEN_KIND(Token_asm, "asm"), \
 	TOKEN_KIND(Token_matrix, "matrix"), \
 TOKEN_KIND(Token__KeywordEnd, ""), \
@@ -1072,6 +1074,8 @@ semicolon_check:;
 	case Token_fallthrough:
 	case Token_return:
 	case Token_or_return:
+	case Token_or_break:
+	case Token_or_continue:
 		/*fallthrough*/
 	case Token_Integer:
 	case Token_Float:
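
Usage sketch (illustrative only, not part of the patch): the snippet below shows how the new operators read in user code, following the demo additions above. The 'find' procedure and every name in it are hypothetical. The semantics are the ones this patch implements: the trailing boolean or nil-able value of the expression is tested and discarded, and on failure the enclosing loop (optionally named by a label) is broken out of or continued, instead of returning from the current procedure as 'or_return' would. As the parser changes require, the operators must be wrapped in parentheses when used inside a unary or binary expression.

package or_branch_sketch

import "core:fmt"

// Hypothetical helper with "value, ok" results, the shape or_break/or_continue expect.
find :: proc(values: []int, target: int) -> (index: int, ok: bool) {
	for v, i in values {
		if v == target {
			return i, true
		}
	}
	return -1, false
}

main :: proc() {
	values := []int{3, 1, 4, 1, 5}

	// or_break with a label: stop searching at the first target that is not present.
	search: for target in 1..=5 {
		index := find(values, target) or_break search
		fmt.println(target, "found at index", index)
	}

	// or_continue without a label: skip missing targets and keep iterating.
	for target in 1..=5 {
		index := find(values, target) or_continue
		fmt.println(target, "found at index", index)
	}
}

The label form mirrors labelled 'break'/'continue': per the checker changes, 'or_break' may target a labelled block, 'if', 'switch', or loop, while 'or_continue' may only target a labelled loop, so a nested inner loop can branch directly to an outer statement without intermediate flags.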