diff --git a/RELEASENOTES-1.4.docu b/RELEASENOTES-1.4.docu
index 592e86d75..6945da14e 100644
--- a/RELEASENOTES-1.4.docu
+++ b/RELEASENOTES-1.4.docu
@@ -521,4 +521,20 @@
This fix is only enabled by default with Simics API version 7 or above.
With version 6 or below it must be explicitly enabled by passing
--no-compat=shared_logs_on_device to DMLC.
+
+ The discard reference '_' has been introduced: a writable expression
+ without a value, which may be used as the target of an assignment in
+ order to explicitly discard the assigned value:
+
+ _ = any_expression;
+ _ = throwing_method();
+ (_, x, _) = method_with_multiple_return_values();
+
+ For backwards compatibility, declared variables and object members are
+ still allowed to be named '_' with Simics API version 6 or below.
+ Any such declaration will shadow the discard reference, i.e. make it
+ unavailable within any scope where the declaration is accessible.
+ This compatibility feature can be disabled by passing
+ --no-compat=discard_ref_shadowing to DMLC.
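+
+ For example, with this compatibility feature enabled (a minimal
+ sketch; 'm' is a hypothetical method):
+
+ method m() {
+     local int _ = 4;  // accepted; shadows the discard reference
+     _ = 4;            // assigns to the variable '_', no discard
+ }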
diff --git a/lib/1.2/dml-builtins.dml b/lib/1.2/dml-builtins.dml
index 0b29285ea..a75c1120b 100644
--- a/lib/1.2/dml-builtins.dml
+++ b/lib/1.2/dml-builtins.dml
@@ -211,6 +211,7 @@ template device {
parameter _compat_port_obj_param auto;
parameter _compat_io_memory auto;
parameter _compat_shared_logs_on_device auto;
+ parameter _compat_discard_ref_shadowing auto;
parameter _compat_dml12_inline auto;
parameter _compat_dml12_not auto;
parameter _compat_dml12_goto auto;
diff --git a/lib/1.4/dml-builtins.dml b/lib/1.4/dml-builtins.dml
index 86c3b8031..64b4d2b6f 100644
--- a/lib/1.4/dml-builtins.dml
+++ b/lib/1.4/dml-builtins.dml
@@ -545,6 +545,7 @@ template device {
param _compat_port_obj_param auto;
param _compat_io_memory auto;
param _compat_shared_logs_on_device auto;
+ param _compat_discard_ref_shadowing auto;
param _compat_dml12_inline auto;
param _compat_dml12_not auto;
param _compat_dml12_goto auto;
@@ -1848,7 +1849,7 @@ template bank is (object, shown_desc) {
}
shared method _num_registers() -> (uint32) {
- local (const register *_, uint64 table_size) = _reginfo_table();
+ local (const register *_table, uint64 table_size) = _reginfo_table();
return table_size;
}
@@ -2808,7 +2809,7 @@ template register is (_conf_attribute, get, set, shown_desc,
shared method _size() -> (int) { return this.bitsize / 8; }
shared method _num_fields() -> (uint32) {
local uint32 num_fields = 0;
- foreach f in (fields) {
+ foreach _f in (fields) {
num_fields++;
}
return num_fields;
@@ -2991,7 +2992,6 @@ template register is (_conf_attribute, get, set, shown_desc,
local uint64 unmapped_bits = unmapped & ~field_bits;
local uint64 val = (this.val & default_access_bits & enabled_bytes);
- local int r_lsb = _enabled_bytes_to_offset(enabled_bytes) * 8;
for (local int f = 0; f < num_fields; f++) {
local int f_lsb = fields[f].lsb;
local int f_msb = f_lsb + fields[f].bitsize - 1;
@@ -3080,8 +3080,6 @@ template register is (_conf_attribute, get, set, shown_desc,
return;
}
- local int r_lsb = _enabled_bytes_to_offset(enabled_bytes) * 8;
- local int r_msb = _enabled_bytes_to_size(enabled_bytes) * 8 - 1;
for (local int f = 0; f < num_fields; f++) {
local int f_lsb = fields[f].lsb;
local int f_msb = f_lsb + fields[f].bitsize - 1;
diff --git a/py/dml/c_backend.py b/py/dml/c_backend.py
index 2cb4384e4..f01bf63b4 100644
--- a/py/dml/c_backend.py
+++ b/py/dml/c_backend.py
@@ -1837,7 +1837,7 @@ def generate_init_data_objs(device):
start_function_definition(
'void _init_data_objs(%s *_dev)' % (crep.structtype(device),))
out('{\n', postindent = 1)
- with crep.DeviceInstanceContext():
+ with crep.DeviceInstanceContext(), allow_linemarks():
for node in device.initdata:
# Usually, the initializer is constant, but we permit that it
# depends on index. When the initializer is constant, we use a loop
@@ -1859,25 +1859,26 @@ def generate_init_data_objs(device):
# mainly meant to capture EIDXVAR; for other errors, the error will
# normally re-appear when evaluating per instance
except DMLError:
- with allow_linemarks():
- for indices in node.all_indices():
- index_exprs = tuple(mkIntegerLiteral(node.site, i)
- for i in indices)
- nref = mkNodeRef(node.site, node, index_exprs)
- try:
- init = eval_initializer(
- node.site, node._type, node.astinit,
- Location(node.parent, index_exprs),
- global_scope, True)
- except DMLError as e:
- report(e)
- else:
- markers = ([('store_writes_const_field', 'FALSE')]
- if deep_const(node._type) else [])
- coverity_markers(markers, init.site)
- init.assign_to(nref, node._type)
+ for indices in node.all_indices():
+ index_exprs = tuple(mkIntegerLiteral(node.site, i)
+ for i in indices)
+ nref = mkNodeRef(node.site, node, index_exprs)
+ try:
+ init = eval_initializer(
+ node.site, node._type, node.astinit,
+ Location(node.parent, index_exprs),
+ global_scope, True)
+ except DMLError as e:
+ report(e)
+ else:
+ markers = ([('store_writes_const_field', 'FALSE')]
+ if deep_const(node._type) else [])
+ coverity_markers(markers, init.site)
+ out(init.assign_to(nref.read(), node._type) + ';\n')
else:
index_exprs = ()
+ if node.dimensions:
+ reset_line_directive()
for (i, sz) in enumerate(node.dimsizes):
var = 'i%d' % (i,)
out(('for (int %s = 0; %s < %s; ++%s) {\n'
@@ -1885,11 +1886,12 @@ def generate_init_data_objs(device):
postindent=1)
index_exprs += (mkLit(node.site, var, TInt(64, True)),)
nref = mkNodeRef(node.site, node, index_exprs)
- with allow_linemarks():
- markers = ([('store_writes_const_field', 'FALSE')]
- if deep_const(node._type) else [])
- coverity_markers(markers, init.site)
- init.assign_to(nref, node._type)
+ markers = ([('store_writes_const_field', 'FALSE')]
+ if deep_const(node._type) else [])
+ coverity_markers(markers, init.site)
+ out(init.assign_to(nref.read(), node._type) + ';\n')
+ if node.dimensions:
+ reset_line_directive()
for _ in range(node.dimensions):
out('}\n', postindent=-1)
out('}\n\n', preindent = -1)
@@ -3120,12 +3122,7 @@ def generate_startup_trait_calls(data, idxvars):
ref = ObjTraitRef(site, node, trait, indices)
out(f'_tref = {ref.read()};\n')
for method in trait_methods:
- outargs = [mkLit(method.site,
- ('*((%s) {0})'
- % ((TArray(t, mkIntegerLiteral(method.site, 1))
- .declaration('')),)),
- t)
- for (_, t) in method.outp]
+ outargs = [mkDiscardRef(method.site) for _ in method.outp]
method_ref = TraitMethodDirect(
method.site, mkLit(method.site, '_tref', TTrait(trait)), method)
@@ -3137,12 +3134,7 @@ def generate_startup_trait_calls(data, idxvars):
def generate_startup_regular_call(method, idxvars):
site = method.site
indices = tuple(mkLit(site, idx, TInt(32, False)) for idx in idxvars)
- outargs = [mkLit(site,
- ('*((%s) {0})'
- % ((TArray(t, mkIntegerLiteral(site, 1))
- .declaration('')),)),
- t)
- for (_, t) in method.outp]
+ outargs = [mkDiscardRef(method.site) for _ in method.outp]
# startup memoized methods can throw, which is ignored during startup.
# Memoization of the throw then allows for the user to check whether
# or not the method did throw during startup by calling the method
diff --git a/py/dml/codegen.py b/py/dml/codegen.py
index 644e9d064..b1c540df2 100644
--- a/py/dml/codegen.py
+++ b/py/dml/codegen.py
@@ -704,9 +704,9 @@ def error_out_at_index(_i, exc, msg):
site, val_expr, targets, error_out_at_index,
f'deserialization of arguments to {self.method.name}')
if self.args_type:
- ctree.mkAssignStatement(site, out_expr,
- ctree.ExpressionInitializer(
- tmp_out_ref)).toc()
+ ctree.AssignStatement(site, out_expr,
+ ctree.ExpressionInitializer(
+ tmp_out_ref)).toc()
@property
def args_type(self):
@@ -822,8 +822,8 @@ def error_out_at_index(_i, exc, msg):
'deserialization of arguments to a send_now')
- ctree.mkAssignStatement(site, out_expr,
- ctree.ExpressionInitializer(tmp_out_ref)).toc()
+ ctree.AssignStatement(site, out_expr,
+ ctree.ExpressionInitializer(tmp_out_ref)).toc()
@property
def args_type(self):
@@ -1148,7 +1148,7 @@ def expr_unop(tree, location, scope):
elif op == 'post--': return mkPostDec(tree.site, rh)
elif op == 'sizeof':
if (compat.dml12_misc not in dml.globals.enabled_compat
- and not isinstance(rh, ctree.LValue)):
+ and not rh.addressable):
raise ERVAL(rh.site, 'sizeof')
return codegen_sizeof(tree.site, rh)
elif op == 'defined': return mkBoolConstant(tree.site, True)
@@ -1207,6 +1207,13 @@ def expr_variable(tree, location, scope):
if in_dev_tree:
e = in_dev_tree
if e is None:
+ # TODO/HACK: The discard ref is exposed like this to allow it to be as
+ # keyword-like as possible while still allowing it to be shadowed.
+    # Once we remove support for discard_ref_shadowing, the discard ref
+    # should become a proper keyword, and its codegen should be done via
+    # dedicated dispatch.
+ if name == '_' and tree.site.dml_version() != (1, 2):
+ return mkDiscardRef(tree.site)
raise EIDENT(tree.site, name)
return e
@@ -1538,7 +1545,7 @@ def eval_type(asttype, site, location, scope, extern=False, typename=None,
etype = expr.node_type
else:
raise expr.exc()
- elif (not isinstance(expr, ctree.LValue)
+ elif (not expr.addressable
and compat.dml12_misc not in dml.globals.enabled_compat):
raise ERVAL(expr.site, 'typeof')
else:
@@ -2131,8 +2138,8 @@ def make_static_var(site, location, static_sym_type, name, init=None,
with init_code:
if deep_const(static_sym_type):
coverity_marker('store_writes_const_field', 'FALSE')
- init.assign_to(mkStaticVariable(site, static_sym),
- static_sym_type)
+ out(init.assign_to(mkStaticVariable(site, static_sym).read(),
+ static_sym_type) + ';\n')
c_init = init_code.buf
else:
c_init = None
@@ -2340,21 +2347,31 @@ def try_codegen_invocation(site, init_ast, outargs, location, scope):
else:
return common_inline(site, meth_node, indices, inargs, outargs)
+def codegen_src_for_nonvalue_target(site, tgt, src_ast, location, scope):
+ if not tgt.writable:
+ raise EASSIGN(site, tgt)
+ if src_ast.kind != 'initializer_scalar':
+ raise EDATAINIT(tgt.site,
+ f'{tgt} can only be used as the target '
+ + 'of an assignment if its initializer is a '
+ + 'simple expression or a return value of a '
+ + 'method call')
+ return codegen_expression(src_ast.args[0], location, scope)
+
@statement_dispatcher
def stmt_assign(stmt, location, scope):
(_, site, tgt_ast, src_asts) = stmt
assert tgt_ast.kind in ('assign_target_chain', 'assign_target_tuple')
- tgts = [codegen_expression(ast, location, scope)
+ tgts = [codegen_expression_maybe_nonvalue(ast, location, scope)
for ast in tgt_ast.args[0]]
for tgt in tgts:
- if deep_const(tgt.ctype()):
+ if not isinstance(tgt, NonValue) and deep_const(tgt.ctype()):
raise ECONST(tgt.site)
if tgt_ast.kind == 'assign_target_chain':
method_tgts = [tgts[0]]
else:
method_tgts = tgts
- # TODO support multiple assign sources. It should be generalized.
method_invocation = try_codegen_invocation(site, src_asts, method_tgts,
location, scope)
if method_invocation:
@@ -2370,19 +2387,33 @@ def stmt_assign(stmt, location, scope):
+ f'initializer: Expected {src_asts}, got 1'))
return []
- stmts = []
- lscope = Symtab(scope)
+ if isinstance(tgts[-1], NonValue):
+ if len(tgts) != 1:
+ raise tgts[-1].exc()
+ expr = codegen_src_for_nonvalue_target(site, tgts[0], src_asts[0],
+ location, scope)
+ return [mkCopyData(site, expr, tgts[0])]
+
+ init_typ = tgts[-1].ctype()
init = eval_initializer(
- site, tgts[-1].ctype(), src_asts[0], location, scope, False)
-
- for (i, tgt) in enumerate(reversed(tgts[1:])):
- name = 'tmp%d' % (i,)
- sym = lscope.add_variable(
- name, type=tgt.ctype(), site=tgt.site, init=init, stmt=True)
- init = ExpressionInitializer(mkLocalVariable(tgt.site, sym))
- stmts.extend([sym_declaration(sym),
- mkAssignStatement(tgt.site, tgt, init)])
- return stmts + [mkAssignStatement(tgts[0].site, tgts[0], init)]
+ tgts[-1].site, init_typ, src_asts[0], location, scope, False)
+
+ if len(tgts) == 1:
+ return [mkAssignStatement(tgts[0].site, tgts[0], init)]
+
+ lscope = Symtab(scope)
+ sym = lscope.add_variable(
+ 'tmp', type=init_typ, site=init.site, init=init,
+ stmt=True)
+ init_expr = mkLocalVariable(init.site, sym)
+ stmts = [sym_declaration(sym)]
+ for tgt in reversed(tgts[1:]):
+ stmts.append(mkCopyData(tgt.site, init_expr, tgt))
+ init_expr = (tgt if isinstance(tgt, NonValue)
+ else source_for_assignment(tgt.site, tgt.ctype(),
+ init_expr))
+ stmts.append(mkCopyData(tgts[0].site, init_expr, tgts[0]))
+ return [mkCompound(site, stmts)]
else:
# Guaranteed by grammar
assert tgt_ast.kind == 'assign_target_tuple' and len(tgts) > 1
@@ -2399,53 +2430,66 @@ def stmt_assign(stmt, location, scope):
stmts = []
lscope = Symtab(scope)
- syms = []
+ stmt_pairs = []
for (i, (tgt, src_ast)) in enumerate(zip(tgts, src_asts)):
- init = eval_initializer(site, tgt.ctype(), src_ast, location,
- scope, False)
- name = 'tmp%d' % (i,)
- sym = lscope.add_variable(
- name, type=tgt.ctype(), site=tgt.site, init=init,
- stmt=True)
- syms.append(sym)
-
- stmts.extend(map(sym_declaration, syms))
- stmts.extend(
- mkAssignStatement(
- tgt.site, tgt, ExpressionInitializer(mkLocalVariable(tgt.site,
- sym)))
- for (tgt, sym) in zip(tgts, syms))
- return stmts
+ if isinstance(tgt, NonValue):
+ expr = codegen_src_for_nonvalue_target(site, tgt, src_ast,
+ location, scope)
+ stmt_pairs.append((mkCopyData(tgt.site, expr, tgt), None))
+ else:
+ init = eval_initializer(site, tgt.ctype(), src_ast, location,
+ scope, False)
+ name = 'tmp%d' % (i,)
+ sym = lscope.add_variable(
+ name, type=tgt.ctype(), site=tgt.site, init=init,
+ stmt=True)
+ write = AssignStatement(
+ tgt.site, tgt,
+ ExpressionInitializer(mkLocalVariable(tgt.site, sym)))
+ stmt_pairs.append((sym_declaration(sym), write))
+
+ stmts.extend(first for (first, _) in stmt_pairs)
+ stmts.extend(second for (_, second) in stmt_pairs
+ if second is not None)
+ return [mkCompound(site, stmts)]
@statement_dispatcher
def stmt_assignop(stmt, location, scope):
- (kind, site, tgt_ast, op, src_ast) = stmt
+ (_, site, tgt_ast, op, src_ast) = stmt
tgt = codegen_expression(tgt_ast, location, scope)
- if deep_const(tgt.ctype()):
+ if isinstance(tgt, ctree.InlinedParam):
+ raise EASSINL(tgt.site, tgt.name)
+ if not tgt.writable:
+ raise EASSIGN(site, tgt)
+
+ ttype = tgt.ctype()
+ if deep_const(ttype):
raise ECONST(tgt.site)
- if isinstance(tgt, ctree.BitSlice):
- # destructive hack
- return stmt_assign(
- ast.assign(site, ast.assign_target_chain(site, [tgt_ast]),
- [ast.initializer_scalar(
- site,
- ast.binop(site, tgt_ast, op[:-1], src_ast))]),
- location, scope)
+
src = codegen_expression(src_ast, location, scope)
- ttype = tgt.ctype()
- lscope = Symtab(scope)
- sym = lscope.add_variable(
- 'tmp', type = TPtr(ttype), site = tgt.site,
- init = ExpressionInitializer(mkAddressOf(tgt.site, tgt)), stmt=True)
- # Side-Effect Free representation of the tgt lvalue
- tgt_sef = mkDereference(site, mkLocalVariable(tgt.site, sym))
- return [
- sym_declaration(sym), mkExpressionStatement(
- site,
- mkAssignOp(site, tgt_sef, arith_binops[op[:-1]](
- site, tgt_sef, src)))]
+ if tgt.addressable:
+ lscope = Symtab(scope)
+ tmp_tgt_sym = lscope.add_variable(
+ '_tmp_tgt', type = TPtr(ttype), site = tgt.site,
+ init = ExpressionInitializer(mkAddressOf(tgt.site, tgt)),
+ stmt=True)
+ # Side-Effect Free representation of the tgt lvalue
+ tgt = mkDereference(site, mkLocalVariable(tgt.site, tmp_tgt_sym))
+ else:
+ # TODO Not ideal. This path is needed to deal with writable
+ # expressions that do not correspond to C lvalues; such as bit slices.
+ # The incurred repeated evaluation is painful.
+ tmp_tgt_sym = None
+
+ assign_src = source_for_assignment(site, ttype,
+ arith_binops[op[:-1]](site, tgt, src))
+
+ return [mkCompound(site,
+ ([sym_declaration(tmp_tgt_sym)] if tmp_tgt_sym else [])
+ + [mkExpressionStatement(site,
+ ctree.AssignOp(site, tgt, assign_src))])]
@statement_dispatcher
def stmt_expression(stmt, location, scope):
[expr] = stmt.args
@@ -3601,7 +3645,7 @@ def codegen_inline(site, meth_node, indices, inargs, outargs,
parmtype if parmtype else arg.ctype(),
meth_node.name)
for (arg, var, (parmname, parmtype)) in zip(
- outargs, outvars, meth_node.outp)]
+ outargs, outvars, meth_node.outp)]
exit_handler = GotoExit_dml12()
with exit_handler:
code = [codegen_statement(meth_node.astcode,
@@ -3885,7 +3929,7 @@ def prelude():
param = mkDereference(site,
mkLit(site, name, TPtr(typ)))
fnscope.add(ExpressionSymbol(name, param, site))
- code.append(mkAssignStatement(site, param, init))
+ code.append(AssignStatement(site, param, init))
else:
code = []
@@ -4025,15 +4069,20 @@ def copy_outarg(arg, var, parmname, parmtype, method_name):
an exception. We would be able to skip the proxy variable for
calls to non-throwing methods when arg.ctype() and parmtype are
equivalent types, but we don't do this today.'''
- argtype = arg.ctype()
-
- if not argtype:
- raise ICE(arg.site, "unknown expression type")
+ if isinstance(arg, NonValue):
+ if not arg.writable:
+ raise arg.exc()
else:
- ok, trunc, constviol = realtype(parmtype).canstore(realtype(argtype))
- if not ok:
- raise EARGT(arg.site, 'call', method_name,
- arg.ctype(), parmname, parmtype, 'output')
+ argtype = arg.ctype()
+
+ if not argtype:
+ raise ICE(arg.site, "unknown expression type")
+ else:
+ ok, trunc, constviol = realtype(parmtype).canstore(
+ realtype(argtype))
+ if not ok:
+ raise EARGT(arg.site, 'call', method_name,
+ arg.ctype(), parmname, parmtype, 'output')
return mkCopyData(var.site, var, arg)
diff --git a/py/dml/compat.py b/py/dml/compat.py
index 3df8385ef..35c904ef6 100644
--- a/py/dml/compat.py
+++ b/py/dml/compat.py
@@ -117,6 +117,13 @@ class shared_logs_on_device(CompatFeature):
short = "Make logs inside shared methods always log on the device object"
last_api_version = api_6
+@feature
+class discard_ref_shadowing(CompatFeature):
+ '''This compatibility feature allows declarations (within methods or
+ objects) to be named '_'. This will cause the discard reference `_` to be
+    inaccessible (*shadowed*) in any scope where such a declaration is
+    accessible.'''
+ short = "Allow declarations to shadow '_'"
+ last_api_version = api_6
@feature
class dml12_inline(CompatFeature):
diff --git a/py/dml/ctree.py b/py/dml/ctree.py
index 24541c295..d1716f079 100644
--- a/py/dml/ctree.py
+++ b/py/dml/ctree.py
@@ -66,7 +66,7 @@
'mkVectorForeach',
'mkBreak',
'mkContinue',
- 'mkAssignStatement',
+ 'mkAssignStatement', 'AssignStatement',
'mkCopyData',
'mkIfExpr', 'IfExpr',
#'BinOp',
@@ -126,6 +126,7 @@
'mkEachIn', 'EachIn',
'mkBoolConstant',
'mkUndefined', 'Undefined',
+ 'mkDiscardRef',
'TraitParameter',
'TraitSessionRef',
'TraitHookRef',
@@ -599,8 +600,11 @@ def mkExpressionStatement(site, expr):
def toc_constsafe_pointer_assignment(site, source, target, typ):
target_val = mkDereference(site,
Cast(site, mkLit(site, target, TPtr(void)), TPtr(typ)))
- mkAssignStatement(site, target_val,
- ExpressionInitializer(mkLit(site, source, typ))).toc()
+
+ init = ExpressionInitializer(
+ source_for_assignment(site, typ, mkLit(site, source, typ)))
+
+ return AssignStatement(site, target_val, init).toc()
class After(Statement):
@auto_init
@@ -1020,22 +1024,39 @@ class AssignStatement(Statement):
@auto_init
def __init__(self, site, target, initializer):
assert isinstance(initializer, Initializer)
+
def toc_stmt(self):
self.linemark()
- out('{\n', postindent=1)
- self.toc_inline()
- self.linemark()
- out('}\n', preindent=-1)
- def toc_inline(self):
- self.linemark()
- self.initializer.assign_to(self.target, self.target.ctype())
+ out(self.target.write(self.initializer) + ';\n')
+
+def mkAssignStatement(site, target, init):
+ if isinstance(target, InlinedParam):
+ raise EASSINL(target.site, target.name)
+ if not target.writable:
+ raise EASSIGN(site, target)
+
+ if isinstance(target, NonValue):
+ if not isinstance(init, ExpressionInitializer):
+ raise EDATAINIT(target.site,
+ f'{target} can only be used as the target of an '
+ + 'assignment if its initializer is a simple '
+ + 'expression or a return value of a method call')
+ else:
+ target_type = target.ctype()
+
+ if deep_const(target_type):
+ raise ECONST(site)
+
+ if isinstance(init, ExpressionInitializer):
+ init = ExpressionInitializer(
+ source_for_assignment(site, target_type, init.expr))
+
+ return AssignStatement(site, target, init)
-mkAssignStatement = AssignStatement
def mkCopyData(site, source, target):
"Convert a copy statement to intermediate representation"
- assignexpr = mkAssignOp(site, target, source)
- return mkExpressionStatement(site, assignexpr)
+ return mkAssignStatement(site, target, ExpressionInitializer(source))
#
# Expressions
@@ -1094,21 +1115,12 @@ def truncate_int_bits(value, signed, bits=64):
return value & mask
class LValue(Expression):
- "Somewhere to read or write data"
+    """An expression whose C representation is always an lvalue, and whose
+    address is always safe to take, in the sense that how long that address
+    remains valid is intuitively predictable by the user"""
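+    # (e.g. a local variable: its C representation is an lvalue whose
+    # address remains valid for the variable's entire lifetime)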
writable = True
-
- def write(self, source):
- rt = realtype(self.ctype())
- if isinstance(rt, TEndianInt):
- return (f'{rt.dmllib_fun("copy")}(&{self.read()},'
- + f' {source.read()})')
- return '%s = %s' % (self.read(), source.read())
-
- @property
- def is_stack_allocated(self):
- '''Returns true only if it's known that writing to the lvalue will
- write to stack-allocated data'''
- return False
+ addressable = True
+ c_lval = True
class IfExpr(Expression):
priority = 30
@@ -2444,8 +2456,8 @@ class AssignOp(BinOp):
def __str__(self):
return "%s = %s" % (self.lh, self.rh)
- def discard(self):
- return self.lh.write(self.rh)
+ def discard(self, explicit=False):
+ return self.lh.write(ExpressionInitializer(self.rh))
def read(self):
return '((%s), (%s))' % (self.discard(), self.lh.read())
@@ -2524,13 +2536,13 @@ def make_simple(cls, site, rh):
TPtr(TVoid())],
TVoid())))
if (compat.dml12_misc not in dml.globals.enabled_compat
- and not isinstance(rh, LValue)):
+ and not rh.addressable):
raise ERVAL(rh.site, '&')
return AddressOf(site, rh)
@property
def is_pointer_to_stack_allocation(self):
- return isinstance(self.rh, LValue) and self.rh.is_stack_allocated
+ return self.rh.is_stack_allocated
def mkAddressOf(site, rh):
if dml.globals.compat_dml12_int(site):
@@ -2568,7 +2580,8 @@ def is_stack_allocated(self):
@property
def is_pointer_to_stack_allocation(self):
- return isinstance(self.type, TArray) and self.is_stack_allocated
+ return (isinstance(safe_realtype_shallow(self.type), TArray)
+ and self.is_stack_allocated)
mkDereference = Dereference.make
@@ -2690,7 +2703,7 @@ def mkUnaryPlus(site, rh):
rh, _ = promote_integer(rh, rhtype)
else:
raise ICE(site, "Unexpected arith argument to unary +")
- if isinstance(rh, LValue):
+ if rh.addressable or rh.writable:
# +x is a rvalue
rh = mkRValue(rh)
return rh
@@ -2716,7 +2729,7 @@ def make_simple(cls, site, rh):
rhtype = safe_realtype(rh.ctype())
if not isinstance(rhtype, (IntegerType, TPtr)):
raise EINCTYPE(site, cls.op)
- if not isinstance(rh, LValue):
+ if not rh.addressable:
if isinstance(rh, BitSlice):
hint = 'try %s= 1' % (cls.base_op[0],)
else:
@@ -2922,7 +2935,8 @@ def writable(self):
return self.expr.writable
def write(self, source):
- source_expr = source
+ assert isinstance(source, ExpressionInitializer)
+ source_expr = source.expr
# if not self.size.constant or source.ctype() > self.type:
# source = mkBitAnd(source, self.mask)
@@ -2944,7 +2958,7 @@ def write(self, source):
target_type = realtype(self.expr.ctype())
if target_type.is_int and target_type.is_endian:
expr = mkCast(self.site, expr, target_type)
- return self.expr.write(expr)
+ return self.expr.write(ExpressionInitializer(expr))
def mkBitSlice(site, expr, msb, lsb, bitorder):
# lsb == None means that only one bit number was given (expr[i]
@@ -3467,6 +3481,18 @@ def exc(self):
mkUndefined = Undefined
+class DiscardRef(NonValue):
+ writable = True
+
+ def __str__(self):
+ return '_'
+
+ def write(self, source):
+ assert isinstance(source, ExpressionInitializer)
+ return source.expr.discard(explicit=True)
+
+mkDiscardRef = DiscardRef
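+
+# A minimal usage sketch (assuming a site 'site' and an
+# ExpressionInitializer 'init' wrapping the discarded expression):
+#
+#     mkAssignStatement(site, mkDiscardRef(site), init).toc_stmt()
+#
+# emits the C produced by init.expr.discard(explicit=True),
+# followed by ';'.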
+
def endian_convert_expr(site, idx, endian, size):
"""Convert a bit index to little-endian (lsb=0) numbering.
@@ -4293,14 +4319,28 @@ def read(self):
mkStaticVariable = StaticVariable
-class StructMember(LValue):
+class StructMember(Expression):
priority = 160
explicit_type = True
@auto_init
def __init__(self, site, expr, sub, type, op):
+        # Writes of StructMembers rely on them being C lvalues
+ assert not expr.writable or expr.c_lval
assert_type(site, expr, Expression)
assert_type(site, sub, str)
+ @property
+ def writable(self):
+ return self.expr.writable
+
+ @property
+ def addressable(self):
+ return self.expr.addressable
+
+ @property
+ def c_lval(self):
+ return self.expr.c_lval
+
def __str__(self):
s = str(self.expr)
if self.expr.priority < self.priority:
@@ -4314,11 +4354,12 @@ def read(self):
@property
def is_stack_allocated(self):
- return isinstance(self.expr, LValue) and self.expr.is_stack_allocated
+ return self.expr.is_stack_allocated
@property
def is_pointer_to_stack_allocation(self):
- return isinstance(self.type, TArray) and self.is_stack_allocated
+ return (isinstance(safe_realtype_shallow(self.type), TArray)
+ and self.is_stack_allocated)
def mkSubRef(site, expr, sub, op):
if isinstance(expr, NodeRef):
@@ -4425,18 +4466,28 @@ def is_stack_allocated(self):
@property
def is_pointer_to_stack_allocation(self):
- return isinstance(self.type, TArray) and self.is_stack_allocated
+ return (isinstance(safe_realtype_shallow(self.type), TArray)
+ and self.is_stack_allocated)
-class VectorRef(LValue):
+class VectorRef(Expression):
slots = ('type',)
@auto_init
def __init__(self, site, expr, idx):
+ assert not expr.writable or expr.c_lval
self.type = realtype(self.expr.ctype()).base
def read(self):
return 'VGET(%s, %s)' % (self.expr.read(), self.idx.read())
- def write(self, source):
- return "VSET(%s, %s, %s)" % (self.expr.read(), self.idx.read(),
- source.read())
+    # No need for a write method; VGET results in an lvalue
+
+ @property
+ def writable(self):
+ return self.expr.writable
+ @property
+ def addressable(self):
+ return self.expr.addressable
+ @property
+ def c_lval(self):
+ return self.expr.c_lval
def mkIndex(site, expr, idx):
if isinstance(idx, NonValue):
@@ -4510,7 +4561,7 @@ def read(self):
@property
def is_pointer_to_stack_allocation(self):
- return (isinstance(self.type, TPtr)
+ return (isinstance(safe_realtype_shallow(self.type), TPtr)
and self.expr.is_pointer_to_stack_allocation)
def mkCast(site, expr, new_type):
@@ -4653,7 +4704,6 @@ def mkCast(site, expr, new_type):
class RValue(Expression):
'''Wraps an lvalue to prohibit write. Useful when a composite
expression is reduced down to a single variable.'''
- writable = False
@auto_init
def __init__(self, site, expr): pass
def __str__(self):
@@ -4662,10 +4712,19 @@ def ctype(self):
return self.expr.ctype()
def read(self):
return self.expr.read()
- def discard(self): pass
+ def discard(self, explicit=False):
+ return self.expr.discard(explicit)
+    # Since addressable and writable are False, this may only ever be
+    # leveraged by DMLC for optimization purposes
+ @property
+ def c_lval(self):
+ return self.expr.c_lval
+ @property
+ def is_pointer_to_stack_allocation(self):
+ return self.expr.is_pointer_to_stack_allocation
def mkRValue(expr):
- if isinstance(expr, LValue) or expr.writable:
+ if expr.addressable or expr.writable:
return RValue(expr.site, expr)
return expr
@@ -4847,15 +4906,35 @@ def assign_to(self, dest, typ):
# be UB as long as the session variable hasn't been initialized
# previously.
site = self.expr.site
- if deep_const(typ):
- out('memcpy((void *)&%s, (%s){%s}, sizeof %s);\n'
- % (dest.read(),
- TArray(typ, mkIntegerLiteral(site, 1)).declaration(''),
- mkCast(site, self.expr, typ).read(),
- dest.read()))
+ rt = safe_realtype_shallow(typ)
+ # There is a reasonable implementation for this case (memcpy), but it
+ # never occurs today
+ assert not isinstance(typ, TArray)
+ if isinstance(rt, TEndianInt):
+ return (f'{rt.dmllib_fun("copy")}((void *)&{dest},'
+ + f' {self.expr.read()})')
+ elif deep_const(typ):
+ shallow_deconst_typ = safe_realtype_unconst(typ)
+ # a const-qualified ExternStruct can be leveraged by the user as a
+ # sign that there is some const-qualified member unknown to DMLC
+ if (isinstance(typ, TExternStruct)
+ or deep_const(shallow_deconst_typ)):
+ # Expression statement to delimit lifetime of compound literal
+ # TODO it's possible to improve the efficiency of this by not
+ # using a compound literal if self.expr is c_lval. However,
+                # this would require a strict cmp to ensure safety, and it's
+                # unclear if that path could ever be taken.
+ return ('({ memcpy((void *)&%s, (%s){%s}, sizeof(%s)); })'
+ % (dest,
+ TArray(typ,
+ mkIntegerLiteral(site, 1)).declaration(''),
+ mkCast(site, self.expr, typ).read(),
+ dest))
+ else:
+ return (f'*({TPtr(shallow_deconst_typ).declaration("")})'
+ + f'&{dest} = {self.expr.read()}')
else:
- with disallow_linemarks():
- mkCopyData(site, self.expr, dest).toc()
+ return f'{dest} = {self.expr.read()}'
class CompoundInitializer(Initializer):
'''Initializer for a variable of struct or array type, using the
@@ -4883,21 +4962,12 @@ def assign_to(self, dest, typ):
'''output C statements to assign an lvalue'''
# (void *) cast to avoid GCC erroring if the target type is (partially)
# const-qualified. See ExpressionInitializer.assign_to
- if isinstance(typ, TNamed):
- out('memcpy((void *)&%s, &(%s)%s, sizeof %s);\n' %
- (dest.read(), typ.declaration(''), self.read(),
- dest.read()))
- elif isinstance(typ, TArray):
- out('memcpy((void *)%s, (%s)%s, sizeof %s);\n'
- % (dest.read(), typ.declaration(''),
- self.read(), dest.read()))
- elif isinstance(typ, TStruct):
- out('memcpy((void *)&%s, (%s){%s}, sizeof %s);\n' % (
- dest.read(),
- TArray(typ, mkIntegerLiteral(self.site, 1)).declaration(''),
- self.read(), dest.read()))
+ if isinstance(typ, (TNamed, TArray, TStruct)):
+ # Expression statement to delimit lifetime of compound literal
+ return ('({ memcpy((void *)&%s, &(%s)%s, sizeof(%s)); })'
+ % (dest, typ.declaration(''), self.read(), dest))
else:
- raise ICE(self.site, 'strange type %s' % typ)
+ raise ICE(self.site, f'unexpected type for initializer: {typ}')
class DesignatedStructInitializer(Initializer):
'''Initializer for a variable of an extern-declared struct type, using
@@ -4937,10 +5007,11 @@ def assign_to(self, dest, typ):
if isinstance(typ, StructType):
# (void *) cast to avoid GCC erroring if the target type is
# (partially) const-qualified. See ExpressionInitializer.assign_to
- out('memcpy((void *)&%s, (%s){%s}, sizeof %s);\n' % (
- dest.read(),
- TArray(typ, mkIntegerLiteral(self.site, 1)).declaration(''),
- self.read(), dest.read()))
+ return ('({ memcpy((void *)&%s, (%s){%s}, sizeof(%s)); })'
+ % (dest,
+ TArray(typ,
+ mkIntegerLiteral(self.site, 1)).declaration(''),
+ self.read(), dest))
else:
raise ICE(self.site, f'unexpected type for initializer: {typ}')
@@ -4979,8 +5050,7 @@ def assign_to(self, dest, typ):
THook))
# (void *) cast to avoid GCC erroring if the target type is
# (partially) const-qualified. See ExpressionInitializer.assign_to
- out('memset((void *)&%s, 0, sizeof(%s));\n'
- % (dest.read(), typ.declaration('')))
+ return f'memset((void *)&{dest}, 0, sizeof({typ.declaration("")}))'
class CompoundLiteral(Expression):
@auto_init
@@ -5039,8 +5109,7 @@ def toc(self):
# zero-initialize VLAs
self.type.print_declaration(self.name, unused = self.unused)
site_linemark(self.init.site)
- self.init.assign_to(mkLit(self.site, self.name, self.type),
- self.type)
+ out(self.init.assign_to(self.name, self.type) + ';\n')
else:
self.type.print_declaration(
self.name, init=self.init.read() if self.init else None,
@@ -5077,7 +5146,8 @@ def sym_declaration(sym, unused=False):
return None
# This will prevent warnings from the C compiler
- unused = unused or (refcount == 0) or sym.value.startswith("__")
+ unused = (unused or (refcount == 0 and dml.globals.suppress_wunused)
+ or sym.name.startswith("_") or sym.value.startswith("__"))
return mkDeclaration(sym.site, sym.value, sym.type,
sym.init, unused)
diff --git a/py/dml/dmlc.py b/py/dml/dmlc.py
index 1c5dad849..7e103eaac 100644
--- a/py/dml/dmlc.py
+++ b/py/dml/dmlc.py
@@ -455,9 +455,6 @@ def main(argv):
default="0",
help=('Limit the number of error messages to N'))
- #
- #
-
#