Merge commit '9f7afa99c67f039e43019ebd08d14a7f01e2d89c' into HEAD

Thomas Schwinge 2024-03-22 10:07:29 +01:00
commit 8f9b47500d
215 changed files with 11723 additions and 3215 deletions


@ -1,3 +1,7 @@
2024-01-08 Joseph Myers <josmyers@redhat.com>
* MAINTAINERS: Update my email address.
2023-12-30 Joseph Myers <jsm@polyomino.org.uk>
* MAINTAINERS: Update my email address.


@ -34,7 +34,7 @@ Jeff Law <jlaw@ventanamicro.com>
Michael Meissner <gnu@the-meissners.org>
Jason Merrill <jason@redhat.com>
David S. Miller <davem@redhat.com>
Joseph Myers <jsm@polyomino.org.uk>
Joseph Myers <josmyers@redhat.com>
Richard Sandiford <richard.sandiford@arm.com>
Bernd Schmidt <bernds_cb1@t-online.de>
Ian Lance Taylor <ian@airs.com>
@ -155,7 +155,7 @@ cygwin, mingw-w64 Jonathan Yong <10walls@gmail.com>
Language Front Ends Maintainers
C front end/ISO C99 Joseph Myers <jsm@polyomino.org.uk>
C front end/ISO C99 Joseph Myers <josmyers@redhat.com>
Ada front end Arnaud Charlet <charlet@adacore.com>
Ada front end Eric Botcazou <ebotcazou@libertysurf.fr>
Ada front end Marc Poulhiès <poulhies@adacore.com>
@ -192,7 +192,7 @@ libquadmath Jakub Jelinek <jakub@redhat.com>
libvtv Caroline Tice <cmtice@google.com>
libphobos Iain Buclaw <ibuclaw@gdcproject.org>
line map Dodji Seketeli <dodji@redhat.com>
soft-fp Joseph Myers <jsm@polyomino.org.uk>
soft-fp Joseph Myers <josmyers@redhat.com>
scheduler (+ haifa) Jim Wilson <wilson@tuliptree.org>
scheduler (+ haifa) Michael Meissner <gnu@the-meissners.org>
scheduler (+ haifa) Jeff Law <jeffreyalaw@gmail.com>
@ -219,7 +219,7 @@ jump.cc David S. Miller <davem@redhat.com>
web pages Gerald Pfeifer <gerald@pfeifer.com>
config.sub/config.guess Ben Elliston <config-patches@gnu.org>
i18n Philipp Thomas <pth@suse.de>
i18n Joseph Myers <jsm@polyomino.org.uk>
i18n Joseph Myers <josmyers@redhat.com>
diagnostic messages Dodji Seketeli <dodji@redhat.com>
diagnostic messages David Malcolm <dmalcolm@redhat.com>
build machinery (*.in) Paolo Bonzini <bonzini@gnu.org>
@ -227,14 +227,14 @@ build machinery (*.in) Nathanael Nerode <neroden@gcc.gnu.org>
build machinery (*.in) Alexandre Oliva <aoliva@gcc.gnu.org>
build machinery (*.in) Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
docs co-maintainer Gerald Pfeifer <gerald@pfeifer.com>
docs co-maintainer Joseph Myers <jsm@polyomino.org.uk>
docs co-maintainer Joseph Myers <josmyers@redhat.com>
docs co-maintainer Sandra Loosemore <sandra@codesourcery.com>
docstring relicensing Gerald Pfeifer <gerald@pfeifer.com>
docstring relicensing Joseph Myers <jsm@polyomino.org.uk>
docstring relicensing Joseph Myers <josmyers@redhat.com>
predict.def Jan Hubicka <hubicka@ucw.cz>
gcov Jan Hubicka <hubicka@ucw.cz>
gcov Nathan Sidwell <nathan@acm.org>
option handling Joseph Myers <jsm@polyomino.org.uk>
option handling Joseph Myers <josmyers@redhat.com>
middle-end Jeff Law <jeffreyalaw@gmail.com>
middle-end Ian Lance Taylor <ian@airs.com>
middle-end Richard Biener <rguenther@suse.de>
@ -278,7 +278,7 @@ CTF, BTF, bpf port David Faust <david.faust@oracle.com>
dataflow Paolo Bonzini <bonzini@gnu.org>
dataflow Seongbae Park <seongbae.park@gmail.com>
dataflow Kenneth Zadeck <zadeck@naturalbridge.com>
driver Joseph Myers <jsm@polyomino.org.uk>
driver Joseph Myers <josmyers@redhat.com>
Fortran Harald Anlauf <anlauf@gmx.de>
Fortran Janne Blomqvist <jb@gcc.gnu.org>
Fortran Tobias Burnus <tobias@codesourcery.com>


@ -312,6 +312,7 @@ flags_to_pass = { flag= GNATBIND ; };
flags_to_pass = { flag= GNATMAKE ; };
flags_to_pass = { flag= GDC ; };
flags_to_pass = { flag= GDCFLAGS ; };
flags_to_pass = { flag= GUILE ; };
// Target tools
flags_to_pass = { flag= AR_FOR_TARGET ; };


@ -3,7 +3,7 @@
#
# Makefile for directory with subdirs to build.
# Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
# 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
# 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2023
# Free Software Foundation
#
# This file is free software; you can redistribute it and/or modify
@ -143,7 +143,8 @@ BASE_EXPORTS = \
M4="$(M4)"; export M4; \
SED="$(SED)"; export SED; \
AWK="$(AWK)"; export AWK; \
MAKEINFO="$(MAKEINFO)"; export MAKEINFO;
MAKEINFO="$(MAKEINFO)"; export MAKEINFO; \
GUILE="$(GUILE)"; export GUILE;
# This is the list of variables to export in the environment when
# configuring subdirectories for the build system.
@ -451,6 +452,8 @@ GM2FLAGS = $(CFLAGS)
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
GUILE = guile
# Pass additional PGO and LTO compiler options to the PGO build.
BUILD_CFLAGS = $(PGO_BUILD_CFLAGS) $(PGO_BUILD_LTO_CFLAGS)
override CFLAGS += $(BUILD_CFLAGS)
@ -883,6 +886,7 @@ BASE_FLAGS_TO_PASS = \
"GNATMAKE=$(GNATMAKE)" \
"GDC=$(GDC)" \
"GDCFLAGS=$(GDCFLAGS)" \
"GUILE=$(GUILE)" \
"AR_FOR_TARGET=$(AR_FOR_TARGET)" \
"AS_FOR_TARGET=$(AS_FOR_TARGET)" \
"CC_FOR_TARGET=$(CC_FOR_TARGET)" \


@ -6,7 +6,7 @@ in
#
# Makefile for directory with subdirs to build.
# Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
# 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
# 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2023
# Free Software Foundation
#
# This file is free software; you can redistribute it and/or modify
@ -146,7 +146,8 @@ BASE_EXPORTS = \
M4="$(M4)"; export M4; \
SED="$(SED)"; export SED; \
AWK="$(AWK)"; export AWK; \
MAKEINFO="$(MAKEINFO)"; export MAKEINFO;
MAKEINFO="$(MAKEINFO)"; export MAKEINFO; \
GUILE="$(GUILE)"; export GUILE;
# This is the list of variables to export in the environment when
# configuring subdirectories for the build system.
@ -454,6 +455,8 @@ GM2FLAGS = $(CFLAGS)
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
GUILE = guile
# Pass additional PGO and LTO compiler options to the PGO build.
BUILD_CFLAGS = $(PGO_BUILD_CFLAGS) $(PGO_BUILD_LTO_CFLAGS)
override CFLAGS += $(BUILD_CFLAGS)


@ -155,10 +155,10 @@ Security features implemented in GCC
GCC implements a number of security features that reduce the impact
of security issues in applications, such as -fstack-protector,
-fstack-clash-protection, _FORTIFY_SOURCE and so on. A failure of
these features to function perfectly in all situations is not an
exploitable vulnerability in itself since it does not affect the
correctness of programs. Further, they're dependent on heuristics
and may not always have full coverage for protection.
these features to function perfectly in all situations is not a
vulnerability in itself since it does not affect the correctness of
programs. Further, they're dependent on heuristics and may not
always have full coverage for protection.
Similarly, GCC may transform code in a way that the correctness of
the expressed algorithm is preserved, but supplementary properties


@ -1,3 +1,10 @@
2024-01-08 Jonathan Wakely <jwakely@redhat.com>
* unicode/README: Add notes about generating libstdc++ tables.
* unicode/GraphemeBreakProperty.txt: New file.
* unicode/emoji-data.txt: New file.
* unicode/gen_libstdcxx_unicode_data.py: New file.
2024-01-05 Jonathan Wakely <jwakely@redhat.com>
* analyze_brprob.py: Remove stray text at end of comment.


@ -1,3 +1,114 @@
2024-01-08 Ilya Leoshkevich <iii@linux.ibm.com>
PR sanitizer/113251
* varasm.cc (assemble_function_label_raw): Do not call
asan_function_start () without the current function.
2024-01-08 Cupertino Miranda <cupertino.miranda@oracle.com>
PR target/113225
* btfout.cc (btf_collect_datasec): Skip creating BTF info for
extern and kernel_helper attributed function decls.
2024-01-08 Cupertino Miranda <cupertino.miranda@oracle.com>
* btfout.cc (output_btf_strs): Changed.
2024-01-08 Tobias Burnus <tobias@codesourcery.com>
* config/gcn/mkoffload.cc (main): Handle gfx1100
when setting the default XNACK.
2024-01-08 Tobias Burnus <tobias@codesourcery.com>
* config.gcc (amdgcn-*-amdhsa): Accept --with-arch=gfx1100.
* config/gcn/gcn-hsa.h (NO_XNACK): Add gfx1100:
(ASM_SPEC): Handle gfx1100.
* config/gcn/gcn-opts.h (enum processor_type): Add PROCESSOR_GFX1100.
(enum gcn_isa): Add ISA_RDNA3.
(TARGET_GFX1100, TARGET_RDNA2_PLUS, TARGET_RDNA3): Define.
* config/gcn/gcn-valu.md: Change TARGET_RDNA2 to TARGET_RDNA2_PLUS.
* config/gcn/gcn.cc (gcn_option_override,
gcn_omp_device_kind_arch_isa, output_file_start): Handle gfx1100.
(gcn_global_address_p, gcn_addr_space_legitimate_address_p): Change
TARGET_RDNA2 to TARGET_RDNA2_PLUS.
(gcn_hsa_declare_function_name): Don't use '.amdhsa_reserve_flat_scratch'
with gfx1100.
* config/gcn/gcn.h (ASSEMBLER_DIALECT): Likewise.
(TARGET_CPU_CPP_BUILTINS): Define __RDNA3__, __gfx1030__ and
__gfx1100__.
* config/gcn/gcn.md: Change TARGET_RDNA2 to TARGET_RDNA2_PLUS.
* config/gcn/gcn.opt (Enum gpu_type): Add gfx1100.
* config/gcn/mkoffload.cc (EF_AMDGPU_MACH_AMDGCN_GFX1100): Define.
(isa_has_combined_avgprs, main): Handle gfx1100.
* config/gcn/t-omp-device (isa): Add gfx1100.
2024-01-08 Richard Biener <rguenther@suse.de>
* doc/invoke.texi (-mmovbe): Clarify.
2024-01-08 Richard Biener <rguenther@suse.de>
PR tree-optimization/113026
* tree-vect-loop.cc (vect_need_peeling_or_partial_vectors_p):
Avoid an epilog in more cases.
* tree-vect-loop-manip.cc (vect_do_peeling): Adjust the
epilogues niter upper bounds and estimates.
2024-01-08 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/113228
* gimplify.cc (recalculate_side_effects): Do nothing for SSA_NAMEs.
2024-01-08 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/113120
* gimple-lower-bitint.cc (gimple_lower_bitint): Fix handling of very
large _BitInt zero INTEGER_CST PHI argument.
2024-01-08 Jakub Jelinek <jakub@redhat.com>
PR tree-optimization/113119
* gimple-lower-bitint.cc (optimizable_arith_overflow): Punt if
both REALPART_EXPR and cast from IMAGPART_EXPR appear, but cast
is before REALPART_EXPR.
2024-01-08 Georg-Johann Lay <avr@gjlay.de>
PR target/112952
* config/avr/avr.cc (avr_handle_addr_attribute): Also print valid
range when diagnosing attribute "io" and "io_low" are out of range.
(avr_eval_addr_attrib): Don't ICE on empty address at that place.
(avr_insert_attributes): Reject if attribute "address", "io" or "io_low"
in contexts other than static storage.
(avr_asm_output_aligned_decl_common): Move output of decls with
attribute "address", "io", and "io_low" to...
(avr_output_addr_attrib): ...this new function.
(avr_asm_asm_output_aligned_bss): Remove output for decls with
attribute "address", "io", and "io_low".
(avr_encode_section_info): Rectify handling of decls with attribute
"address", "io", and "io_low".
2024-01-08 Andrew Stubbs <ams@codesourcery.com>
* config/gcn/mkoffload.cc (TEST_XNACK_UNSET): New.
(elf_flags): Remove XNACK from the default value.
(main): Set a default XNACK according to the arch.
2024-01-08 Andrew Stubbs <ams@codesourcery.com>
* config/gcn/mkoffload.cc (isa_has_combined_avgprs): Delete.
(process_asm): Don't count avgprs.
2024-01-08 Hongyu Wang <hongyu.wang@intel.com>
* config/i386/i386.opt: Add supported sub-features.
* doc/extend.texi: Add description for target attribute.
2024-01-08 Feng Wang <wangfeng@eswincomputing.com>
* config/riscv/vector.md: Modify avl_type operand index of zvbc ins.
2024-01-07 Roger Sayle <roger@nextmovesoftware.com>
Uros Bizjak <ubizjak@gmail.com>


@ -1 +1 @@
20240108
20240109


@ -1260,9 +1260,9 @@ package body Atree is
end if;
end Change_Node;
----------------
-- Copy_Slots --
----------------
------------------------
-- Copy_Dynamic_Slots --
------------------------
procedure Copy_Dynamic_Slots
(From, To : Node_Offset; Num_Slots : Slot_Count)
@ -1282,6 +1282,10 @@ package body Atree is
Destination_Slots := Source_Slots;
end Copy_Dynamic_Slots;
----------------
-- Copy_Slots --
----------------
procedure Copy_Slots (Source, Destination : Node_Id) is
pragma Debug (Validate_Node (Source));
pragma Assert (Source /= Destination);
@ -1292,6 +1296,12 @@ package body Atree is
Node_Offsets.Table (Node_Offsets.First .. Node_Offsets.Last);
begin
-- Empty_Or_Error use as described in types.ads
if Destination <= Empty_Or_Error or No (Source) then
pragma Assert (Serious_Errors_Detected > 0);
return;
end if;
Copy_Dynamic_Slots
(Off_F (Source), Off_F (Destination), S_Size);
All_Node_Offsets (Destination).Slots := All_Node_Offsets (Source).Slots;


@ -2529,17 +2529,9 @@ package body Contracts is
Pragma_Argument_Associations => Args,
Class_Present => Class_Present);
Subp_Decl : Node_Id := Subp_Id;
Subp_Decl : constant Node_Id := Enclosing_Declaration (Subp_Id);
pragma Assert (Is_Declaration (Subp_Decl));
begin
-- Enclosing_Declaration may return, for example,
-- a N_Procedure_Specification node. Cope with this.
loop
Subp_Decl := Enclosing_Declaration (Subp_Decl);
exit when Is_Declaration (Subp_Decl);
Subp_Decl := Parent (Subp_Decl);
pragma Assert (Present (Subp_Decl));
end loop;
Insert_After_And_Analyze (Subp_Decl, Prag);
end Insert_Stable_Property_Check;


@ -66,6 +66,12 @@ Aspect Abstract_State
This aspect is equivalent to :ref:`pragma Abstract_State<Pragma-Abstract_State>`.
Aspect Always_Terminates
========================
.. index:: Always_Terminates
This boolean aspect is equivalent to :ref:`pragma Always_Terminates<Pragma-Always_Terminates>`.
Aspect Annotate
===============


@ -329,6 +329,20 @@ this pragma serves no purpose but is ignored
rather than rejected to allow common sets of sources to be used
in the two situations.
.. _Pragma-Always_Terminates:
Pragma Always_Terminates
========================
Syntax:
.. code-block:: ada
pragma Always_Terminates [ (boolean_EXPRESSION) ];
For the semantics of this pragma, see the entry for aspect ``Always_Terminates``
in the SPARK 2014 Reference Manual, section 7.1.2.
.. _Pragma-Annotate:
Pragma Annotate
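To make the pragma documented above concrete, here is a minimal sketch (not part of this diff) of the equivalent aspect form on a subprogram declaration; the package, type, and procedure names are hypothetical, and a GNAT/SPARK toolchain that recognizes Always_Terminates is assumed:

package Queues with SPARK_Mode is
   type Queue is private;

   --  Aspect form of Always_Terminates: GNATprove is asked to show
   --  that Drain terminates on every call.
   procedure Drain (Q : in out Queue)
     with Always_Terminates => True;
private
   type Queue is null record;
end Queues;

When the boolean expression is omitted, it defaults to True, as for other Boolean-valued aspects.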


@ -6606,6 +6606,8 @@ package body Exp_Aggr is
Siz_Exp : Node_Id := Empty;
Count_Type : Entity_Id;
Is_Indexed_Aggregate : Boolean := False;
function Aggregate_Size return Int;
-- Compute number of entries in aggregate, including choices
-- that cover a range or subtype, as well as iterated constructs.
@ -7042,6 +7044,35 @@ package body Exp_Aggr is
("\this will result in infinite recursion??", Parent (N));
end if;
-- Determine whether this is an indexed aggregate (see RM 4.3.5(25/5)).
if Present (New_Indexed_Subp) then
if No (Add_Unnamed_Subp) then
Is_Indexed_Aggregate := True;
else
declare
Comp_Assns : constant List_Id := Component_Associations (N);
Comp_Assn : Node_Id;
begin
if Present (Comp_Assns)
and then not Is_Empty_List (Comp_Assns)
then
Comp_Assn := First (Comp_Assns);
if Nkind (Comp_Assn) = N_Component_Association
or else
(Nkind (Comp_Assn) = N_Iterated_Component_Association
and then Present (Defining_Identifier (Comp_Assn)))
then
Is_Indexed_Aggregate := True;
end if;
end if;
end;
end if;
end if;
---------------------------
-- Positional aggregate --
---------------------------
@ -7068,12 +7099,11 @@ package body Exp_Aggr is
Next (Comp);
end loop;
end;
end if;
-- Indexed aggregates are handled below. Unnamed aggregates
-- such as sets may include iterated component associations.
if No (New_Indexed_Subp) then
elsif not Is_Indexed_Aggregate then
Comp := First (Component_Associations (N));
while Present (Comp) loop
if Nkind (Comp) = N_Iterated_Component_Association then
@ -7128,15 +7158,16 @@ package body Exp_Aggr is
-- Indexed_Aggregate --
-----------------------
-- For an indexed aggregate there must be an Assigned_Indexeed
-- For an indexed aggregate there must be an Assigned_Indexed
-- subprogram. Note that unlike array aggregates, a container
-- aggregate must be fully positional or fully indexed. In the
-- first case the expansion has already taken place.
-- TBA: the keys for an indexed aggregate must provide a dense
-- range with no repetitions.
if Present (Assign_Indexed_Subp)
if Is_Indexed_Aggregate
and then Present (Component_Associations (N))
and then not Is_Empty_List (Component_Associations (N))
then
declare
Insert : constant Entity_Id := Entity (Assign_Indexed_Subp);
@ -7153,7 +7184,6 @@ package body Exp_Aggr is
Comp : Node_Id;
Index : Node_Id;
Pos : Int := 0;
Stat : Node_Id;
Key : Node_Id;
@ -7196,6 +7226,8 @@ package body Exp_Aggr is
end Expand_Range_Component;
begin
pragma Assert (No (Expressions (N)));
if Siz > 0 then
-- Modify the call to the constructor to allocate the
@ -7216,89 +7248,60 @@ package body Exp_Aggr is
Index)));
end if;
if Present (Expressions (N)) then
Comp := First (Expressions (N));
Comp := First (Component_Associations (N));
while Present (Comp) loop
-- The choice may be a static value, or a range with
-- static bounds.
-- Compute index position for successive components
-- in the list of expressions, and use the indexed
-- assignment procedure for each.
while Present (Comp) loop
if Nkind (Comp) = N_Component_Association then
Key := First (Choices (Comp));
while Present (Key) loop
Index := Make_Op_Add (Loc,
Left_Opnd => Type_Low_Bound (Index_Type),
Right_Opnd => Make_Integer_Literal (Loc, Pos));
-- If the expression is a box, the corresponding
-- component (s) is left uninitialized.
Stat := Make_Procedure_Call_Statement (Loc,
Name => New_Occurrence_Of (Insert, Loc),
Parameter_Associations =>
New_List (New_Occurrence_Of (Temp, Loc),
Index,
New_Copy_Tree (Comp)));
if Box_Present (Comp) then
goto Next_Key;
Pos := Pos + 1;
elsif Nkind (Key) = N_Range then
Append (Stat, Aggr_Code);
Next (Comp);
end loop;
end if;
-- Create loop for the specified range,
-- with copies of the expression.
if Present (Component_Associations (N)) then
Comp := First (Component_Associations (N));
Stat :=
Expand_Range_Component (Key, Expression (Comp));
-- The choice may be a static value, or a range with
-- static bounds.
while Present (Comp) loop
if Nkind (Comp) = N_Component_Association then
Key := First (Choices (Comp));
while Present (Key) loop
-- If the expression is a box, the corresponding
-- component (s) is left uninitialized.
if Box_Present (Comp) then
goto Next_Key;
elsif Nkind (Key) = N_Range then
-- Create loop for the specified range,
-- with copies of the expression.
Stat :=
Expand_Range_Component (Key, Expression (Comp));
else
Stat := Make_Procedure_Call_Statement (Loc,
Name => New_Occurrence_Of
(Entity (Assign_Indexed_Subp), Loc),
Parameter_Associations =>
New_List (New_Occurrence_Of (Temp, Loc),
New_Copy_Tree (Key),
New_Copy_Tree (Expression (Comp))));
end if;
Append (Stat, Aggr_Code);
<<Next_Key>>
Next (Key);
end loop;
else
-- Iterated component association. Discard
-- positional insertion procedure.
if No (Iterator_Specification (Comp)) then
Add_Named_Subp := Assign_Indexed_Subp;
Add_Unnamed_Subp := Empty;
else
Stat := Make_Procedure_Call_Statement (Loc,
Name => New_Occurrence_Of
(Entity (Assign_Indexed_Subp), Loc),
Parameter_Associations =>
New_List (New_Occurrence_Of (Temp, Loc),
New_Copy_Tree (Key),
New_Copy_Tree (Expression (Comp))));
end if;
Expand_Iterated_Component (Comp);
Append (Stat, Aggr_Code);
<<Next_Key>>
Next (Key);
end loop;
else
-- Iterated component association. Discard
-- positional insertion procedure.
if No (Iterator_Specification (Comp)) then
Add_Named_Subp := Assign_Indexed_Subp;
Add_Unnamed_Subp := Empty;
end if;
Next (Comp);
end loop;
end if;
Expand_Iterated_Component (Comp);
end if;
Next (Comp);
end loop;
end;
end if;


@ -8653,17 +8653,8 @@ package body Exp_Attr is
--------------------------
function In_Available_Context (Ent : Entity_Id) return Boolean is
Decl : Node_Id := Enclosing_Declaration (Ent);
Decl : constant Node_Id := Enclosing_Declaration (Ent);
begin
-- Enclosing_Declaration does not always return a declaration;
-- cope with this irregularity.
if Decl in N_Subprogram_Specification_Id
and then Nkind (Parent (Decl)) in
N_Subprogram_Body | N_Subprogram_Declaration
then
Decl := Parent (Decl);
end if;
if Has_Declarations (Parent (Decl)) then
return In_Subtree (Attr_Ref, Root => Parent (Decl));
elsif Is_List_Member (Decl) then


@ -316,11 +316,10 @@ package body Exp_Ch6 is
-- Insert the Post_Call list previously produced by routine Expand_Actuals
-- or Expand_Call_Helper into the tree.
function Is_True_Build_In_Place_Function_Call (N : Node_Id) return Boolean;
function Is_Function_Call_With_BIP_Formals (N : Node_Id) return Boolean;
-- Ada 2005 (AI-318-02): Returns True if N denotes a call to a function
-- that requires handling as a build-in-place call; returns False for
-- non-BIP function calls and also for calls to functions with inherited
-- BIP formals that do not require BIP formals. For example:
-- that requires handling as a build-in-place call, that is, BIP function
-- calls and calls to functions with inherited BIP formals. For example:
--
-- type Iface is limited interface;
-- function Get_Object return Iface;
@ -330,15 +329,14 @@ package body Exp_Ch6 is
-- type T1 is new Root1 and Iface with ...
-- function Get_Object return T1;
-- -- This primitive requires the BIP formals, and the evaluation of
-- -- Is_True_Build_In_Place_Function_Call returns True.
-- -- Is_Build_In_Place_Function_Call returns True.
--
-- type Root2 is tagged record ...
-- type T2 is new Root2 and Iface with ...
-- function Get_Object return T2;
-- -- This primitive inherits the BIP formals of the interface primitive
-- -- but, given that T2 is not a limited type, it does not require such
-- -- formals; therefore Is_True_Build_In_Place_Function_Call returns
-- -- False.
-- -- formals; therefore Is_Build_In_Place_Function_Call returns False.
procedure Replace_Renaming_Declaration_Id
(New_Decl : Node_Id;
@ -4906,8 +4904,8 @@ package body Exp_Ch6 is
-- inherited the BIP extra actuals but does not require them.
if Nkind (Call_Node) = N_Function_Call
and then Is_Build_In_Place_Function_Call (Call_Node)
and then not Is_True_Build_In_Place_Function_Call (Call_Node)
and then Is_Function_Call_With_BIP_Formals (Call_Node)
and then not Is_Build_In_Place_Function_Call (Call_Node)
then
Add_Dummy_Build_In_Place_Actuals (Subp,
Num_Added_Extra_Actuals => Num_Extra_Actuals);
@ -4918,8 +4916,8 @@ package body Exp_Ch6 is
-- inherited the BIP extra actuals but does not require them.
elsif Nkind (Call_Node) = N_Function_Call
and then Is_Build_In_Place_Function_Call (Call_Node)
and then not Is_True_Build_In_Place_Function_Call (Call_Node)
and then Is_Function_Call_With_BIP_Formals (Call_Node)
and then not Is_Build_In_Place_Function_Call (Call_Node)
then
Add_Dummy_Build_In_Place_Actuals (Subp);
end if;
@ -5614,7 +5612,7 @@ package body Exp_Ch6 is
pragma Assert (Ekind (Current_Subprogram) = E_Function);
pragma Assert
(Is_Build_In_Place_Function (Current_Subprogram) =
Is_True_Build_In_Place_Function_Call (Exp));
Is_Build_In_Place_Function_Call (Exp));
null;
end if;
@ -6803,17 +6801,6 @@ package body Exp_Ch6 is
end if;
end if;
-- Assert that if F says "return G(...);"
-- then F and G are both b-i-p, or neither b-i-p.
if Nkind (Exp) = N_Function_Call then
pragma Assert (Ekind (Scope_Id) = E_Function);
pragma Assert
(Is_Build_In_Place_Function (Scope_Id) =
Is_True_Build_In_Place_Function_Call (Exp));
null;
end if;
-- For the case of a simple return that does not come from an
-- extended return, in the case of build-in-place, we rewrite
-- "return <expression>;" to be:
@ -6833,7 +6820,7 @@ package body Exp_Ch6 is
pragma Assert
(Comes_From_Extended_Return_Statement (N)
or else not Is_True_Build_In_Place_Function_Call (Exp)
or else not Is_Build_In_Place_Function_Call (Exp)
or else Has_BIP_Formals (Scope_Id));
if not Comes_From_Extended_Return_Statement (N)
@ -6868,6 +6855,17 @@ package body Exp_Ch6 is
end;
end if;
-- Assert that if F says "return G(...);"
-- then F and G are both b-i-p, or neither b-i-p.
if Nkind (Exp) = N_Function_Call then
pragma Assert (Ekind (Scope_Id) = E_Function);
pragma Assert
(Is_Build_In_Place_Function (Scope_Id) =
Is_Build_In_Place_Function_Call (Exp));
null;
end if;
-- Here we have a simple return statement that is part of the expansion
-- of an extended return statement (either written by the user, or
-- generated by the above code).
@ -8111,6 +8109,66 @@ package body Exp_Ch6 is
Exp_Node : constant Node_Id := Unqual_Conv (N);
Function_Id : Entity_Id;
begin
-- Return False if the expander is currently inactive, since awareness
-- of build-in-place treatment is only relevant during expansion. Note
-- that Is_Build_In_Place_Function, which is called as part of this
-- function, is also conditioned this way, but we need to check here as
-- well to avoid blowing up on processing protected calls when expansion
-- is disabled (such as with -gnatc) since those would trip over the
-- raise of Program_Error below.
-- In SPARK mode, build-in-place calls are not expanded, so that we
-- may end up with a call that is neither resolved to an entity, nor
-- an indirect call.
if not Expander_Active or else Nkind (Exp_Node) /= N_Function_Call then
return False;
end if;
if Is_Entity_Name (Name (Exp_Node)) then
Function_Id := Entity (Name (Exp_Node));
-- In the case of an explicitly dereferenced call, use the subprogram
-- type generated for the dereference.
elsif Nkind (Name (Exp_Node)) = N_Explicit_Dereference then
Function_Id := Etype (Name (Exp_Node));
-- This may be a call to a protected function.
elsif Nkind (Name (Exp_Node)) = N_Selected_Component then
-- The selector in question might not have been analyzed due to a
-- previous error, so analyze it here to output the appropriate
-- error message instead of crashing when attempting to fetch its
-- entity.
if not Analyzed (Selector_Name (Name (Exp_Node))) then
Analyze (Selector_Name (Name (Exp_Node)));
end if;
Function_Id := Etype (Entity (Selector_Name (Name (Exp_Node))));
else
raise Program_Error;
end if;
declare
Result : constant Boolean := Is_Build_In_Place_Function (Function_Id);
-- So we can stop here in the debugger
begin
return Result;
end;
end Is_Build_In_Place_Function_Call;
---------------------------------------
-- Is_Function_Call_With_BIP_Formals --
---------------------------------------
function Is_Function_Call_With_BIP_Formals (N : Node_Id) return Boolean is
Exp_Node : constant Node_Id := Unqual_Conv (N);
Function_Id : Entity_Id;
begin
-- Return False if the expander is currently inactive, since awareness
-- of build-in-place treatment is only relevant during expansion. Note
@ -8178,41 +8236,7 @@ package body Exp_Ch6 is
end if;
end;
end if;
end Is_Build_In_Place_Function_Call;
------------------------------------------
-- Is_True_Build_In_Place_Function_Call --
------------------------------------------
function Is_True_Build_In_Place_Function_Call (N : Node_Id) return Boolean
is
Exp_Node : Node_Id;
Function_Id : Entity_Id;
begin
-- No action needed if we know that this is not a BIP function call
if not Is_Build_In_Place_Function_Call (N) then
return False;
end if;
Exp_Node := Unqual_Conv (N);
if Is_Entity_Name (Name (Exp_Node)) then
Function_Id := Entity (Name (Exp_Node));
elsif Nkind (Name (Exp_Node)) = N_Explicit_Dereference then
Function_Id := Etype (Name (Exp_Node));
elsif Nkind (Name (Exp_Node)) = N_Selected_Component then
Function_Id := Etype (Entity (Selector_Name (Name (Exp_Node))));
else
raise Program_Error;
end if;
return Is_Build_In_Place_Function (Function_Id);
end Is_True_Build_In_Place_Function_Call;
end Is_Function_Call_With_BIP_Formals;
-----------------------------------
-- Is_Build_In_Place_Result_Type --
@ -8368,14 +8392,6 @@ package body Exp_Ch6 is
Func_Call := Expression (Func_Call);
end if;
-- No action needed if the called function inherited the BIP extra
-- formals but it is not a true BIP function.
if not Is_True_Build_In_Place_Function_Call (Func_Call) then
pragma Assert (Is_Expanded_Build_In_Place_Call (Func_Call));
return;
end if;
-- Mark the call as processed as a build-in-place call
pragma Assert (not Is_Expanded_Build_In_Place_Call (Func_Call));
@ -8781,14 +8797,6 @@ package body Exp_Ch6 is
Result_Subt : Entity_Id;
begin
-- No action needed if the called function inherited the BIP extra
-- formals but it is not a true BIP function.
if not Is_True_Build_In_Place_Function_Call (Func_Call) then
pragma Assert (Is_Expanded_Build_In_Place_Call (Func_Call));
return;
end if;
-- Mark the call as processed as a build-in-place call
pragma Assert (not Is_Expanded_Build_In_Place_Call (Func_Call));


@ -159,8 +159,7 @@ package Exp_Ch6 is
function Is_Build_In_Place_Function_Call (N : Node_Id) return Boolean;
-- Ada 2005 (AI-318-02): Returns True if N denotes a call to a function
-- that requires handling as a build-in-place call (possibly qualified or
-- converted); that is, BIP function calls, and calls to functions with
-- inherited BIP formals.
-- converted).
function Is_Build_In_Place_Result_Type (Typ : Entity_Id) return Boolean;
-- Ada 2005 (AI-318-02): Returns True if functions returning the type use


@ -12012,19 +12012,71 @@ package body Exp_Util is
function Possible_Side_Effect_In_SPARK (Exp : Node_Id) return Boolean is
begin
-- Side-effect removal in SPARK should only occur when not inside a
-- generic and not doing a preanalysis, inside an object renaming or
-- a type declaration or a for-loop iteration scheme.
-- Side-effect removal in SPARK should only occur when not inside a
-- generic and not doing a preanalysis, inside an object renaming or
-- a type declaration or a for-loop iteration scheme.
return not Inside_A_Generic
if not Inside_A_Generic
and then Full_Analysis
and then Nkind (Enclosing_Declaration (Exp)) in
N_Component_Declaration
| N_Full_Type_Declaration
| N_Iterator_Specification
| N_Loop_Parameter_Specification
| N_Object_Renaming_Declaration
| N_Subtype_Declaration;
then
case Nkind (Enclosing_Declaration (Exp)) is
when N_Component_Declaration
| N_Full_Type_Declaration
| N_Iterator_Specification
| N_Loop_Parameter_Specification
| N_Object_Renaming_Declaration
=>
return True;
-- If the expression belongs to an itype declaration, then
-- check if side effects are allowed in the original
-- associated node.
when N_Subtype_Declaration =>
declare
Subt : constant Entity_Id :=
Defining_Identifier (Enclosing_Declaration (Exp));
begin
if Is_Itype (Subt) then
-- When this routine is called while the itype
-- is being created, the entity might not yet be
-- decorated with the associated node, but should
-- have the related expression.
if Present (Associated_Node_For_Itype (Subt)) then
return
Possible_Side_Effect_In_SPARK
(Associated_Node_For_Itype (Subt));
elsif Present (Related_Expression (Subt)) then
return
Possible_Side_Effect_In_SPARK
(Related_Expression (Subt));
-- When the itype doesn't have any indication of its
-- origin (which currently only happens for packed
-- array types created by freezing that shouldn't
-- be picked by GNATprove anyway), then we can
-- conservatively assume that the expression can
-- be kept as it appears in the source code.
else
pragma Assert (Is_Packed_Array_Impl_Type (Subt));
return False;
end if;
else
return True;
end if;
end;
when others =>
return False;
end case;
else
return False;
end if;
end Possible_Side_Effect_In_SPARK;
-- Local variables


@ -8141,10 +8141,19 @@ package body Freeze is
Decl_Node : Node_Id;
begin
-- If E is an itype, pretend that it is declared in N
-- If E is an itype, pretend that it is declared in N except for a
-- class-wide subtype with an equivalent type, because this latter
-- type comes with a bona-fide declaration node.
if Is_Itype (E) then
Decl_Node := N;
if Ekind (E) = E_Class_Wide_Subtype
and then Present (Equivalent_Type (E))
then
Decl_Node := Declaration_Node (Equivalent_Type (E));
else
Decl_Node := N;
end if;
else
Decl_Node := Declaration_Node (E);
end if;


@ -2111,7 +2111,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
tree *gnu_index_types = XALLOCAVEC (tree, ndim);
tree *gnu_temp_fields = XALLOCAVEC (tree, ndim);
tree gnu_max_size = size_one_node;
tree comp_type, tem, obj;
tree comp_type, fld, tem, obj;
Entity_Id gnat_index;
alias_set_type ptr_set = -1;
int index;
@ -2184,11 +2184,11 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
if the FIELD_DECLs are distinct as objects. */
if (COMPLETE_TYPE_P (gnu_fat_type))
{
tem = TYPE_FIELDS (gnu_fat_type);
if (TYPE_ALIAS_SET_KNOWN_P (TREE_TYPE (tem)))
ptr_set = TYPE_ALIAS_SET (TREE_TYPE (tem));
TREE_TYPE (tem) = ptr_type_node;
TREE_TYPE (DECL_CHAIN (tem)) = gnu_ptr_template;
fld = TYPE_FIELDS (gnu_fat_type);
if (TYPE_ALIAS_SET_KNOWN_P (TYPE_CANONICAL (TREE_TYPE (fld))))
ptr_set = TYPE_ALIAS_SET (TYPE_CANONICAL (TREE_TYPE (fld)));
TREE_TYPE (fld) = ptr_type_node;
TREE_TYPE (DECL_CHAIN (fld)) = gnu_ptr_template;
TYPE_DECL_SUPPRESS_DEBUG (TYPE_STUB_DECL (gnu_fat_type)) = 0;
for (tree t = gnu_fat_type; t; t = TYPE_NEXT_VARIANT (t))
SET_TYPE_UNCONSTRAINED_ARRAY (t, gnu_type);
@ -2197,15 +2197,15 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
{
/* We make the fields addressable for the sake of compatibility
with languages for which the regular fields are addressable. */
tem
fld
= create_field_decl (get_identifier ("P_ARRAY"),
ptr_type_node, gnu_fat_type,
NULL_TREE, NULL_TREE, 0, 1);
DECL_CHAIN (tem)
DECL_CHAIN (fld)
= create_field_decl (get_identifier ("P_BOUNDS"),
gnu_ptr_template, gnu_fat_type,
NULL_TREE, NULL_TREE, 0, 1);
finish_fat_pointer_type (gnu_fat_type, tem);
finish_fat_pointer_type (gnu_fat_type, fld);
SET_TYPE_UNCONSTRAINED_ARRAY (gnu_fat_type, gnu_type);
}
@ -2230,7 +2230,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
fields once we build them. */
tem = build3 (COMPONENT_REF, gnu_ptr_template,
build0 (PLACEHOLDER_EXPR, gnu_fat_type),
DECL_CHAIN (TYPE_FIELDS (gnu_fat_type)), NULL_TREE);
DECL_CHAIN (fld), NULL_TREE);
gnu_template_reference
= build_unary_op (INDIRECT_REF, gnu_template_type, tem);
TREE_READONLY (gnu_template_reference) = 1;
@ -2413,12 +2413,11 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
type since the implementation type may vary between constrained
subtypes and unconstrained base type. */
if (Present (PAT))
TREE_TYPE (TYPE_FIELDS (gnu_fat_type))
= build_pointer_type_for_mode (tem, ptr_mode, true);
TREE_TYPE (fld) = build_pointer_type_for_mode (tem, ptr_mode, true);
else
TREE_TYPE (TYPE_FIELDS (gnu_fat_type)) = build_pointer_type (tem);
TREE_TYPE (fld) = build_pointer_type (tem);
if (ptr_set != -1)
TYPE_ALIAS_SET (TREE_TYPE (TYPE_FIELDS (gnu_fat_type))) = ptr_set;
TYPE_ALIAS_SET (TYPE_CANONICAL (TREE_TYPE (fld))) = ptr_set;
/* If the maximum size doesn't overflow, use it. */
if (gnu_max_size
@ -6504,6 +6503,28 @@ gnat_to_gnu_subprog_type (Entity_Id gnat_subprog, bool definition,
}
break;
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
case BUILT_IN_ATOMIC_STORE_N:
/* This is a generic builtin overloaded on its second
parameter type, so do type resolution based on it. */
if (list_length (gnu_param_type_list) >= 3
&& type_for_atomic_builtin_p
(list_second (gnu_param_type_list)))
gnu_builtin_decl
= resolve_atomic_builtin
(fncode, list_second (gnu_param_type_list));
else
{
post_error
("??cannot import type-generic 'G'C'C builtin!",
gnat_subprog);
post_error
("\\?use a supported second parameter type",
gnat_subprog);
gnu_builtin_decl = NULL_TREE;
}
break;
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
/* This is a generic builtin overloaded on its third
parameter type, so do type resolution based on it. */
@ -6525,9 +6546,7 @@ gnat_to_gnu_subprog_type (Entity_Id gnat_subprog, bool definition,
}
break;
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
case BUILT_IN_SYNC_LOCK_RELEASE_N:
case BUILT_IN_ATOMIC_STORE_N:
post_error
("??unsupported type-generic 'G'C'C builtin!",
gnat_subprog);


@ -158,14 +158,14 @@ enum alias_set_op
ALIAS_SET_SUPERSET
};
/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
/* Relate the alias sets of NEW_TYPE and OLD_TYPE according to OP.
If this is a multi-dimensional array type, do this recursively.
OP may be
- ALIAS_SET_COPY: the new set is made a copy of the old one.
- ALIAS_SET_SUPERSET: the new set is made a superset of the old one.
- ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
extern void relate_alias_sets (tree gnu_new_type, tree gnu_old_type,
extern void relate_alias_sets (tree new_type, tree old_type,
enum alias_set_op op);
/* Given GNAT_ENTITY, an object (constant, variable, parameter, exception)
@ -1238,6 +1238,14 @@ operand_type (tree expr)
return TREE_TYPE (TREE_OPERAND (expr, 0));
}
/* Return the second value of a list. */
static inline tree
list_second (tree list)
{
return TREE_VALUE (TREE_CHAIN (list));
}
/* Return the third value of a list. */
static inline tree


@ -1823,7 +1823,7 @@ set_reverse_storage_order_on_pad_type (tree type)
return canonicalize_pad_type (type);
}
/* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP.
/* Relate the alias sets of NEW_TYPE and OLD_TYPE according to OP.
If this is a multi-dimensional array type, do this recursively.
OP may be
@ -1832,30 +1832,28 @@ set_reverse_storage_order_on_pad_type (tree type)
- ALIAS_SET_SUBSET: the new set is made a subset of the old one. */
void
relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
relate_alias_sets (tree new_type, tree old_type, enum alias_set_op op)
{
/* Remove any padding from GNU_OLD_TYPE. It doesn't matter in the case
of a one-dimensional array, since the padding has the same alias set
as the field type, but if it's a multi-dimensional array, we need to
see the inner types. */
while (TREE_CODE (gnu_old_type) == RECORD_TYPE
&& (TYPE_JUSTIFIED_MODULAR_P (gnu_old_type)
|| TYPE_PADDING_P (gnu_old_type)))
gnu_old_type = TREE_TYPE (TYPE_FIELDS (gnu_old_type));
while (TREE_CODE (old_type) == RECORD_TYPE
&& (TYPE_JUSTIFIED_MODULAR_P (old_type)
|| TYPE_PADDING_P (old_type)))
old_type = TREE_TYPE (TYPE_FIELDS (old_type));
/* Unconstrained array types are deemed incomplete and would thus be given
alias set 0. Retrieve the underlying array type. */
if (TREE_CODE (gnu_old_type) == UNCONSTRAINED_ARRAY_TYPE)
gnu_old_type
= TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_old_type))));
if (TREE_CODE (gnu_new_type) == UNCONSTRAINED_ARRAY_TYPE)
gnu_new_type
= TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_new_type))));
if (TREE_CODE (old_type) == UNCONSTRAINED_ARRAY_TYPE)
old_type = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (old_type))));
if (TREE_CODE (new_type) == UNCONSTRAINED_ARRAY_TYPE)
new_type = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (new_type))));
if (TREE_CODE (gnu_new_type) == ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (gnu_new_type)) == ARRAY_TYPE
&& TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_new_type)))
relate_alias_sets (TREE_TYPE (gnu_new_type), TREE_TYPE (gnu_old_type), op);
if (TREE_CODE (new_type) == ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (new_type)) == ARRAY_TYPE
&& TYPE_MULTI_ARRAY_P (TREE_TYPE (new_type)))
relate_alias_sets (TREE_TYPE (new_type), TREE_TYPE (old_type), op);
switch (op)
{
@ -1864,19 +1862,20 @@ relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
aliasing settings because this can break the aliasing relationship
between the array type and its element type. */
if (flag_checking || flag_strict_aliasing)
gcc_assert (!(TREE_CODE (gnu_new_type) == ARRAY_TYPE
&& TREE_CODE (gnu_old_type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (gnu_new_type)
!= TYPE_NONALIASED_COMPONENT (gnu_old_type)));
gcc_assert (!(TREE_CODE (new_type) == ARRAY_TYPE
&& TREE_CODE (old_type) == ARRAY_TYPE
&& TYPE_NONALIASED_COMPONENT (new_type)
!= TYPE_NONALIASED_COMPONENT (old_type)));
TYPE_ALIAS_SET (gnu_new_type) = get_alias_set (gnu_old_type);
/* The alias set always lives on the TYPE_CANONICAL. */
TYPE_ALIAS_SET (TYPE_CANONICAL (new_type)) = get_alias_set (old_type);
break;
case ALIAS_SET_SUBSET:
case ALIAS_SET_SUPERSET:
{
alias_set_type old_set = get_alias_set (gnu_old_type);
alias_set_type new_set = get_alias_set (gnu_new_type);
alias_set_type old_set = get_alias_set (old_type);
alias_set_type new_set = get_alias_set (new_type);
/* Do nothing if the alias sets conflict. This ensures that we
never call record_alias_subset several times for the same pair
@ -1895,7 +1894,7 @@ relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op)
gcc_unreachable ();
}
record_component_aliases (gnu_new_type);
record_component_aliases (new_type);
}
/* Record TYPE as a builtin type for Ada. NAME is the name of the type.


@ -1142,14 +1142,10 @@ build_binary_op (enum tree_code op_code, tree result_type,
tree left_ref_type = TREE_TYPE (left_base_type);
tree right_ref_type = TREE_TYPE (right_base_type);
/* Anonymous access types in Ada 2005 can point to different
members of a tagged hierarchy or different function types. */
gcc_assert (TYPE_MAIN_VARIANT (left_ref_type)
== TYPE_MAIN_VARIANT (right_ref_type)
|| (TYPE_ALIGN_OK (left_ref_type)
&& TYPE_ALIGN_OK (right_ref_type))
|| (TREE_CODE (left_ref_type) == FUNCTION_TYPE
&& TREE_CODE (right_ref_type) == FUNCTION_TYPE));
/* Anonymous access types in Ada 2005 may point to compatible
object subtypes or function types in the language sense. */
gcc_assert (FUNCTION_POINTER_TYPE_P (left_ref_type)
== FUNCTION_POINTER_TYPE_P (right_ref_type));
best_type = left_base_type;
}


@ -3,7 +3,7 @@
@setfilename gnat-style.info
@documentencoding UTF-8
@ifinfo
@*Generated by Sphinx 4.3.2.@*
@*Generated by Sphinx 5.3.0.@*
@end ifinfo
@settitle GNAT Coding Style A Guide for GNAT Developers
@defindex ge
@ -15,13 +15,11 @@
* gnat-style: (gnat-style.info). gnat-style
@end direntry
@definfoenclose strong,`,'
@definfoenclose emph,`,'
@c %**end of header
@copying
@quotation
GNAT Coding Style: A Guide for GNAT Developers , Dec 14, 2023
GNAT Coding Style: A Guide for GNAT Developers , Dec 21, 2023
AdaCore
@ -255,7 +253,7 @@ When declarations are commented with hanging comments, i.e.
comments after the declaration, there is no blank line before the
comment, and if it is absolutely necessary to have blank lines within
the comments, e.g. to make paragraph separations within a single comment,
these blank lines @emph{do} have a @code{--} (unlike the
these blank lines `do' have a @code{--} (unlike the
normal rule, which is to use entirely blank lines for separating
comment paragraphs). The comment starts at same level of indentation
as code it is commenting.
@ -304,12 +302,12 @@ Other_Id := 6; -- Second comment
@end example
@item
Short comments that fit on a single line are @emph{not} ended with a
Short comments that fit on a single line are `not' ended with a
period. Comments taking more than a line are punctuated in the normal
manner.
@item
Comments should focus on @emph{why} instead of @emph{what}.
Comments should focus on `why' instead of `what'.
Descriptions of what subprograms do go with the specification.
@item
@ -319,7 +317,7 @@ depend on the names of things. The names are supplementary, not
sufficient, as comments.
@item
@emph{Do not} put two spaces after periods in comments.
`Do not' put two spaces after periods in comments.
@end itemize
@node Declarations and Types,Expressions and Names,Lexical Elements,Top
@ -958,7 +956,7 @@ Copyright 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
@strong{Preamble}
`Preamble'
The purpose of this License is to make a manual, textbook, or other
functional and useful document “free” in the sense of freedom: to
@ -981,23 +979,23 @@ it can be used for any textual work, regardless of subject matter or
whether it is published as a printed book. We recommend this License
principally for works whose purpose is instruction or reference.
@strong{1. APPLICABILITY AND DEFINITIONS}
`1. APPLICABILITY AND DEFINITIONS'
This License applies to any manual or other work, in any medium, that
contains a notice placed by the copyright holder saying it can be
distributed under the terms of this License. Such a notice grants a
world-wide, royalty-free license, unlimited in duration, to use that
work under the conditions stated herein. The @strong{Document}, below,
work under the conditions stated herein. The `Document', below,
refers to any such manual or work. Any member of the public is a
licensee, and is addressed as “@strong{you}”. You accept the license if you
licensee, and is addressed as “`you'”. You accept the license if you
copy, modify or distribute the work in a way requiring permission
under copyright law.
A “@strong{Modified Version}” of the Document means any work containing the
A “`Modified Version'” of the Document means any work containing the
Document or a portion of it, either copied verbatim, or with
modifications and/or translated into another language.
A “@strong{Secondary Section}” is a named appendix or a front-matter section of
A “`Secondary Section'” is a named appendix or a front-matter section of
the Document that deals exclusively with the relationship of the
publishers or authors of the Document to the Document’s overall subject
(or to related matters) and contains nothing that could fall directly
@ -1008,7 +1006,7 @@ connection with the subject or with related matters, or of legal,
commercial, philosophical, ethical or political position regarding
them.
The “@strong{Invariant Sections}” are certain Secondary Sections whose titles
The “`Invariant Sections'” are certain Secondary Sections whose titles
are designated, as being those of Invariant Sections, in the notice
that says that the Document is released under this License. If a
section does not fit the above definition of Secondary then it is not
@ -1016,12 +1014,12 @@ allowed to be designated as Invariant. The Document may contain zero
Invariant Sections. If the Document does not identify any Invariant
Sections then there are none.
The “@strong{Cover Texts}” are certain short passages of text that are listed,
The “`Cover Texts'” are certain short passages of text that are listed,
as Front-Cover Texts or Back-Cover Texts, in the notice that says that
the Document is released under this License. A Front-Cover Text may
be at most 5 words, and a Back-Cover Text may be at most 25 words.
A “@strong{Transparent}” copy of the Document means a machine-readable copy,
A “`Transparent'” copy of the Document means a machine-readable copy,
represented in a format whose specification is available to the
general public, that is suitable for revising the document
straightforwardly with generic text editors or (for images composed of
@ -1032,7 +1030,7 @@ to text formatters. A copy made in an otherwise Transparent file
format whose markup, or absence of markup, has been arranged to thwart
or discourage subsequent modification by readers is not Transparent.
An image format is not Transparent if used for any substantial amount
of text. A copy that is not “Transparent” is called @strong{Opaque}.
of text. A copy that is not “Transparent” is called `Opaque'.
Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format, SGML
@ -1045,22 +1043,22 @@ processing tools are not generally available, and the
machine-generated HTML, PostScript or PDF produced by some word
processors for output purposes only.
The “@strong{Title Page}” means, for a printed book, the title page itself,
The “`Title Page'” means, for a printed book, the title page itself,
plus such following pages as are needed to hold, legibly, the material
this License requires to appear in the title page. For works in
formats which do not have any title page as such, “Title Page” means
the text near the most prominent appearance of the work’s title,
preceding the beginning of the body of the text.
The “@strong{publisher}” means any person or entity that distributes
The “`publisher'” means any person or entity that distributes
copies of the Document to the public.
A section “@strong{Entitled XYZ}” means a named subunit of the Document whose
A section “`Entitled XYZ'” means a named subunit of the Document whose
title either is precisely XYZ or contains XYZ in parentheses following
text that translates XYZ in another language. (Here XYZ stands for a
specific section name mentioned below, such as “@strong{Acknowledgements}”,
@strong{Dedications}”, “@strong{Endorsements}”, or “@strong{History}”.)
To “@strong{Preserve the Title}
specific section name mentioned below, such as “`Acknowledgements'”,
`Dedications'”, “`Endorsements'”, or “`History'”.)
To “`Preserve the Title'
of such a section when you modify the Document means that it remains a
section “Entitled XYZ” according to this definition.
@ -1071,7 +1069,7 @@ License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and has
no effect on the meaning of this License.
@strong{2. VERBATIM COPYING}
`2. VERBATIM COPYING'
You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
@ -1086,7 +1084,7 @@ number of copies you must also follow the conditions in section 3.
You may also lend copies, under the same conditions stated above, and
you may publicly display copies.
@strong{3. COPYING IN QUANTITY}
`3. COPYING IN QUANTITY'
If you publish printed copies (or copies in media that commonly have
printed covers) of the Document, numbering more than 100, and the
@ -1123,7 +1121,7 @@ It is requested, but not required, that you contact the authors of the
Document well before redistributing any large number of copies, to give
them a chance to provide you with an updated version of the Document.
@strong{4. MODIFICATIONS}
`4. MODIFICATIONS'
You may copy and distribute a Modified Version of the Document under
the conditions of sections 2 and 3 above, provided that you release
@ -1240,7 +1238,7 @@ The author(s) and publisher(s) of the Document do not by this License
give permission to use their names for publicity for or to assert or
imply endorsement of any Modified Version.
@strong{5. COMBINING DOCUMENTS}
`5. COMBINING DOCUMENTS'
You may combine the Document with other documents released under this
License, under the terms defined in section 4 above for modified
@ -1264,7 +1262,7 @@ in the various original documents, forming one section Entitled
and any sections Entitled “Dedications”. You must delete all sections
Entitled “Endorsements”.
@strong{6. COLLECTIONS OF DOCUMENTS}
`6. COLLECTIONS OF DOCUMENTS'
You may make a collection consisting of the Document and other documents
released under this License, and replace the individual copies of this
@ -1277,7 +1275,7 @@ it individually under this License, provided you insert a copy of this
License into the extracted document, and follow this License in all
other respects regarding verbatim copying of that document.
@strong{7. AGGREGATION WITH INDEPENDENT WORKS}
`7. AGGREGATION WITH INDEPENDENT WORKS'
A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage or
@ -1296,7 +1294,7 @@ electronic equivalent of covers if the Document is in electronic form.
Otherwise they must appear on printed covers that bracket the whole
aggregate.
@strong{8. TRANSLATION}
`8. TRANSLATION'
Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of section 4.
@ -1316,7 +1314,7 @@ If a section in the Document is Entitled “Acknowledgements”,
its Title (section 1) will typically require changing the actual
title.
@strong{9. TERMINATION}
`9. TERMINATION'
You may not copy, modify, sublicense, or distribute the Document
except as expressly provided under this License. Any attempt
@ -1343,7 +1341,7 @@ this License. If your rights have been terminated and not permanently
reinstated, receipt of a copy of some or all of the same material does
not give you any rights to use it.
@strong{10. FUTURE REVISIONS OF THIS LICENSE}
`10. FUTURE REVISIONS OF THIS LICENSE'
The Free Software Foundation may publish new, revised versions
of the GNU Free Documentation License from time to time. Such new
License can be used, that proxy’s public statement of acceptance of a
version permanently authorizes you to choose that version for the
Document.
@strong{11. RELICENSING}
`11. RELICENSING'
“Massive Multiauthor Collaboration Site” (or “MMC Site”) means any
World Wide Web server that publishes copyrightable works and also
@ -1393,7 +1391,7 @@ The operator of an MMC Site may republish an MMC contained in the site
under CC-BY-SA on the same site at any time before August 1, 2009,
provided the MMC is eligible for relicensing.
@strong{ADDENDUM: How to use this License for your documents}
`ADDENDUM: How to use this License for your documents'
To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -1983,9 +1983,9 @@ package body Inline is
then
declare
Len1 : constant Positive :=
String (String'("cannot inline"))'Length;
String'("cannot inline")'Length;
Len2 : constant Positive :=
String (String'("info: no contextual analysis of"))'Length;
String'("info: no contextual analysis of")'Length;
New_Msg : String (1 .. Msg'Length + Len2 - Len1);
@ -2044,17 +2044,6 @@ package body Inline is
Error_Msg_NE (Msg (Msg'First .. Msg'Last - 1), N, Subp);
-- In GNATprove mode, issue an info message when -gnatd_f is set and
-- Suppress_Info is False, and indicate that the subprogram is not
-- always inlined by setting flag Is_Inlined_Always to False.
elsif GNATprove_Mode then
Set_Is_Inlined_Always (Subp, False);
if Debug_Flag_Underscore_F and not Suppress_Info then
Error_Msg_NE (Msg, N, Subp);
end if;
else
-- Do not emit warning if this is a predefined unit which is not
@ -2999,25 +2988,6 @@ package body Inline is
F := First_Formal (Subp);
A := First_Actual (N);
while Present (F) loop
if Present (Renamed_Object (F)) then
-- If expander is active, it is an error to try to inline a
-- recursive subprogram. In GNATprove mode, just indicate that the
-- inlining will not happen, and mark the subprogram as not always
-- inlined.
if GNATprove_Mode then
Cannot_Inline
("cannot inline call to recursive subprogram?", N, Subp);
Set_Is_Inlined_Always (Subp, False);
else
Error_Msg_N
("cannot inline call to recursive subprogram", N);
end if;
return;
end if;
-- Reset Last_Assignment for any parameters of mode out or in out, to
-- prevent spurious warnings about overwriting for assignments to the
-- formal in the inlined code.


@ -29,6 +29,7 @@ with Einfo; use Einfo;
with Einfo.Utils; use Einfo.Utils;
with Elists; use Elists;
with Errout; use Errout;
with Exp_Tss; use Exp_Tss;
with Lib.Util; use Lib.Util;
with Nlists; use Nlists;
with Opt; use Opt;
@ -789,10 +790,15 @@ package body Lib.Xref is
elsif Kind = E_In_Out_Parameter
and then Is_Assignable (E)
then
-- For sure this counts as a normal read reference
-- We count it as a read reference unless we're calling a
-- type support subprogram such as deep finalize.
Set_Referenced (E);
Set_Last_Assignment (E, Empty);
if not Is_Entity_Name (Name (Call))
or else Get_TSS_Name (Entity (Name (Call))) = TSS_Null
then
Set_Referenced (E);
Set_Last_Assignment (E, Empty);
end if;
-- We count it as being referenced as an out parameter if the
-- option is set to warn on all out parameters, except that we


@ -52,12 +52,11 @@
-- it provides "globbing patterns" that are useful in implementing
-- unix or DOS style wildcard matching for file names.
-- GNAT.Regpat (files g-regpat.ads/s-regpat.ads/g-regpat.adb)
-- GNAT.Regpat (files g-regpat.ads/s-regpat.ads/s-regpat.adb)
-- This is a more complete implementation of Unix-style regular
-- expressions, copied from the original V7 style regular expression
-- library written in C by Henry Spencer. It is functionally the
-- same as this library, and uses the same internal data structures
-- stored in a binary compatible manner.
-- expressions, copied from the Perl regular expression engine,
-- written originally in C by Henry Spencer. It is functionally the
-- same as that library.
-- GNAT.Spitbol.Patterns (files g-spipat.ads/g-spipat.adb)
-- This is a completely general pattern matching package based on the
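As a side note (not part of the change above), a minimal usage sketch of GNAT.Regpat as described in that comment; the pattern and input string are arbitrary illustrations:

with Ada.Text_IO;  use Ada.Text_IO;
with GNAT.Regpat;  use GNAT.Regpat;

procedure Regpat_Demo is
   --  One capturing group for a run of digits
   Re      : constant Pattern_Matcher := Compile ("([0-9]+)");
   Input   : constant String := "revision 20240109 of the tree";
   Matches : Match_Array (0 .. 1);
begin
   --  Matches (0) spans the whole match, Matches (1) the first group
   Match (Re, Input, Matches);
   if Matches (1) /= No_Match then
      Put_Line (Input (Matches (1).First .. Matches (1).Last));
   end if;
end Regpat_Demo;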


@ -121,8 +121,9 @@ is
with
Pre =>
Item /= Null_Ptr
and then Strlen (Item) <= size_t'Last - Offset
and then Strlen (Item) + Offset <= Chars'Length,
and then (Chars'First /= 0 or else Chars'Last /= size_t'Last)
and then Chars'Length <= size_t'Last - Offset
and then Chars'Length + Offset <= Strlen (Item),
Global => (In_Out => C_Memory);
procedure Update
@ -133,8 +134,8 @@ is
with
Pre =>
Item /= Null_Ptr
and then Strlen (Item) <= size_t'Last - Offset
and then Strlen (Item) + Offset <= Str'Length,
and then Str'Length <= size_t'Last - Offset
and then Str'Length + Offset <= Strlen (Item),
Global => (In_Out => C_Memory);
Update_Error : exception;
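
For reference, a hedged sketch (not part of this patch) of a call that satisfies the corrected precondition above: Offset + Str'Length = 6 + 5 = 11, which does not exceed Strlen (Item) = 11 for the string below.

with Interfaces.C.Strings; use Interfaces.C.Strings;

procedure Update_Demo is
   P : chars_ptr := New_String ("hello world");
begin
   --  Overwrite "world" in place, starting at zero-based offset 6
   Update (P, Offset => 6, Str => "earth", Check => True);
   --  P now designates "hello earth"
   Free (P);
end Update_Demo;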

View File

@ -78,6 +78,19 @@ package System.Atomic_Primitives is
function Atomic_Load_32 is new Atomic_Load (uint32);
function Atomic_Load_64 is new Atomic_Load (uint64);
generic
type Atomic_Type is mod <>;
procedure Atomic_Store
(Ptr : Address;
Value : Atomic_Type;
Model : Mem_Model := Seq_Cst);
pragma Import (Intrinsic, Atomic_Store, "__atomic_store_n");
procedure Atomic_Store_8 is new Atomic_Store (uint8);
procedure Atomic_Store_16 is new Atomic_Store (uint16);
procedure Atomic_Store_32 is new Atomic_Store (uint32);
procedure Atomic_Store_64 is new Atomic_Store (uint64);
generic
type Atomic_Type is mod <>;
function Atomic_Compare_Exchange

View File

@ -76,6 +76,18 @@ package System.Atomic_Primitives is
function Atomic_Load_16 is new Atomic_Load (uint16);
function Atomic_Load_32 is new Atomic_Load (uint32);
generic
type Atomic_Type is mod <>;
procedure Atomic_Store
(Ptr : Address;
Value : Atomic_Type;
Model : Mem_Model := Seq_Cst);
pragma Import (Intrinsic, Atomic_Store, "__atomic_store_n");
procedure Atomic_Store_8 is new Atomic_Store (uint8);
procedure Atomic_Store_16 is new Atomic_Store (uint16);
procedure Atomic_Store_32 is new Atomic_Store (uint32);
generic
type Atomic_Type is mod <>;
function Atomic_Compare_Exchange

View File

@ -70,20 +70,6 @@ package body System.Finalization_Masters is
return System.Storage_Elements."+" (Addr, Offset);
end Add_Offset_To_Address;
------------
-- Attach --
------------
procedure Attach (N : not null FM_Node_Ptr; L : not null FM_Node_Ptr) is
begin
Lock_Task.all;
Attach_Unprotected (N, L);
Unlock_Task.all;
-- Note: No need to unlock in case of an exception because the above
-- code can never raise one.
end Attach;
------------------------
-- Attach_Unprotected --
------------------------

View File

@ -71,10 +71,6 @@ package System.Finalization_Masters is
type Finalization_Master_Ptr is access all Finalization_Master;
for Finalization_Master_Ptr'Storage_Size use 0;
procedure Attach (N : not null FM_Node_Ptr; L : not null FM_Node_Ptr);
-- Compiler interface, do not call from within the run-time. Prepend a
-- node to a specific finalization master.
procedure Attach_Unprotected
(N : not null FM_Node_Ptr;
L : not null FM_Node_Ptr);

View File

@ -130,7 +130,11 @@ package body Pprint is
end loop;
end;
Append (Buf, " => ");
Append (Buf, Expr_Name (Expression (Elmt)));
if Box_Present (Elmt) then
Append (Buf, "<>");
else
Append (Buf, Expr_Name (Expression (Elmt)));
end if;
-- Print parameter_association as "x => 12345"

View File

@ -918,7 +918,6 @@ package Rtsfind is
RE_Attr_Long_Long_Float, -- System.Fat_LLF
RE_Add_Offset_To_Address, -- System.Finalization_Masters
RE_Attach, -- System.Finalization_Masters
RE_Base_Pool, -- System.Finalization_Masters
RE_Finalization_Master, -- System.Finalization_Masters
RE_Finalization_Master_Ptr, -- System.Finalization_Masters
@ -2563,7 +2562,6 @@ package Rtsfind is
RE_Attr_Long_Long_Float => System_Fat_LLF,
RE_Add_Offset_To_Address => System_Finalization_Masters,
RE_Attach => System_Finalization_Masters,
RE_Base_Pool => System_Finalization_Masters,
RE_Finalization_Master => System_Finalization_Masters,
RE_Finalization_Master_Ptr => System_Finalization_Masters,

View File

@ -31,7 +31,6 @@ with Einfo.Utils; use Einfo.Utils;
with Elists; use Elists;
with Errout; use Errout;
with Expander; use Expander;
with Exp_Ch6; use Exp_Ch6;
with Exp_Tss; use Exp_Tss;
with Exp_Util; use Exp_Util;
with Freeze; use Freeze;
@ -3446,7 +3445,7 @@ package body Sem_Aggr is
-- associations (Add_Unnamed is not allowed), so we issue an
-- error if there are positional associations.
if not Present (Comp_Assocs)
if No (Comp_Assocs)
and then Present (Expressions (N))
then
Error_Msg_N ("container aggregate must be "
@ -4232,11 +4231,6 @@ package body Sem_Aggr is
-- Verify that the type of the ancestor part is a non-private ancestor
-- of the expected type, which must be a type extension.
procedure Transform_BIP_Assignment (Typ : Entity_Id);
-- For an extension aggregate whose ancestor part is a build-in-place
-- call returning a nonlimited type, this is used to transform the
-- assignment to the ancestor part to use a temp.
----------------------------
-- Valid_Limited_Ancestor --
----------------------------
@ -4328,26 +4322,6 @@ package body Sem_Aggr is
return False;
end Valid_Ancestor_Type;
------------------------------
-- Transform_BIP_Assignment --
------------------------------
procedure Transform_BIP_Assignment (Typ : Entity_Id) is
Loc : constant Source_Ptr := Sloc (N);
Def_Id : constant Entity_Id := Make_Temporary (Loc, 'Y', A);
Obj_Decl : constant Node_Id :=
Make_Object_Declaration (Loc,
Defining_Identifier => Def_Id,
Constant_Present => True,
Object_Definition => New_Occurrence_Of (Typ, Loc),
Expression => A,
Has_Init_Expression => True);
begin
Set_Etype (Def_Id, Typ);
Set_Ancestor_Part (N, New_Occurrence_Of (Def_Id, Loc));
Insert_Action (N, Obj_Decl);
end Transform_BIP_Assignment;
-- Start of processing for Resolve_Extension_Aggregate
begin
@ -4521,19 +4495,8 @@ package body Sem_Aggr is
-- an AdaCore query to the ARG after this test was added.
Error_Msg_N ("ancestor part must be statically tagged", A);
else
-- We are using the build-in-place protocol, but we can't build
-- in place, because we need to call the function before
-- allocating the aggregate. Could do better for null
-- extensions, and maybe for nondiscriminated types.
-- This is wrong for limited, but those were wrong already.
if not Is_Inherently_Limited_Type (A_Type)
and then Is_Build_In_Place_Function_Call (A)
then
Transform_BIP_Assignment (A_Type);
end if;
Resolve_Record_Aggregate (N, Typ);
end if;
end if;

View File

@ -12133,9 +12133,13 @@ package body Sem_Attr is
| Attribute_Code_Address
=>
-- To be safe, assume that if the address of a variable is taken,
-- it may be modified via this address, so note modification.
-- it may be modified via this address, so note modification,
-- unless the address is compared directly, which should not be
-- considered a modification.
if Is_Variable (P) then
if Is_Variable (P)
and then Nkind (Parent (N)) not in N_Op_Compare
then
Note_Possible_Modification (P, Sure => False);
end if;

View File

@ -2541,6 +2541,12 @@ package body Sem_Ch12 is
end if;
end if;
if Subtype_Mark (Def) <= Empty_Or_Error then
pragma Assert (Serious_Errors_Detected > 0);
-- avoid passing bad argument to Entity
return;
end if;
-- If the parent type has a known size, so does the formal, which makes
-- legal representation clauses that involve the formal.
@ -13522,8 +13528,7 @@ package body Sem_Ch12 is
Ancestor := Get_Instance_Of (Ancestor);
else
Ancestor :=
Get_Instance_Of (Base_Type (Get_Instance_Of (A_Gen_T)));
Ancestor := Get_Instance_Of (Etype (Get_Instance_Of (A_Gen_T)));
end if;
-- Check whether parent is a previous formal of the current generic
@ -14181,124 +14186,120 @@ package body Sem_Ch12 is
if Get_Instance_Of (A_Gen_T) /= A_Gen_T then
Error_Msg_N ("duplicate instantiation of generic type", Actual);
return New_List (Error);
end if;
elsif not Is_Entity_Name (Actual)
if not Is_Entity_Name (Actual)
or else not Is_Type (Entity (Actual))
then
Error_Msg_NE
("expect valid subtype mark to instantiate &", Actual, Gen_T);
Abandon_Instantiation (Actual);
end if;
else
Act_T := Entity (Actual);
Act_T := Entity (Actual);
-- Ada 2005 (AI-216): An Unchecked_Union subtype shall only be passed
-- as a generic actual parameter if the corresponding formal type
-- does not have a known_discriminant_part, or is a formal derived
-- type that is an Unchecked_Union type.
-- Ada 2005 (AI-216): An Unchecked_Union subtype shall only be passed
-- as a generic actual parameter if the corresponding formal type
-- does not have a known_discriminant_part, or is a formal derived
-- type that is an Unchecked_Union type.
if Is_Unchecked_Union (Base_Type (Act_T)) then
if not Has_Discriminants (A_Gen_T)
or else (Is_Derived_Type (A_Gen_T)
and then Is_Unchecked_Union (A_Gen_T))
then
null;
else
Error_Msg_N ("unchecked union cannot be the actual for a "
& "discriminated formal type", Act_T);
end if;
end if;
-- Deal with fixed/floating restrictions
if Is_Floating_Point_Type (Act_T) then
Check_Restriction (No_Floating_Point, Actual);
elsif Is_Fixed_Point_Type (Act_T) then
Check_Restriction (No_Fixed_Point, Actual);
end if;
-- Deal with error of using incomplete type as generic actual.
-- This includes limited views of a type, even if the non-limited
-- view may be available.
if Ekind (Act_T) = E_Incomplete_Type
or else (Is_Class_Wide_Type (Act_T)
and then Ekind (Root_Type (Act_T)) = E_Incomplete_Type)
if Is_Unchecked_Union (Base_Type (Act_T)) then
if not Has_Discriminants (A_Gen_T)
or else (Is_Derived_Type (A_Gen_T)
and then Is_Unchecked_Union (A_Gen_T))
then
-- If the formal is an incomplete type, the actual can be
-- incomplete as well, but if an actual incomplete type has
-- a full view, then we'll retrieve that.
null;
else
Error_Msg_N ("unchecked union cannot be the actual for a "
& "discriminated formal type", Act_T);
if Ekind (A_Gen_T) = E_Incomplete_Type
and then No (Full_View (Act_T))
then
null;
end if;
end if;
elsif Is_Class_Wide_Type (Act_T)
or else No (Full_View (Act_T))
then
Error_Msg_N ("premature use of incomplete type", Actual);
Abandon_Instantiation (Actual);
-- Deal with fixed/floating restrictions
else
Act_T := Full_View (Act_T);
Set_Entity (Actual, Act_T);
if Is_Floating_Point_Type (Act_T) then
Check_Restriction (No_Floating_Point, Actual);
elsif Is_Fixed_Point_Type (Act_T) then
Check_Restriction (No_Fixed_Point, Actual);
end if;
if Has_Private_Component (Act_T) then
Error_Msg_N
("premature use of type with private component", Actual);
end if;
-- Deal with error of using incomplete type as generic actual.
-- This includes limited views of a type, even if the non-limited
-- view may be available.
if Ekind (Act_T) = E_Incomplete_Type
or else (Is_Class_Wide_Type (Act_T)
and then Ekind (Root_Type (Act_T)) = E_Incomplete_Type)
then
-- If the formal is an incomplete type, the actual can be
-- incomplete as well, but if an actual incomplete type has
-- a full view, then we'll retrieve that.
if Ekind (A_Gen_T) = E_Incomplete_Type
and then No (Full_View (Act_T))
then
null;
elsif Is_Class_Wide_Type (Act_T)
or else No (Full_View (Act_T))
then
Error_Msg_N ("premature use of incomplete type", Actual);
Abandon_Instantiation (Actual);
else
Act_T := Full_View (Act_T);
Set_Entity (Actual, Act_T);
if Has_Private_Component (Act_T) then
Error_Msg_N
("premature use of type with private component", Actual);
end if;
end if;
-- Deal with error of premature use of private type as generic actual
-- Deal with error of premature use of private type as generic actual,
-- which is allowed for incomplete formals.
elsif Is_Private_Type (Act_T)
elsif Ekind (A_Gen_T) /= E_Incomplete_Type then
if Is_Private_Type (Act_T)
and then Is_Private_Type (Base_Type (Act_T))
and then not Is_Generic_Type (Act_T)
and then not Is_Derived_Type (Act_T)
and then No (Full_View (Root_Type (Act_T)))
then
-- If the formal is an incomplete type, the actual can be
-- private or incomplete as well.
if Ekind (A_Gen_T) = E_Incomplete_Type then
null;
else
Error_Msg_N ("premature use of private type", Actual);
end if;
Error_Msg_N ("premature use of private type", Actual);
elsif Has_Private_Component (Act_T) then
Error_Msg_N
("premature use of type with private component", Actual);
end if;
end if;
Set_Instance_Of (A_Gen_T, Act_T);
Set_Instance_Of (A_Gen_T, Act_T);
-- If the type is generic, the class-wide type may also be used
-- If the type is generic, the class-wide type may also be used
if Is_Tagged_Type (A_Gen_T)
and then Is_Tagged_Type (Act_T)
and then not Is_Class_Wide_Type (A_Gen_T)
then
Set_Instance_Of (Class_Wide_Type (A_Gen_T),
Class_Wide_Type (Act_T));
end if;
if Is_Tagged_Type (A_Gen_T)
and then Is_Tagged_Type (Act_T)
and then not Is_Class_Wide_Type (A_Gen_T)
then
Set_Instance_Of (Class_Wide_Type (A_Gen_T),
Class_Wide_Type (Act_T));
end if;
if not Is_Abstract_Type (A_Gen_T)
and then Is_Abstract_Type (Act_T)
then
Error_Msg_N
("actual of non-abstract formal cannot be abstract", Actual);
end if;
if not Is_Abstract_Type (A_Gen_T)
and then Is_Abstract_Type (Act_T)
then
Error_Msg_N
("actual of non-abstract formal cannot be abstract", Actual);
end if;
-- A generic scalar type is a first subtype for which we generate
-- an anonymous base type. Indicate that the instance of this base
-- is the base type of the actual.
-- A generic scalar type is a first subtype for which we generate
-- an anonymous base type. Indicate that the instance of this base
-- is the base type of the actual.
if Is_Scalar_Type (A_Gen_T) then
Set_Instance_Of (Etype (A_Gen_T), Etype (Act_T));
end if;
if Is_Scalar_Type (A_Gen_T) then
Set_Instance_Of (Etype (A_Gen_T), Etype (Act_T));
end if;
Check_Shared_Variable_Control_Aspects;

View File

@ -3668,7 +3668,7 @@ package body Sem_Ch3 is
--------------------------------
procedure Analyze_Number_Declaration (N : Node_Id) is
E : constant Node_Id := Expression (N);
E : Node_Id := Expression (N);
Id : constant Entity_Id := Defining_Identifier (N);
Index : Interp_Index;
It : Interp;
@ -3694,14 +3694,13 @@ package body Sem_Ch3 is
Set_Is_Pure (Id, Is_Pure (Current_Scope));
-- Process expression, replacing error by integer zero, to avoid
-- cascaded errors or aborts further along in the processing
-- Replace Error by integer zero, which seems least likely to cause
-- cascaded errors.
if E = Error then
Rewrite (E, Make_Integer_Literal (Sloc (E), Uint_0));
pragma Assert (Serious_Errors_Detected > 0);
E := Make_Integer_Literal (Sloc (N), Uint_0);
Set_Expression (N, E);
Set_Error_Posted (E);
end if;
@ -18615,7 +18614,10 @@ package body Sem_Ch3 is
-- Otherwise we have a subtype mark without a constraint
elsif Error_Posted (S) then
Rewrite (S, New_Occurrence_Of (Any_Id, Sloc (S)));
-- Don't rewrite if S is Empty or Error
if S > Empty_Or_Error then
Rewrite (S, New_Occurrence_Of (Any_Id, Sloc (S)));
end if;
return Any_Type;
else

View File

@ -2304,7 +2304,9 @@ package body Sem_Ch4 is
while Present (It.Nam) loop
T := It.Typ;
if No (First_Formal (Base_Type (Designated_Type (T)))) then
if Is_Access_Type (T)
and then No (First_Formal (Base_Type (Designated_Type (T))))
then
Set_Etype (P, T);
else
Remove_Interp (I);

View File

@ -5325,10 +5325,13 @@ package body Sem_Ch6 is
-- Flag Is_Inlined_Always is True by default, and reversed to False for
-- those subprograms which could be inlined in GNATprove mode (because
-- Body_To_Inline is non-Empty) but should not be inlined.
-- Body_To_Inline is non-Empty) but should not be inlined. Flag
-- Is_Inlined is True by default and reversed to False when inlining
-- fails because the subprogram is detected to be recursive.
if GNATprove_Mode then
Set_Is_Inlined_Always (Designator);
Set_Is_Inlined (Designator);
end if;
-- Introduce new scope for analysis of the formals and the return type
@ -5370,6 +5373,7 @@ package body Sem_Ch6 is
if Ada_Version >= Ada_2005
and then not Is_Invariant_Procedure_Or_Body (Designator)
and then not Is_Init_Proc (Designator)
then
declare
Formal : Entity_Id;

View File

@ -861,7 +861,19 @@ package body Sem_Ch8 is
Defining_Identifier => Subt,
Subtype_Indication =>
Make_Subtype_From_Expr (Nam, Typ)));
Rewrite (Subtype_Mark (N), New_Occurrence_Of (Subt, Loc));
declare
New_Subtype_Mark : constant Node_Id :=
New_Occurrence_Of (Subt, Loc);
begin
if Present (Subtype_Mark (N)) then
Rewrite (Subtype_Mark (N), New_Subtype_Mark);
else
-- An Ada 2022 renaming with no subtype mark
Set_Subtype_Mark (N, New_Subtype_Mark);
end if;
end;
Set_Etype (Nam, Subt);
-- Suppress discriminant checks on this subtype if the original
@ -6550,16 +6562,6 @@ package body Sem_Ch8 is
Decl := Enclosing_Declaration (E);
-- Enclosing_Declaration does not always return a
-- declaration; cope with this irregularity.
if Decl in N_Subprogram_Specification_Id
and then Nkind (Parent (Decl)) in
N_Subprogram_Body | N_Subprogram_Declaration
| N_Subprogram_Renaming_Declaration
then
Decl := Parent (Decl);
end if;
-- Look for the subprogram renaming declaration built
-- for a generic actual subprogram. Unclear why
-- Original_Node call is needed, but sometimes it is.
@ -6743,7 +6745,7 @@ package body Sem_Ch8 is
Id : Entity_Id := Gen_Trailer;
begin
loop
if not Present (Id) then
if No (Id) then
-- E_Trailer presumably occurred
-- earlier on the entity list than
-- Gen_Trailer. So E preceded the

View File

@ -2438,6 +2438,32 @@ package body Sem_Ch9 is
Entry_Name := Selector_Name (Entry_Name);
end if;
-- Ada 2012 (9.5.4(5.6/4)): "If the target is a procedure, the name
-- shall denote a renaming of an entry or ...". We support this
-- language rule by replacing the target procedure with the renamed
-- entry; reanalyzing the resulting requeue statement then reuses
-- all the Ada 2005 machinery to perform the analysis.
if Nkind (Entry_Name) in N_Has_Entity then
declare
Target_E : constant Entity_Id := Entity (Entry_Name);
begin
if Ada_Version >= Ada_2012
and then Ekind (Target_E) = E_Procedure
and then Convention (Target_E) = Convention_Entry
and then Nkind (Original_Node (Parent (Parent (Target_E))))
= N_Subprogram_Renaming_Declaration
then
Set_Name (N,
New_Copy_Tree
(Name (Original_Node (Parent (Parent (Target_E))))));
Analyze_Requeue (N);
return;
end if;
end;
end if;
-- If an explicit target object is given then we have to check the
-- restrictions of 9.5.4(6).
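
An illustrative sketch (not part of this patch, names invented) of the Ada 2012 rule handled by the hunk above: a requeue whose target is a procedure that renames an entry, which the new code rewrites into a requeue on the renamed entry.

package Queues is
   protected Buffer is
      entry Put      (X : Integer);
      entry Put_Slow (X : Integer);
   private
      Busy  : Boolean := False;
      Value : Integer := 0;
   end Buffer;

   --  Procedure view of the second entry (entry renaming, RM 8.5.4)
   procedure Enqueue_Slow (X : Integer) renames Buffer.Put_Slow;
end Queues;

package body Queues is
   protected body Buffer is
      entry Put (X : Integer) when True is
      begin
         if Busy then
            --  Legal in Ada 2012: the target denotes a renaming of
            --  entry Put_Slow, so this requeue is accepted
            requeue Enqueue_Slow;
         end if;
         Value := X;
         Busy  := True;
      end Put;

      entry Put_Slow (X : Integer) when not Busy is
      begin
         Value := X;
         Busy  := True;
      end Put_Slow;
   end Buffer;
end Queues;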

View File

@ -7193,7 +7193,7 @@ package body Sem_Res is
-- In GNATprove mode, expansion is disabled, but we want to inline some
-- subprograms to facilitate formal verification. Indirect calls through
-- a subprogram type or within a generic cannot be inlined. Inlining is
-- performed only for calls subject to SPARK_Mode on.
-- performed only for calls subject to SPARK_Mode => On.
elsif GNATprove_Mode
and then SPARK_Mode = On
@ -7206,10 +7206,13 @@ package body Sem_Res is
if Nkind (Nam_Decl) = N_Subprogram_Declaration then
Body_Id := Corresponding_Body (Nam_Decl);
-- Nothing to do if the subprogram is not eligible for inlining in
-- GNATprove mode, or inlining is disabled with switch -gnatdm
-- Nothing to do if the subprogram is not inlined (because it is
-- recursive, directly or indirectly), or is not eligible for
-- inlining in GNATprove mode (because of properties of the
-- subprogram itself), or inlining has been disabled with switch
-- -gnatdm.
if not Is_Inlined_Always (Nam_UA)
if not Is_Inlined (Nam_UA)
or else not Can_Be_Inlined_In_GNATprove_Mode (Nam_UA, Body_Id)
or else Debug_Flag_M
then

View File

@ -98,6 +98,7 @@ package body Sem_SCIL is
-- Interface types are unsupported.
if Is_Interface (Ctrl_Typ)
or else From_Limited_With (Ctrl_Typ)
or else Is_RTE (Ctrl_Typ, RE_Interface_Tag)
or else (Is_Access_Type (Ctrl_Typ)
and then

View File

@ -7386,6 +7386,11 @@ package body Sem_Util is
Decl := Parent (Decl);
end loop;
-- cope with oddness in definition of N_Declaration
if Nkind (Decl) in N_Subprogram_Specification then
Decl := Parent (Decl);
end if;
return Decl;
end Enclosing_Declaration;

View File

@ -35,6 +35,8 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "cgraph.h"
#include "varasm.h"
#include "stringpool.h" /* For lookup_attribute. */
#include "attribs.h" /* For lookup_attribute. */
#include "dwarf2out.h" /* For lookup_decl_die. */
static int btf_label_num;
@ -440,6 +442,11 @@ btf_collect_datasec (ctf_container_ref ctfc)
if (dtd == NULL)
continue;
if (DECL_EXTERNAL (func->decl)
&& (lookup_attribute ("kernel_helper",
DECL_ATTRIBUTES (func->decl))) != NULL_TREE)
continue;
/* Functions actually get two types: a BTF_KIND_FUNC_PROTO, and
also a BTF_KIND_FUNC. But the CTF container only allocates one
type per function, which matches closely with BTF_KIND_FUNC_PROTO.
@ -1105,17 +1112,20 @@ static void
output_btf_strs (ctf_container_ref ctfc)
{
ctf_string_t * ctf_string = ctfc->ctfc_strtable.ctstab_head;
static int str_pos = 0;
while (ctf_string)
{
dw2_asm_output_nstring (ctf_string->cts_str, -1, "btf_string");
dw2_asm_output_nstring (ctf_string->cts_str, -1, "btf_string, str_pos = 0x%x", str_pos);
str_pos += strlen(ctf_string->cts_str) + 1;
ctf_string = ctf_string->cts_next;
}
ctf_string = ctfc->ctfc_aux_strtable.ctstab_head;
while (ctf_string)
{
dw2_asm_output_nstring (ctf_string->cts_str, -1, "btf_aux_string");
dw2_asm_output_nstring (ctf_string->cts_str, -1, "btf_aux_string, str_pos = 0x%x", str_pos);
str_pos += strlen(ctf_string->cts_str) + 1;
ctf_string = ctf_string->cts_next;
}
}

View File

@ -3143,13 +3143,14 @@ handle_copy_attribute (tree *node, tree name, tree args,
if (ref == error_mark_node)
return NULL_TREE;
location_t loc = input_location;
if (DECL_P (decl))
loc = DECL_SOURCE_LOCATION (decl);
if (TREE_CODE (ref) == STRING_CST)
{
/* Explicitly handle this case since using a string literal
as an argument is a likely mistake. */
error_at (DECL_SOURCE_LOCATION (decl),
"%qE attribute argument cannot be a string",
name);
error_at (loc, "%qE attribute argument cannot be a string", name);
return NULL_TREE;
}
@ -3160,10 +3161,8 @@ handle_copy_attribute (tree *node, tree name, tree args,
/* Similar to the string case, since some function attributes
accept literal numbers as arguments (e.g., alloc_size or
nonnull) using one here is a likely mistake. */
error_at (DECL_SOURCE_LOCATION (decl),
"%qE attribute argument cannot be a constant arithmetic "
"expression",
name);
error_at (loc, "%qE attribute argument cannot be a constant arithmetic "
"expression", name);
return NULL_TREE;
}
@ -3171,12 +3170,11 @@ handle_copy_attribute (tree *node, tree name, tree args,
{
/* Another possible mistake (but indirect self-references aren't
diagnosed and shouldn't be). */
if (warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wattributes,
if (warning_at (loc, OPT_Wattributes,
"%qE attribute ignored on a redeclaration "
"of the referenced symbol",
name))
inform (DECL_SOURCE_LOCATION (node[1]),
"previous declaration here");
"of the referenced symbol", name)
&& DECL_P (node[1]))
inform (DECL_SOURCE_LOCATION (node[1]), "previous declaration here");
return NULL_TREE;
}
@ -3196,7 +3194,8 @@ handle_copy_attribute (tree *node, tree name, tree args,
ref = TREE_OPERAND (ref, 1);
else
break;
} while (!DECL_P (ref));
}
while (!DECL_P (ref));
/* For object pointer expressions, consider those to be requests
to copy from their type, such as in:
@ -3228,8 +3227,7 @@ handle_copy_attribute (tree *node, tree name, tree args,
to a variable, or variable attributes to a function. */
if (warning (OPT_Wattributes,
"%qE attribute ignored on a declaration of "
"a different kind than referenced symbol",
name)
"a different kind than referenced symbol", name)
&& DECL_P (ref))
inform (DECL_SOURCE_LOCATION (ref),
"symbol %qD referenced by %qD declared here", ref, decl);
@ -3279,9 +3277,7 @@ handle_copy_attribute (tree *node, tree name, tree args,
}
else if (!TYPE_P (decl))
{
error_at (DECL_SOURCE_LOCATION (decl),
"%qE attribute must apply to a declaration",
name);
error_at (loc, "%qE attribute must apply to a declaration", name);
return NULL_TREE;
}

View File

@ -1373,8 +1373,8 @@ public:
bool map_supported_p ();
static tree get_origin (tree);
static tree maybe_unconvert_ref (tree);
tree get_origin (tree);
tree maybe_unconvert_ref (tree);
bool maybe_zero_length_array_section (tree);

View File

@ -2830,6 +2830,9 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST)
{
/* TODO: This can go away once we transition all uses of
TREE_LIST for representing OMP array sections to
OMP_ARRAY_SECTION. */
tree t;
for (t = OMP_CLAUSE_DECL (c);
TREE_CODE (t) == TREE_LIST; t = TREE_CHAIN (t))
@ -2838,6 +2841,17 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
bitmap_clear_bit (&allocate_head, DECL_UID (t));
break;
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == OMP_ARRAY_SECTION)
{
tree t;
for (t = OMP_CLAUSE_DECL (c);
TREE_CODE (t) == OMP_ARRAY_SECTION;
t = TREE_OPERAND (t, 0))
;
if (DECL_P (t))
bitmap_clear_bit (&allocate_head, DECL_UID (t));
break;
}
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
@ -3379,6 +3393,7 @@ c_omp_address_inspector::map_supported_p ()
|| TREE_CODE (t) == SAVE_EXPR
|| TREE_CODE (t) == POINTER_PLUS_EXPR
|| TREE_CODE (t) == NON_LVALUE_EXPR
|| TREE_CODE (t) == OMP_ARRAY_SECTION
|| TREE_CODE (t) == NOP_EXPR)
if (TREE_CODE (t) == COMPOUND_EXPR)
t = TREE_OPERAND (t, 1);
@ -3408,7 +3423,8 @@ c_omp_address_inspector::get_origin (tree t)
else if (TREE_CODE (t) == POINTER_PLUS_EXPR
|| TREE_CODE (t) == SAVE_EXPR)
t = TREE_OPERAND (t, 0);
else if (TREE_CODE (t) == INDIRECT_REF
else if (!processing_template_decl_p ()
&& TREE_CODE (t) == INDIRECT_REF
&& TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == REFERENCE_TYPE)
t = TREE_OPERAND (t, 0);
else
@ -3425,7 +3441,10 @@ c_omp_address_inspector::get_origin (tree t)
tree
c_omp_address_inspector::maybe_unconvert_ref (tree t)
{
if (TREE_CODE (t) == INDIRECT_REF
/* Be careful not to dereference the type if we're processing a
template decl, else it might be NULL. */
if (!processing_template_decl_p ()
&& TREE_CODE (t) == INDIRECT_REF
&& TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == REFERENCE_TYPE)
return TREE_OPERAND (t, 0);

View File

@ -8445,7 +8445,7 @@ c_parser_for_statement (c_parser *parser, bool ivdep, unsigned short unroll,
build_int_cst (integer_type_node,
annot_expr_unroll_kind),
build_int_cst (integer_type_node, unroll));
if (novector && cond != error_mark_node)
if (novector && cond && cond != error_mark_node)
cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
build_int_cst (integer_type_node,
annot_expr_no_vector_kind),

View File

@ -350,7 +350,7 @@ aarch64*-*-*)
cxx_target_objs="aarch64-c.o"
d_target_objs="aarch64-d.o"
extra_objs="aarch64-builtins.o aarch-common.o aarch64-sve-builtins.o aarch64-sve-builtins-shapes.o aarch64-sve-builtins-base.o aarch64-sve-builtins-sve2.o aarch64-sve-builtins-sme.o cortex-a57-fma-steering.o aarch64-speculation.o falkor-tag-collision-avoidance.o aarch-bti-insert.o aarch64-cc-fusion.o aarch64-early-ra.o aarch64-ldp-fusion.o"
target_gtfiles="\$(srcdir)/config/aarch64/aarch64-builtins.cc \$(srcdir)/config/aarch64/aarch64-sve-builtins.h \$(srcdir)/config/aarch64/aarch64-sve-builtins.cc"
target_gtfiles="\$(srcdir)/config/aarch64/aarch64-builtins.h \$(srcdir)/config/aarch64/aarch64-builtins.cc \$(srcdir)/config/aarch64/aarch64-sve-builtins.h \$(srcdir)/config/aarch64/aarch64-sve-builtins.cc"
target_has_targetm_common=yes
;;
alpha*-*-*)
@ -4548,7 +4548,7 @@ case "${target}" in
for which in arch tune; do
eval "val=\$with_$which"
case ${val} in
"" | fiji | gfx900 | gfx906 | gfx908 | gfx90a | gfx1030)
"" | fiji | gfx900 | gfx906 | gfx908 | gfx90a | gfx1030 | gfx1100)
# OK
;;
*)

View File

@ -863,9 +863,11 @@ const char *aarch64_scalar_builtin_types[] = {
NULL
};
extern GTY(()) aarch64_simd_type_info aarch64_simd_types[];
#define ENTRY(E, M, Q, G) \
{E, "__" #E, #G "__" #E, NULL_TREE, NULL_TREE, E_##M##mode, qualifier_##Q},
GTY(()) struct aarch64_simd_type_info aarch64_simd_types [] = {
struct aarch64_simd_type_info aarch64_simd_types [] = {
#include "aarch64-simd-builtin-types.def"
};
#undef ENTRY

View File

@ -4222,6 +4222,253 @@ output_shift_loop (enum rtx_code code, rtx *operands)
return "";
}
/* See below where shifts are handled for explanation of this enum. */
enum arc_shift_alg
{
SHIFT_MOVE, /* Register-to-register move. */
SHIFT_LOOP, /* Zero-overhead loop implementation. */
SHIFT_INLINE, /* Multiple LSHIFTs and LSHIFT-PLUSs. */
SHIFT_AND_ROT, /* Bitwise AND, then ROTATERTs. */
SHIFT_SWAP, /* SWAP then multiple LSHIFTs/LSHIFT-PLUSs. */
SHIFT_AND_SWAP_ROT /* Bitwise AND, then SWAP, then ROTATERTs. */
};
struct arc_shift_info {
enum arc_shift_alg alg;
unsigned int cost;
};
/* Return shift algorithm context, an index into the following tables.
* 0 for -Os (optimize for size) 3 for -O2 (optimized for speed)
* 1 for -Os -mswap TARGET_V2 4 for -O2 -mswap TARGET_V2
* 2 for -Os -mswap !TARGET_V2 5 for -O2 -mswap !TARGET_V2 */
static unsigned int
arc_shift_context_idx ()
{
if (optimize_function_for_size_p (cfun))
{
if (!TARGET_SWAP)
return 0;
if (TARGET_V2)
return 1;
return 2;
}
else
{
if (!TARGET_SWAP)
return 3;
if (TARGET_V2)
return 4;
return 5;
}
}
static const arc_shift_info arc_ashl_alg[6][32] = {
{ /* 0: -Os. */
{ SHIFT_MOVE, COSTS_N_INSNS (1) }, /* 0 */
{ SHIFT_INLINE, COSTS_N_INSNS (1) }, /* 1 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 2 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 3 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 4 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 5 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 6 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 7 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 8 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 9 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 10 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 11 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 12 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 13 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 14 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 15 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 16 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 17 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 18 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 19 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 20 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 21 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 22 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 23 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 24 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 25 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 26 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 27 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 28 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (4) }, /* 29 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (3) }, /* 30 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (2) } /* 31 */
},
{ /* 1: -Os -mswap TARGET_V2. */
{ SHIFT_MOVE, COSTS_N_INSNS (1) }, /* 0 */
{ SHIFT_INLINE, COSTS_N_INSNS (1) }, /* 1 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 2 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 3 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 4 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 5 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 6 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 7 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 8 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 9 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 10 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 11 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 12 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 13 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (4) }, /* 14 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (3) }, /* 15 */
{ SHIFT_SWAP, COSTS_N_INSNS (1) }, /* 16 */
{ SHIFT_SWAP, COSTS_N_INSNS (2) }, /* 17 */
{ SHIFT_SWAP, COSTS_N_INSNS (3) }, /* 18 */
{ SHIFT_SWAP, COSTS_N_INSNS (3) }, /* 19 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 20 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 21 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 22 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 23 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 24 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 25 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 26 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 27 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 28 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (4) }, /* 29 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (3) }, /* 30 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (2) } /* 31 */
},
{ /* 2: -Os -mswap !TARGET_V2. */
{ SHIFT_MOVE, COSTS_N_INSNS (1) }, /* 0 */
{ SHIFT_INLINE, COSTS_N_INSNS (1) }, /* 1 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 2 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 3 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 4 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 5 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 6 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 7 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 8 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 9 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 10 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 11 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 12 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 13 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (4) }, /* 14 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (3) }, /* 15 */
{ SHIFT_SWAP, COSTS_N_INSNS (2) }, /* 16 */
{ SHIFT_SWAP, COSTS_N_INSNS (3) }, /* 17 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 18 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 19 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 20 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 21 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 22 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 23 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 24 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 25 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 26 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 27 */
{ SHIFT_LOOP, COSTS_N_INSNS (4) }, /* 28 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (4) }, /* 29 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (3) }, /* 30 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (2) } /* 31 */
},
{ /* 3: -O2. */
{ SHIFT_MOVE, COSTS_N_INSNS (1) }, /* 0 */
{ SHIFT_INLINE, COSTS_N_INSNS (1) }, /* 1 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 2 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 3 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 4 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 5 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 6 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 7 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 8 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 9 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 10 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 11 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 12 */
{ SHIFT_INLINE, COSTS_N_INSNS (6) }, /* 13 */
{ SHIFT_INLINE, COSTS_N_INSNS (6) }, /* 14 */
{ SHIFT_INLINE, COSTS_N_INSNS (6) }, /* 15 */
{ SHIFT_INLINE, COSTS_N_INSNS (7) }, /* 16 */
{ SHIFT_INLINE, COSTS_N_INSNS (7) }, /* 17 */
{ SHIFT_INLINE, COSTS_N_INSNS (7) }, /* 18 */
{ SHIFT_INLINE, COSTS_N_INSNS (8) }, /* 19 */
{ SHIFT_INLINE, COSTS_N_INSNS (8) }, /* 20 */
{ SHIFT_INLINE, COSTS_N_INSNS (8) }, /* 21 */
{ SHIFT_INLINE, COSTS_N_INSNS (9) }, /* 22 */
{ SHIFT_INLINE, COSTS_N_INSNS (9) }, /* 23 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (9) }, /* 24 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (8) }, /* 25 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (7) }, /* 26 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (6) }, /* 27 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (5) }, /* 28 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (4) }, /* 29 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (3) }, /* 30 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (2) } /* 31 */
},
{ /* 4: -O2 -mswap TARGET_V2. */
{ SHIFT_MOVE, COSTS_N_INSNS (1) }, /* 0 */
{ SHIFT_INLINE, COSTS_N_INSNS (1) }, /* 1 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 2 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 3 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 4 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 5 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 6 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 7 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 8 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 9 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 10 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 11 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 12 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (5) }, /* 13 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (4) }, /* 14 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (3) }, /* 15 */
{ SHIFT_SWAP, COSTS_N_INSNS (1) }, /* 16 */
{ SHIFT_SWAP, COSTS_N_INSNS (2) }, /* 17 */
{ SHIFT_SWAP, COSTS_N_INSNS (3) }, /* 18 */
{ SHIFT_SWAP, COSTS_N_INSNS (3) }, /* 19 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 20 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 21 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 22 */
{ SHIFT_SWAP, COSTS_N_INSNS (5) }, /* 23 */
{ SHIFT_SWAP, COSTS_N_INSNS (5) }, /* 24 */
{ SHIFT_SWAP, COSTS_N_INSNS (5) }, /* 25 */
{ SHIFT_SWAP, COSTS_N_INSNS (6) }, /* 26 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (6) }, /* 27 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (5) }, /* 28 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (4) }, /* 29 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (3) }, /* 30 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (2) } /* 31 */
},
{ /* 5: -O2 -mswap !TARGET_V2. */
{ SHIFT_MOVE, COSTS_N_INSNS (1) }, /* 0 */
{ SHIFT_INLINE, COSTS_N_INSNS (1) }, /* 1 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 2 */
{ SHIFT_INLINE, COSTS_N_INSNS (2) }, /* 3 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 4 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 5 */
{ SHIFT_INLINE, COSTS_N_INSNS (3) }, /* 6 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 7 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 8 */
{ SHIFT_INLINE, COSTS_N_INSNS (4) }, /* 9 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 10 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 11 */
{ SHIFT_INLINE, COSTS_N_INSNS (5) }, /* 12 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (5) }, /* 13 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (4) }, /* 14 */
{ SHIFT_AND_SWAP_ROT, COSTS_N_INSNS (3) }, /* 15 */
{ SHIFT_SWAP, COSTS_N_INSNS (2) }, /* 16 */
{ SHIFT_SWAP, COSTS_N_INSNS (3) }, /* 17 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 18 */
{ SHIFT_SWAP, COSTS_N_INSNS (4) }, /* 19 */
{ SHIFT_SWAP, COSTS_N_INSNS (5) }, /* 20 */
{ SHIFT_SWAP, COSTS_N_INSNS (5) }, /* 21 */
{ SHIFT_SWAP, COSTS_N_INSNS (5) }, /* 22 */
{ SHIFT_SWAP, COSTS_N_INSNS (6) }, /* 23 */
{ SHIFT_SWAP, COSTS_N_INSNS (6) }, /* 24 */
{ SHIFT_SWAP, COSTS_N_INSNS (6) }, /* 25 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (7) }, /* 26 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (6) }, /* 27 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (5) }, /* 28 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (4) }, /* 29 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (3) }, /* 30 */
{ SHIFT_AND_ROT, COSTS_N_INSNS (2) } /* 31 */
}
};
/* Split SImode left shift instruction. */
void
@ -4230,11 +4477,29 @@ arc_split_ashl (rtx *operands)
if (CONST_INT_P (operands[2]))
{
int n = INTVAL (operands[2]) & 0x1f;
if (n <= 9)
switch (arc_ashl_alg [arc_shift_context_idx ()][n].alg)
{
case SHIFT_MOVE:
emit_move_insn (operands[0], operands[1]);
return;
case SHIFT_SWAP:
if (!TARGET_V2)
{
emit_insn (gen_andsi3_i (operands[0], operands[1],
GEN_INT (0xffff)));
emit_insn (gen_rotrsi2_cnt16 (operands[0], operands[0]));
}
else
emit_insn (gen_ashlsi2_cnt16 (operands[0], operands[1]));
n -= 16;
if (n == 0)
emit_move_insn (operands[0], operands[1]);
else if (n <= 2)
return;
operands[1] = operands[0];
/* FALL THRU */
case SHIFT_INLINE:
if (n <= 2)
{
emit_insn (gen_ashlsi3_cnt1 (operands[0], operands[1]));
if (n == 2)
@ -4256,37 +4521,27 @@ arc_split_ashl (rtx *operands)
emit_insn (gen_ashlsi3_cnt1 (operands[0], operands[0]));
}
return;
}
else if (n >= 16 && n <= 22 && TARGET_SWAP && TARGET_V2)
{
emit_insn (gen_ashlsi2_cnt16 (operands[0], operands[1]));
if (n > 16)
{
operands[1] = operands[0];
operands[2] = GEN_INT (n - 16);
arc_split_ashl (operands);
}
case SHIFT_AND_ROT:
emit_insn (gen_andsi3_i (operands[0], operands[1],
GEN_INT ((1 << (32 - n)) - 1)));
for (; n < 32; n++)
emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
return;
}
else if (n >= 29)
{
if (n < 31)
{
if (n == 29)
{
emit_insn (gen_andsi3_i (operands[0], operands[1],
GEN_INT (7)));
emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
}
else
emit_insn (gen_andsi3_i (operands[0], operands[1],
GEN_INT (3)));
emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
}
else
emit_insn (gen_andsi3_i (operands[0], operands[1], const1_rtx));
emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
case SHIFT_AND_SWAP_ROT:
emit_insn (gen_andsi3_i (operands[0], operands[1],
GEN_INT ((1 << (32 - n)) - 1)));
emit_insn (gen_rotrsi2_cnt16 (operands[0], operands[0]));
for (; n < 16; n++)
emit_insn (gen_rotrsi3_cnt1 (operands[0], operands[0]));
return;
case SHIFT_LOOP:
break;
default:
gcc_unreachable ();
}
}
@ -5568,6 +5823,37 @@ arc_rtx_costs (rtx x, machine_mode mode, int outer_code,
If we need more than 12 insns to do a multiply, then go out-of-line,
since the call overhead will be < 10% of the cost of the multiply. */
case ASHIFT:
if (mode == DImode)
{
if (XEXP (x, 1) == const1_rtx)
{
*total += rtx_cost (XEXP (x, 0), mode, ASHIFT, 0, speed)
+ COSTS_N_INSNS (2);
return true;
}
return false;
}
if (TARGET_BARREL_SHIFTER)
{
*total = COSTS_N_INSNS (1);
if (CONST_INT_P (XEXP (x, 1)))
{
*total += rtx_cost (XEXP (x, 0), mode, ASHIFT, 0, speed);
return true;
}
}
else if (CONST_INT_P (XEXP (x, 1)))
{
unsigned int n = INTVAL (XEXP (x, 1)) & 0x1f;
*total = arc_ashl_alg[arc_shift_context_idx ()][n].cost
+ rtx_cost (XEXP (x, 0), mode, ASHIFT, 0, speed);
return true;
}
else
/* Variable shift loop takes 2 * n + 2 cycles. */
*total = speed ? COSTS_N_INSNS (64) : COSTS_N_INSNS (4);
return false;
case ASHIFTRT:
case LSHIFTRT:
case ROTATE:

View File

@ -1641,6 +1641,27 @@ begin cpu cortex-m35p
costs v7m
end cpu cortex-m35p
begin cpu cortex-m52
cname cortexm52
tune flags LDSCHED
architecture armv8.1-m.main+pacbti+mve.fp+fp.dp
option nopacbti remove pacbti
option nomve.fp remove mve_float
option nomve remove mve mve_float
option nofp remove ALL_FP mve_float
option nodsp remove MVE mve_float
option cdecp0 add cdecp0
option cdecp1 add cdecp1
option cdecp2 add cdecp2
option cdecp3 add cdecp3
option cdecp4 add cdecp4
option cdecp5 add cdecp5
option cdecp6 add cdecp6
option cdecp7 add cdecp7
isa quirk_no_asmcpu quirk_vlldm
costs v7m
end cpu cortex-m52
begin cpu cortex-m55
cname cortexm55
tune flags LDSCHED

View File

@ -282,6 +282,9 @@ Enum(processor_type) String(cortex-m33) Value( TARGET_CPU_cortexm33)
EnumValue
Enum(processor_type) String(cortex-m35p) Value( TARGET_CPU_cortexm35p)
EnumValue
Enum(processor_type) String(cortex-m52) Value( TARGET_CPU_cortexm52)
EnumValue
Enum(processor_type) String(cortex-m55) Value( TARGET_CPU_cortexm55)

View File

@ -49,7 +49,7 @@
cortexa710,cortexx1,cortexx1c,
neoversen1,cortexa75cortexa55,cortexa76cortexa55,
neoversev1,neoversen2,cortexm23,
cortexm33,cortexm35p,cortexm55,
starmc1,cortexm85,cortexr52,
cortexr52plus"
cortexm33,cortexm35p,cortexm52,
cortexm55,starmc1,cortexm85,
cortexr52,cortexr52plus"
(const (symbol_ref "((enum attr_tune) arm_tune)")))

View File

@ -10359,6 +10359,10 @@ avr_handle_addr_attribute (tree *node, tree name, tree args,
int flags ATTRIBUTE_UNUSED, bool *no_add)
{
bool io_p = startswith (IDENTIFIER_POINTER (name), "io");
HOST_WIDE_INT io_start = avr_arch->sfr_offset;
HOST_WIDE_INT io_end = strcmp (IDENTIFIER_POINTER (name), "io_low") == 0
? io_start + 0x1f
: io_start + 0x3f;
location_t loc = DECL_SOURCE_LOCATION (*node);
if (!VAR_P (*node))
@ -10382,12 +10386,10 @@ avr_handle_addr_attribute (tree *node, tree name, tree args,
}
else if (io_p
&& (!tree_fits_shwi_p (arg)
|| !(strcmp (IDENTIFIER_POINTER (name), "io_low") == 0
? low_io_address_operand : io_address_operand)
(GEN_INT (TREE_INT_CST_LOW (arg)), QImode)))
|| ! IN_RANGE (TREE_INT_CST_LOW (arg), io_start, io_end)))
{
warning_at (loc, OPT_Wattributes, "%qE attribute address "
"out of range", name);
warning_at (loc, OPT_Wattributes, "%qE attribute address out of "
"range 0x%x...0x%x", name, (int) io_start, (int) io_end);
*no_add = true;
}
else
@ -10413,6 +10415,12 @@ avr_handle_addr_attribute (tree *node, tree name, tree args,
warning_at (loc, OPT_Wattributes, "%qE attribute on non-volatile variable",
name);
// Optimizers must not draw any conclusions from "static int addr;" etc.
// because the contents of `addr' are not given by its initializer but
// by the contents at the address as specified by the attribute.
if (VAR_P (*node) && ! *no_add)
TREE_THIS_VOLATILE (*node) = 1;
return NULL_TREE;
}
@ -10430,7 +10438,6 @@ avr_eval_addr_attrib (rtx x)
attr = lookup_attribute ("io", DECL_ATTRIBUTES (decl));
if (!attr || !TREE_VALUE (attr))
attr = lookup_attribute ("io_low", DECL_ATTRIBUTES (decl));
gcc_assert (attr);
}
if (!attr || !TREE_VALUE (attr))
attr = lookup_attribute ("address", DECL_ATTRIBUTES (decl));
@ -10686,6 +10693,17 @@ avr_pgm_check_var_decl (tree node)
static void
avr_insert_attributes (tree node, tree *attributes)
{
if (VAR_P (node)
&& ! TREE_STATIC (node)
&& ! DECL_EXTERNAL (node))
{
const char *names[] = { "io", "io_low", "address", NULL };
for (const char **p = names; *p; ++p)
if (lookup_attribute (*p, *attributes))
error ("variable %q+D with attribute %qs must be located in "
"static storage", node, *p);
}
avr_pgm_check_var_decl (node);
if (TARGET_MAIN_IS_OS_TASK
@ -10746,37 +10764,11 @@ avr_insert_attributes (tree node, tree *attributes)
/* Track need of __do_clear_bss. */
void
avr_asm_output_aligned_decl_common (FILE * stream,
tree decl,
const char *name,
unsigned HOST_WIDE_INT size,
unsigned int align, bool local_p)
avr_asm_output_aligned_decl_common (FILE *stream, tree /* decl */,
const char *name,
unsigned HOST_WIDE_INT size,
unsigned int align, bool local_p)
{
rtx mem = decl == NULL_TREE ? NULL_RTX : DECL_RTL (decl);
rtx symbol;
if (mem != NULL_RTX && MEM_P (mem)
&& SYMBOL_REF_P ((symbol = XEXP (mem, 0)))
&& (SYMBOL_REF_FLAGS (symbol) & (SYMBOL_FLAG_IO | SYMBOL_FLAG_ADDRESS)))
{
if (!local_p)
{
fprintf (stream, "\t.globl\t");
assemble_name (stream, name);
fprintf (stream, "\n");
}
if (SYMBOL_REF_FLAGS (symbol) & SYMBOL_FLAG_ADDRESS)
{
assemble_name (stream, name);
fprintf (stream, " = %ld\n",
(long) INTVAL (avr_eval_addr_attrib (symbol)));
}
else if (local_p)
error_at (DECL_SOURCE_LOCATION (decl),
"static IO declaration for %q+D needs an address", decl);
return;
}
/* __gnu_lto_slim is just a marker for the linker injected by toplev.cc.
There is no need to trigger __do_clear_bss code for them. */
@ -10789,6 +10781,9 @@ avr_asm_output_aligned_decl_common (FILE * stream,
ASM_OUTPUT_ALIGNED_COMMON (stream, name, size, align);
}
/* Implement `ASM_OUTPUT_ALIGNED_BSS'. */
void
avr_asm_asm_output_aligned_bss (FILE *file, tree decl, const char *name,
unsigned HOST_WIDE_INT size, int align,
@ -10796,20 +10791,10 @@ avr_asm_asm_output_aligned_bss (FILE *file, tree decl, const char *name,
(FILE *, tree, const char *,
unsigned HOST_WIDE_INT, int))
{
rtx mem = decl == NULL_TREE ? NULL_RTX : DECL_RTL (decl);
rtx symbol;
if (!startswith (name, "__gnu_lto"))
avr_need_clear_bss_p = true;
if (mem != NULL_RTX && MEM_P (mem)
&& SYMBOL_REF_P ((symbol = XEXP (mem, 0)))
&& (SYMBOL_REF_FLAGS (symbol) & (SYMBOL_FLAG_IO | SYMBOL_FLAG_ADDRESS)))
{
if (!(SYMBOL_REF_FLAGS (symbol) & SYMBOL_FLAG_ADDRESS))
error_at (DECL_SOURCE_LOCATION (decl),
"IO definition for %q+D needs an address", decl);
avr_asm_output_aligned_decl_common (file, decl, name, size, align, false);
}
else
default_func (file, decl, name, size, align);
default_func (file, decl, name, size, align);
}
@ -10848,6 +10833,58 @@ avr_output_progmem_section_asm_op (const char *data)
}
/* A noswitch section callback to output symbol definitions for
attributes "io", "io_low" and "address". */
static bool
avr_output_addr_attrib (tree decl, const char *name,
unsigned HOST_WIDE_INT /* size */,
unsigned HOST_WIDE_INT /* align */)
{
gcc_assert (DECL_RTL_SET_P (decl));
FILE *stream = asm_out_file;
bool local_p = ! DECL_WEAK (decl) && ! TREE_PUBLIC (decl);
rtx symbol, mem = DECL_RTL (decl);
if (mem != NULL_RTX && MEM_P (mem)
&& SYMBOL_REF_P ((symbol = XEXP (mem, 0)))
&& (SYMBOL_REF_FLAGS (symbol) & (SYMBOL_FLAG_IO | SYMBOL_FLAG_ADDRESS)))
{
if (! local_p)
{
fprintf (stream, "\t%s\t", DECL_WEAK (decl) ? ".weak" : ".globl");
assemble_name (stream, name);
fprintf (stream, "\n");
}
if (SYMBOL_REF_FLAGS (symbol) & SYMBOL_FLAG_ADDRESS)
{
assemble_name (stream, name);
fprintf (stream, " = %ld\n",
(long) INTVAL (avr_eval_addr_attrib (symbol)));
}
else if (local_p)
{
const char *names[] = { "io", "io_low", "address", NULL };
for (const char **p = names; *p; ++p)
if (lookup_attribute (*p, DECL_ATTRIBUTES (decl)))
{
error ("static attribute %qs declaration for %q+D needs an "
"address", *p, decl);
break;
}
}
return true;
}
gcc_unreachable();
return false;
}
/* Implement `TARGET_ASM_INIT_SECTIONS'. */
static void
@ -10863,6 +10900,7 @@ avr_asm_init_sections (void)
readonly_data_section->unnamed.callback = avr_output_data_section_asm_op;
data_section->unnamed.callback = avr_output_data_section_asm_op;
bss_section->unnamed.callback = avr_output_bss_section_asm_op;
tls_comm_section->noswitch.callback = avr_output_addr_attrib;
}
@ -11045,15 +11083,17 @@ avr_encode_section_info (tree decl, rtx rtl, int new_decl_p)
tree io_low_attr = lookup_attribute ("io_low", attr);
tree io_attr = lookup_attribute ("io", attr);
tree address_attr = lookup_attribute ("address", attr);
if (io_low_attr
&& TREE_VALUE (io_low_attr) && TREE_VALUE (TREE_VALUE (io_low_attr)))
addr_attr = io_attr;
addr_attr = io_low_attr;
else if (io_attr
&& TREE_VALUE (io_attr) && TREE_VALUE (TREE_VALUE (io_attr)))
addr_attr = io_attr;
else
addr_attr = lookup_attribute ("address", attr);
addr_attr = address_attr;
if (io_low_attr
|| (io_attr && addr_attr
&& low_io_address_operand
@ -11068,6 +11108,36 @@ avr_encode_section_info (tree decl, rtx rtl, int new_decl_p)
don't use the exact value for constant propagation. */
if (addr_attr && !DECL_EXTERNAL (decl))
SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_ADDRESS;
if (io_attr || io_low_attr || address_attr)
{
if (DECL_INITIAL (decl))
{
/* Initializers are not yet parsed in TARGET_INSERT_ATTRIBUTES,
hence deny initializers now. The values of symbols with an
address attribute are determined by the attribute, not by
some initializer. */
error ("variable %q+D with attribute %qs must not have an "
"initializer", decl,
io_low_attr ? "io_low" : io_attr ? "io" : "address");
}
else
{
/* PR112952: The only way to output a variable declaration in a
custom manner is by means of a noswitch section callback.
There are only three noswitch sections: comm_section,
lcomm_section and tls_comm_section. And there is no way to
wire a custom noswitch section to a decl. As lcomm_section
is bypassed with -fdata-sections -fno-common, there is no
other way than making use of tls_comm_section. As we are
using that section anyway, also use it in the public case. */
DECL_COMMON (decl) = 1;
set_decl_section_name (decl, (const char*) nullptr);
set_decl_tls_model (decl, (tls_model) 2);
}
}
}
if (AVR_TINY

View File

@ -98,12 +98,12 @@
(match_test "REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO (op) <= 7")))
;; Constant suitable for the addsi3_r pattern.
;; No idea why we previously used RTX_OK_FOR_OFFSET_P with SI, HI and QI
;; modes. The instruction in question accepts 11 bit signed constants.
(define_constraint "Car"
"addsi3_r constant."
(and (match_code "const_int")
(ior (match_test "RTX_OK_FOR_OFFSET_P (SImode, op)")
(match_test "RTX_OK_FOR_OFFSET_P (HImode, op)")
(match_test "RTX_OK_FOR_OFFSET_P (QImode, op)"))))
(match_test "IN_RANGE (INTVAL (op), -1024, 1023)")))
;; The return address if it can be replaced with GPR_LR.
(define_constraint "Rra"

View File

@ -75,7 +75,7 @@ extern unsigned int gcn_local_sym_hash (const char *name);
supported for gcn. */
#define GOMP_SELF_SPECS ""
#define NO_XNACK "march=fiji:;march=gfx1030:;" \
#define NO_XNACK "march=fiji:;march=gfx1030:;march=gfx1100:;" \
/* These match the defaults set in gcn.cc. */ \
"!mxnack*|mxnack=default:%{march=gfx900|march=gfx906|march=gfx908:-mattr=-xnack};"
#define NO_SRAM_ECC "!march=*:;march=fiji:;march=gfx900:;march=gfx906:;"
@ -91,7 +91,7 @@ extern unsigned int gcn_local_sym_hash (const char *name);
"%{!march=*|march=fiji:--amdhsa-code-object-version=3} " \
"%{" NO_XNACK XNACKOPT "}" \
"%{" NO_SRAM_ECC SRAMOPT "} " \
"%{march=gfx1030:-mattr=+wavefrontsize64} " \
"%{march=gfx1030|march=gfx1100:-mattr=+wavefrontsize64} " \
"-filetype=obj"
#define LINK_SPEC "--pie --export-dynamic"
#define LIB_SPEC "-lc"

View File

@ -25,7 +25,8 @@ enum processor_type
PROCESSOR_VEGA20, // gfx906
PROCESSOR_GFX908,
PROCESSOR_GFX90a,
PROCESSOR_GFX1030
PROCESSOR_GFX1030,
PROCESSOR_GFX1100
};
#define TARGET_FIJI (gcn_arch == PROCESSOR_FIJI)
@ -34,6 +35,7 @@ enum processor_type
#define TARGET_GFX908 (gcn_arch == PROCESSOR_GFX908)
#define TARGET_GFX90a (gcn_arch == PROCESSOR_GFX90a)
#define TARGET_GFX1030 (gcn_arch == PROCESSOR_GFX1030)
#define TARGET_GFX1100 (gcn_arch == PROCESSOR_GFX1100)
/* Set in gcn_option_override. */
extern enum gcn_isa {
@ -41,6 +43,7 @@ extern enum gcn_isa {
ISA_GCN3,
ISA_GCN5,
ISA_RDNA2,
ISA_RDNA3,
ISA_CDNA1,
ISA_CDNA2
} gcn_isa;
@ -54,6 +57,8 @@ extern enum gcn_isa {
#define TARGET_CDNA2 (gcn_isa == ISA_CDNA2)
#define TARGET_CDNA2_PLUS (gcn_isa >= ISA_CDNA2)
#define TARGET_RDNA2 (gcn_isa == ISA_RDNA2)
#define TARGET_RDNA2_PLUS (gcn_isa >= ISA_RDNA2 && gcn_isa < ISA_CDNA1)
#define TARGET_RDNA3 (gcn_isa == ISA_RDNA3)
#define TARGET_M0_LDS_LIMIT (TARGET_GCN3)

View File

@ -1417,7 +1417,7 @@
[(match_operand:V_noHI 1 "register_operand" " v")
(match_operand:SI 2 "const_int_operand" " n")]
UNSPEC_MOV_DPP_SHR))]
"!TARGET_RDNA2"
"!TARGET_RDNA2_PLUS"
{
return gcn_expand_dpp_shr_insn (<MODE>mode, "v_mov_b32",
UNSPEC_MOV_DPP_SHR, INTVAL (operands[2]));
@ -4211,7 +4211,7 @@
(unspec:<SCALAR_MODE>
[(match_operand:V_ALL 1 "register_operand")]
REDUC_UNSPEC))]
"!TARGET_RDNA2"
"!TARGET_RDNA2_PLUS"
{
rtx tmp = gcn_expand_reduc_scalar (<MODE>mode, operands[1],
<reduc_unspec>);
@ -4265,7 +4265,7 @@
; GCN3 requires a carry out, GCN5 not
"!(TARGET_GCN3 && SCALAR_INT_MODE_P (<SCALAR_MODE>mode)
&& <reduc_unspec> == UNSPEC_PLUS_DPP_SHR)
&& !TARGET_RDNA2"
&& !TARGET_RDNA2_PLUS"
{
return gcn_expand_dpp_shr_insn (<MODE>mode, "<reduc_insn>",
<reduc_unspec>, INTVAL (operands[3]));
@ -4310,7 +4310,7 @@
(match_operand:SI 3 "const_int_operand" "n")]
UNSPEC_PLUS_CARRY_DPP_SHR))
(clobber (reg:DI VCC_REG))]
"!TARGET_RDNA2"
"!TARGET_RDNA2_PLUS"
{
return gcn_expand_dpp_shr_insn (<VnSI>mode, "v_add%^_u32",
UNSPEC_PLUS_CARRY_DPP_SHR,
@ -4328,7 +4328,7 @@
(match_operand:DI 4 "register_operand" "cV")]
UNSPEC_PLUS_CARRY_IN_DPP_SHR))
(clobber (reg:DI VCC_REG))]
"!TARGET_RDNA2"
"!TARGET_RDNA2_PLUS"
{
return gcn_expand_dpp_shr_insn (<MODE>mode, "v_addc%^_u32",
UNSPEC_PLUS_CARRY_IN_DPP_SHR,

View File

@ -139,6 +139,7 @@ gcn_option_override (void)
: gcn_arch == PROCESSOR_GFX908 ? ISA_CDNA1
: gcn_arch == PROCESSOR_GFX90a ? ISA_CDNA2
: gcn_arch == PROCESSOR_GFX1030 ? ISA_RDNA2
: gcn_arch == PROCESSOR_GFX1100 ? ISA_RDNA3
: ISA_UNKNOWN);
gcc_assert (gcn_isa != ISA_UNKNOWN);
@ -160,15 +161,17 @@ gcn_option_override (void)
acc_lds_size = 32768;
}
/* gfx803 "Fiji" and gfx1030 do not support XNACK. */
/* gfx803 "Fiji", gfx1030 and gfx1100 do not support XNACK. */
if (gcn_arch == PROCESSOR_FIJI
|| gcn_arch == PROCESSOR_GFX1030)
|| gcn_arch == PROCESSOR_GFX1030
|| gcn_arch == PROCESSOR_GFX1100)
{
if (flag_xnack == HSACO_ATTR_ON)
error ("-mxnack=on is incompatible with -march=%s",
error ("%<-mxnack=on%> is incompatible with %<-march=%s%>",
(gcn_arch == PROCESSOR_FIJI ? "fiji"
: gcn_arch == PROCESSOR_GFX1030 ? "gfx1030"
: NULL));
: gcn_arch == PROCESSOR_GFX1030 ? "gfx1030"
: gcn_arch == PROCESSOR_GFX1100 ? "gfx1100"
: NULL));
/* Allow HSACO_ATTR_ANY silently because that's the default. */
flag_xnack = HSACO_ATTR_OFF;
}
@ -1592,7 +1595,7 @@ gcn_global_address_p (rtx addr)
{
rtx base = XEXP (addr, 0);
rtx offset = XEXP (addr, 1);
int offsetbits = (TARGET_RDNA2 ? 11 : 12);
int offsetbits = (TARGET_RDNA2_PLUS ? 11 : 12);
bool immediate_p = (CONST_INT_P (offset)
&& INTVAL (offset) >= -(1 << 12)
&& INTVAL (offset) < (1 << 12));
@ -1725,7 +1728,7 @@ gcn_addr_space_legitimate_address_p (machine_mode mode, rtx x, bool strict,
rtx base = XEXP (x, 0);
rtx offset = XEXP (x, 1);
int offsetbits = (TARGET_RDNA2 ? 11 : 12);
int offsetbits = (TARGET_RDNA2_PLUS ? 11 : 12);
bool immediate_p = (GET_CODE (offset) == CONST_INT
/* Signed 12/13-bit immediate. */
&& INTVAL (offset) >= -(1 << offsetbits)
@ -3043,6 +3046,8 @@ gcn_omp_device_kind_arch_isa (enum omp_device_kind_arch_isa trait,
return gcn_arch == PROCESSOR_GFX90a;
if (strcmp (name, "gfx1030") == 0)
return gcn_arch == PROCESSOR_GFX1030;
if (strcmp (name, "gfx1100") == 0)
return gcn_arch == PROCESSOR_GFX1100;
return 0;
default:
gcc_unreachable ();
@ -6539,6 +6544,11 @@ output_file_start (void)
xnack = "";
sram_ecc = "";
break;
case PROCESSOR_GFX1100:
cpu = "gfx1100";
xnack = "";
sram_ecc = "";
break;
default: gcc_unreachable ();
}
@ -6664,7 +6674,6 @@ gcn_hsa_declare_function_name (FILE *file, const char *name, tree decl)
"\t .amdhsa_next_free_vgpr\t%i\n"
"\t .amdhsa_next_free_sgpr\t%i\n"
"\t .amdhsa_reserve_vcc\t1\n"
"\t .amdhsa_reserve_flat_scratch\t0\n"
"\t .amdhsa_reserve_xnack_mask\t%i\n"
"\t .amdhsa_private_segment_fixed_size\t0\n"
"\t .amdhsa_group_segment_fixed_size\t%u\n"
@ -6674,6 +6683,10 @@ gcn_hsa_declare_function_name (FILE *file, const char *name, tree decl)
sgpr,
xnack_enabled,
LDS_SIZE);
/* Not supported with 'architected flat scratch'. */
if (gcn_arch != PROCESSOR_GFX1100)
fprintf (file,
"\t .amdhsa_reserve_flat_scratch\t0\n");
if (gcn_arch == PROCESSOR_GFX90a)
fprintf (file,
"\t .amdhsa_accum_offset\t%i\n"

View File

@ -30,6 +30,8 @@
builtin_define ("__CDNA2__"); \
else if (TARGET_RDNA2) \
builtin_define ("__RDNA2__"); \
else if (TARGET_RDNA3) \
builtin_define ("__RDNA3__"); \
if (TARGET_FIJI) \
{ \
builtin_define ("__fiji__"); \
@ -41,11 +43,13 @@
builtin_define ("__gfx906__"); \
else if (TARGET_GFX908) \
builtin_define ("__gfx908__"); \
else if (TARGET_GFX90a) \
builtin_define ("__gfx90a__"); \
else if (TARGET_GFX1030) \
builtin_define ("__gfx1030"); \
else if (TARGET_GFX1100) \
builtin_define ("__gfx1100__"); \
} while (0)
#define ASSEMBLER_DIALECT (TARGET_RDNA2 ? 1 : 0)
#define ASSEMBLER_DIALECT (TARGET_RDNA2_PLUS ? 1 : 0)
/* Support for a compile-time default architecture and tuning.
The rules are:


@ -299,10 +299,10 @@
(define_attr "enabled" ""
(cond [(and (eq_attr "rdna" "no")
(ne (symbol_ref "TARGET_RDNA2") (const_int 0)))
(ne (symbol_ref "TARGET_RDNA2_PLUS") (const_int 0)))
(const_int 0)
(and (eq_attr "rdna" "yes")
(eq (symbol_ref "TARGET_RDNA2") (const_int 0)))
(eq (symbol_ref "TARGET_RDNA2_PLUS") (const_int 0)))
(const_int 0)
(and (eq_attr "gcn_version" "gcn5")
(eq (symbol_ref "TARGET_GCN5_PLUS") (const_int 0)))
@ -2109,13 +2109,13 @@
return "s_load%o0\t%0, %A1 glc\;s_waitcnt\tlgkmcnt(0)\;"
"s_dcache_wb_vol";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "flat_load%o0\t%0, %A1%O1 glc\;s_waitcnt\t0\;"
"buffer_gl0_inv"
: "flat_load%o0\t%0, %A1%O1 glc\;s_waitcnt\t0\;"
"buffer_wbinvl1_vol");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "global_load%o0\t%0, %A1%O1 glc\;s_waitcnt\tvmcnt(0)\;"
"buffer_gl0_inv"
: "global_load%o0\t%0, %A1%O1 glc\;s_waitcnt\tvmcnt(0)\;"
@ -2131,13 +2131,13 @@
return "s_dcache_wb_vol\;s_load%o0\t%0, %A1 glc\;"
"s_waitcnt\tlgkmcnt(0)\;s_dcache_inv_vol";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;flat_load%o0\t%0, %A1%O1 glc\;"
"s_waitcnt\t0\;buffer_gl0_inv"
: "buffer_wbinvl1_vol\;flat_load%o0\t%0, %A1%O1 glc\;"
"s_waitcnt\t0\;buffer_wbinvl1_vol");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;global_load%o0\t%0, %A1%O1 glc\;"
"s_waitcnt\tvmcnt(0)\;buffer_gl0_inv"
: "buffer_wbinvl1_vol\;global_load%o0\t%0, %A1%O1 glc\;"
@ -2180,11 +2180,11 @@
case 0:
return "s_dcache_wb_vol\;s_store%o1\t%1, %A0 glc";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;flat_store%o1\t%A0, %1%O0 glc"
: "buffer_wbinvl1_vol\;flat_store%o1\t%A0, %1%O0 glc");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;global_store%o1\t%A0, %1%O0 glc"
: "buffer_wbinvl1_vol\;global_store%o1\t%A0, %1%O0 glc");
}
@ -2198,13 +2198,13 @@
return "s_dcache_wb_vol\;s_store%o1\t%1, %A0 glc\;"
"s_waitcnt\tlgkmcnt(0)\;s_dcache_inv_vol";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;flat_store%o1\t%A0, %1%O0 glc\;"
"s_waitcnt\t0\;buffer_gl0_inv"
: "buffer_wbinvl1_vol\;flat_store%o1\t%A0, %1%O0 glc\;"
"s_waitcnt\t0\;buffer_wbinvl1_vol");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;global_store%o1\t%A0, %1%O0 glc\;"
"s_waitcnt\tvmcnt(0)\;buffer_gl0_inv"
: "buffer_wbinvl1_vol\;global_store%o1\t%A0, %1%O0 glc\;"
@ -2252,13 +2252,13 @@
return "s_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\tlgkmcnt(0)\;"
"s_dcache_wb_vol\;s_dcache_inv_vol";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "flat_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\t0\;"
"buffer_gl0_inv"
: "flat_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\t0\;"
"buffer_wbinvl1_vol");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;"
"s_waitcnt\tvmcnt(0)\;buffer_gl0_inv"
: "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;"
@ -2273,13 +2273,13 @@
return "s_dcache_wb_vol\;s_atomic_swap<X>\t%0, %1, %2 glc\;"
"s_waitcnt\tlgkmcnt(0)";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;flat_atomic_swap<X>\t%0, %1, %2 glc\;"
"s_waitcnt\t0"
: "buffer_wbinvl1_vol\;flat_atomic_swap<X>\t%0, %1, %2 glc\;"
"s_waitcnt\t0");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;"
"global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;"
"s_waitcnt\tvmcnt(0)"
@ -2297,13 +2297,13 @@
return "s_dcache_wb_vol\;s_atomic_swap<X>\t%0, %1, %2 glc\;"
"s_waitcnt\tlgkmcnt(0)\;s_dcache_inv_vol";
case 1:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;flat_atomic_swap<X>\t%0, %1, %2 glc\;"
"s_waitcnt\t0\;buffer_gl0_inv"
: "buffer_wbinvl1_vol\;flat_atomic_swap<X>\t%0, %1, %2 glc\;"
"s_waitcnt\t0\;buffer_wbinvl1_vol");
case 2:
return (TARGET_RDNA2
return (TARGET_RDNA2_PLUS
? "buffer_gl0_inv\;"
"global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;"
"s_waitcnt\tvmcnt(0)\;buffer_gl0_inv"


@ -43,6 +43,9 @@ Enum(gpu_type) String(gfx90a) Value(PROCESSOR_GFX90a)
EnumValue
Enum(gpu_type) String(gfx1030) Value(PROCESSOR_GFX1030)
EnumValue
Enum(gpu_type) String(gfx1100) Value(PROCESSOR_GFX1100)
march=
Target RejectNegative Negative(march=) Joined ToLower Enum(gpu_type) Var(gcn_arch) Init(PROCESSOR_FIJI)
Specify the name of the target GPU.


@ -59,6 +59,8 @@
#define EF_AMDGPU_MACH_AMDGCN_GFX90a 0x3f
#undef EF_AMDGPU_MACH_AMDGCN_GFX1030
#define EF_AMDGPU_MACH_AMDGCN_GFX1030 0x36
#undef EF_AMDGPU_MACH_AMDGCN_GFX1100
#define EF_AMDGPU_MACH_AMDGCN_GFX1100 0x41
#define EF_AMDGPU_FEATURE_XNACK_V4 0x300 /* Mask. */
#define EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4 0x000
@ -971,6 +973,8 @@ main (int argc, char **argv)
elf_arch = EF_AMDGPU_MACH_AMDGCN_GFX90a;
else if (strcmp (argv[i], "-march=gfx1030") == 0)
elf_arch = EF_AMDGPU_MACH_AMDGCN_GFX1030;
else if (strcmp (argv[i], "-march=gfx1100") == 0)
elf_arch = EF_AMDGPU_MACH_AMDGCN_GFX1100;
#define STR "-mstack-size="
else if (startswith (argv[i], STR))
gcn_stack_size = atoi (argv[i] + strlen (STR));
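The new EF_AMDGPU_MACH_AMDGCN_GFX1100 value (0x41) joins the machine ids that mkoffload records for each -march= value. A small illustrative sketch of that mapping, not mkoffload itself, using only the constants shown above:

#include <cstring>

/* Illustrative only.  */
static unsigned
mach_for_arch (const char *arch)
{
  if (strcmp (arch, "gfx90a") == 0)
    return 0x3f;  /* EF_AMDGPU_MACH_AMDGCN_GFX90a */
  if (strcmp (arch, "gfx1030") == 0)
    return 0x36;  /* EF_AMDGPU_MACH_AMDGCN_GFX1030 */
  if (strcmp (arch, "gfx1100") == 0)
    return 0x41;  /* EF_AMDGPU_MACH_AMDGCN_GFX1100 */
  return 0;       /* unknown */
}

int
main ()
{
  return mach_for_arch ("gfx1100") == 0x41 ? 0 : 1;
}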
@ -1014,6 +1018,7 @@ main (int argc, char **argv)
case EF_AMDGPU_MACH_AMDGCN_GFX906:
case EF_AMDGPU_MACH_AMDGCN_GFX908:
case EF_AMDGPU_MACH_AMDGCN_GFX1030:
case EF_AMDGPU_MACH_AMDGCN_GFX1100:
SET_XNACK_OFF (elf_flags);
break;
case EF_AMDGPU_MACH_AMDGCN_GFX90a:


@ -1,4 +1,4 @@
omp-device-properties-gcn: $(srcdir)/config/gcn/gcn.cc
echo kind: gpu > $@
echo arch: amdgcn gcn >> $@
echo isa: fiji gfx803 gfx900 gfx906 gfx908 gfx90a gfx1030 >> $@
echo isa: fiji gfx803 gfx900 gfx906 gfx908 gfx90a gfx1030 gfx1100 >> $@


@ -352,7 +352,8 @@ ix86_convert_const_wide_int_to_broadcast (machine_mode mode, rtx op)
bool ok = ix86_expand_vector_init_duplicate (false, vector_mode,
target,
GEN_INT (val_broadcast));
gcc_assert (ok);
if (!ok)
return nullptr;
target = lowpart_subreg (mode, target, vector_mode);
return target;
}
@ -599,19 +600,11 @@ ix86_broadcast_from_constant (machine_mode mode, rtx op)
&& INTEGRAL_MODE_P (mode))
return nullptr;
unsigned int msize = GET_MODE_SIZE (mode);
unsigned int inner_size = GET_MODE_SIZE (GET_MODE_INNER ((mode)));
/* Convert CONST_VECTOR to a non-standard SSE constant integer
broadcast only if vector broadcast is available. */
if (standard_sse_constant_p (op, mode))
return nullptr;
/* vpbroadcast[b,w] is available under TARGET_AVX2.
or TARGET_AVX512BW for zmm. */
if (inner_size < 4 && !(msize == 64 ? TARGET_AVX512BW : TARGET_AVX2))
return nullptr;
if (GET_MODE_INNER (mode) == TImode)
return nullptr;
@ -705,22 +698,22 @@ ix86_expand_vector_move (machine_mode mode, rtx operands[])
{
/* Broadcast to XMM/YMM/ZMM register from an integer
constant or scalar mem. */
op1 = gen_reg_rtx (mode);
if (FLOAT_MODE_P (mode)
|| (!TARGET_64BIT && GET_MODE_INNER (mode) == DImode)
/* vbroadcastss/vbroadcastsd only supports memory operand
w/o AVX2, force them into memory to avoid spill to
memory. */
|| (GET_MODE_SIZE (mode) == 32
&& (GET_MODE_INNER (mode) == DImode
|| GET_MODE_INNER (mode) == SImode)
&& !TARGET_AVX2))
rtx tmp = gen_reg_rtx (mode);
if (FLOAT_MODE_P (mode))
first = force_const_mem (GET_MODE_INNER (mode), first);
bool ok = ix86_expand_vector_init_duplicate (false, mode,
op1, first);
gcc_assert (ok);
emit_move_insn (op0, op1);
return;
tmp, first);
if (!ok && !TARGET_64BIT && GET_MODE_INNER (mode) == DImode)
{
first = force_const_mem (GET_MODE_INNER (mode), first);
ok = ix86_expand_vector_init_duplicate (false, mode,
tmp, first);
}
if (ok)
{
emit_move_insn (op0, tmp);
return;
}
}
}
@ -15714,6 +15707,42 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
switch (mode)
{
case E_V2DImode:
if (CONST_INT_P (val))
{
int tmp = (int)INTVAL (val);
if (tmp == (int)(INTVAL (val) >> 32))
{
rtx reg = gen_reg_rtx (V4SImode);
ok = ix86_vector_duplicate_value (V4SImode, reg,
GEN_INT (tmp));
if (ok)
{
emit_move_insn (target, gen_lowpart (V2DImode, reg));
return true;
}
}
}
return ix86_vector_duplicate_value (mode, target, val);
case E_V4DImode:
if (CONST_INT_P (val))
{
int tmp = (int)INTVAL (val);
if (tmp == (int)(INTVAL (val) >> 32))
{
rtx reg = gen_reg_rtx (V8SImode);
ok = ix86_vector_duplicate_value (V8SImode, reg,
GEN_INT (tmp));
if (ok)
{
emit_move_insn (target, gen_lowpart (V4DImode, reg));
return true;
}
}
}
return ix86_vector_duplicate_value (mode, target, val);
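The two new cases above special-case a CONST_INT whose two 32-bit halves agree: such a 64-bit value can be broadcast as 32-bit lanes and the register then viewed in the wider mode. A worked example of that test, as a standalone sketch rather than GCC code:

#include <cstdint>
#include <cstdio>

/* Illustrative only.  */
int
main ()
{
  int64_t val = 0x1234567812345678LL;
  int32_t lo = (int32_t) val;
  if (lo == (int32_t) (val >> 32))
    printf ("broadcast 0x%x per 32-bit lane, then reinterpret as V2DI/V4DI\n",
            (unsigned) lo);
}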
case E_V2SImode:
case E_V2SFmode:
if (!mmx_ok)
@ -15721,11 +15750,9 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
/* FALLTHRU */
case E_V4DFmode:
case E_V4DImode:
case E_V8SFmode:
case E_V8SImode:
case E_V2DFmode:
case E_V2DImode:
case E_V4SFmode:
case E_V4SImode:
case E_V16SImode:
@ -15742,6 +15769,8 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
rtx x;
val = gen_lowpart (SImode, val);
if (CONST_INT_P (val))
return false;
x = gen_rtx_TRUNCATE (HImode, val);
x = gen_rtx_VEC_DUPLICATE (mode, x);
emit_insn (gen_rtx_SET (target, x));
@ -15766,6 +15795,8 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
rtx x;
val = gen_lowpart (SImode, val);
if (CONST_INT_P (val))
return false;
x = gen_rtx_TRUNCATE (HImode, val);
x = gen_rtx_VEC_DUPLICATE (mode, x);
emit_insn (gen_rtx_SET (target, x));
@ -15791,6 +15822,10 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
goto widen;
case E_V8HImode:
if (CONST_INT_P (val))
goto widen;
/* FALLTHRU */
case E_V8HFmode:
case E_V8BFmode:
if (TARGET_AVX2)
@ -15838,6 +15873,8 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
goto widen;
case E_V16QImode:
if (CONST_INT_P (val))
goto widen;
if (TARGET_AVX2)
return ix86_vector_duplicate_value (mode, target, val);
@ -15857,7 +15894,13 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
val = convert_modes (wsmode, smode, val, true);
if (smode == QImode && !TARGET_PARTIAL_REG_STALL)
if (CONST_INT_P (val))
{
x = simplify_binary_operation (ASHIFT, wsmode, val,
GEN_INT (GET_MODE_BITSIZE (smode)));
val = simplify_binary_operation (IOR, wsmode, val, x);
}
else if (smode == QImode && !TARGET_PARTIAL_REG_STALL)
emit_insn (gen_insv_1 (wsmode, val, val));
else
{
@ -15870,15 +15913,20 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
x = gen_reg_rtx (wvmode);
ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
gcc_assert (ok);
if (!ok)
return false;
emit_move_insn (target, gen_lowpart (GET_MODE (target), x));
return ok;
return true;
}
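For a CONST_INT the new path in this hunk folds the duplication at compile time: the narrow constant is shifted up by its own width and IORed with itself, so a QImode 0xab becomes the HImode constant 0xabab. A standalone arithmetic sketch:

#include <cstdio>

/* Illustrative only.  */
int
main ()
{
  unsigned val = 0xab;               /* QImode constant */
  unsigned wide = (val << 8) | val;  /* HImode duplicate */
  printf ("0x%04x\n", wide);         /* prints 0xabab */
}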
case E_V16HImode:
case E_V32QImode:
if (CONST_INT_P (val))
goto widen;
/* FALLTHRU */
case E_V16HFmode:
case E_V16BFmode:
case E_V32QImode:
if (TARGET_AVX2)
return ix86_vector_duplicate_value (mode, target, val);
else
@ -15904,7 +15952,8 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
rtx x = gen_reg_rtx (hvmode);
ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
gcc_assert (ok);
if (!ok)
return false;
x = gen_rtx_VEC_CONCAT (mode, x, x);
emit_insn (gen_rtx_SET (target, x));
@ -15941,7 +15990,8 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
rtx x = gen_reg_rtx (hvmode);
ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
gcc_assert (ok);
if (!ok)
return false;
x = gen_rtx_VEC_CONCAT (mode, x, x);
emit_insn (gen_rtx_SET (target, x));
@ -16913,6 +16963,12 @@ ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
all_same = false;
}
/* If all values are identical, broadcast the value. */
if (all_same
&& ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
XVECEXP (vals, 0, 0)))
return;
/* Constants are best loaded from the constant pool. */
if (n_var == 0)
{
@ -16920,12 +16976,6 @@ ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
return;
}
/* If all values are identical, broadcast the value. */
if (all_same
&& ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
XVECEXP (vals, 0, 0)))
return;
/* Values where only one field is non-constant are best loaded from
the pool and overwritten via move later. */
if (n_var == 1)


@ -465,6 +465,11 @@
(V16HI "w")
(V32QI "w")])
;; Half modes of all LASX vector modes, in lower-case.
(define_mode_attr lasxhalf [(V32QI "v16qi") (V16HI "v8hi")
(V8SI "v4si") (V4DI "v2di")
(V8SF "v4sf") (V4DF "v2df")])
(define_expand "vec_init<mode><unitmode>"
[(match_operand:LASX 0 "register_operand")
(match_operand:LASX 1 "")]
@ -474,9 +479,9 @@
DONE;
})
(define_expand "vec_initv32qiv16qi"
[(match_operand:V32QI 0 "register_operand")
(match_operand:V16QI 1 "")]
(define_expand "vec_init<mode><lasxhalf>"
[(match_operand:LASX 0 "register_operand")
(match_operand:<VHMODE256_ALL> 1 "")]
"ISA_HAS_LASX"
{
loongarch_expand_vector_group_init (operands[0], operands[1]);
@ -577,6 +582,21 @@
[(set_attr "type" "simd_insert")
(set_attr "mode" "<MODE>")])
(define_insn "@vec_concatz<mode>"
[(set (match_operand:LASX 0 "register_operand" "=f")
(vec_concat:LASX
(match_operand:<VHMODE256_ALL> 1 "nonimmediate_operand")
(match_operand:<VHMODE256_ALL> 2 "const_0_operand")))]
"ISA_HAS_LASX"
{
if (MEM_P (operands[1]))
return "vld\t%w0,%1";
else
return "vori.b\t%w0,%w1,0";
}
[(set_attr "type" "simd_splat")
(set_attr "mode" "<MODE>")])
(define_insn "vec_concat<mode>"
[(set (match_operand:LASX 0 "register_operand" "=f")
(vec_concat:LASX


@ -9847,10 +9847,46 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val)
void
loongarch_expand_vector_group_init (rtx target, rtx vals)
{
rtx ops[2] = { force_reg (E_V16QImode, XVECEXP (vals, 0, 0)),
force_reg (E_V16QImode, XVECEXP (vals, 0, 1)) };
emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0],
ops[1])));
machine_mode vmode = GET_MODE (target);
machine_mode half_mode = VOIDmode;
rtx low = XVECEXP (vals, 0, 0);
rtx high = XVECEXP (vals, 0, 1);
switch (vmode)
{
case E_V32QImode:
half_mode = V16QImode;
break;
case E_V16HImode:
half_mode = V8HImode;
break;
case E_V8SImode:
half_mode = V4SImode;
break;
case E_V4DImode:
half_mode = V2DImode;
break;
case E_V8SFmode:
half_mode = V4SFmode;
break;
case E_V4DFmode:
half_mode = V2DFmode;
break;
default:
gcc_unreachable ();
}
if (high == CONST0_RTX (half_mode))
emit_insn (gen_vec_concatz (vmode, target, low, high));
else
{
if (!register_operand (low, half_mode))
low = force_reg (half_mode, low);
if (!register_operand (high, half_mode))
high = force_reg (half_mode, high);
emit_insn (gen_rtx_SET (target,
gen_rtx_VEC_CONCAT (vmode, low, high)));
}
}
/* Expand initialization of a vector which has all same elements. */


@ -957,7 +957,9 @@
if (scratch)
emit_move_insn (operands[0], scratch);
DONE;
})
}
[(set_attr "isa" "*,am33")])
(define_insn_and_split "negsi2"
[(set (match_operand:SI 0 "register_operand" "=D,&r")


@ -2127,6 +2127,212 @@ public:
}
};
/* Below are the implementations of the vector crypto intrinsics.  */
/* Implements vandn.[vv,vx] */
class vandn : public function_base
{
public:
rtx expand (function_expander &e) const override
{
switch (e.op_info->op)
{
case OP_TYPE_vv:
return e.use_exact_insn (code_for_pred_vandn (e.vector_mode ()));
case OP_TYPE_vx:
return e.use_exact_insn (code_for_pred_vandn_scalar (e.vector_mode ()));
default:
gcc_unreachable ();
}
}
};
/* Implements vrol/vror/vclz/vctz.  */
template<rtx_code CODE>
class bitmanip : public function_base
{
public:
bool apply_tail_policy_p () const override
{
return (CODE == CLZ || CODE == CTZ) ? false : true;
}
bool apply_mask_policy_p () const override
{
return (CODE == CLZ || CODE == CTZ) ? false : true;
}
bool has_merge_operand_p () const override
{
return (CODE == CLZ || CODE == CTZ) ? false : true;
}
rtx expand (function_expander &e) const override
{
switch (e.op_info->op)
{
case OP_TYPE_v:
case OP_TYPE_vv:
return e.use_exact_insn (code_for_pred_v (CODE, e.vector_mode ()));
case OP_TYPE_vx:
return e.use_exact_insn (code_for_pred_v_scalar (CODE, e.vector_mode ()));
default:
gcc_unreachable ();
}
}
};
/* Implements vbrev/vbrev8/vrev8. */
template<int UNSPEC>
class b_reverse : public function_base
{
public:
rtx expand (function_expander &e) const override
{
return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
}
};
class vwsll : public function_base
{
public:
rtx expand (function_expander &e) const override
{
switch (e.op_info->op)
{
case OP_TYPE_vv:
return e.use_exact_insn (code_for_pred_vwsll (e.vector_mode ()));
case OP_TYPE_vx:
return e.use_exact_insn (code_for_pred_vwsll_scalar (e.vector_mode ()));
default:
gcc_unreachable ();
}
}
};
/* Implements vclmul/vclmulh.  */
template<int UNSPEC>
class clmul : public function_base
{
public:
rtx expand (function_expander &e) const override
{
switch (e.op_info->op)
{
case OP_TYPE_vv:
return e.use_exact_insn (
code_for_pred_vclmul (UNSPEC, e.vector_mode ()));
case OP_TYPE_vx:
return e.use_exact_insn
(code_for_pred_vclmul_scalar (UNSPEC, e.vector_mode ()));
default:
gcc_unreachable ();
}
}
};
/* Implements vghsh/vsha2ms/vsha2c[hl]. */
template<int UNSPEC>
class vg_nhab : public function_base
{
public:
bool apply_mask_policy_p () const override { return false; }
bool use_mask_predication_p () const override { return false; }
bool has_merge_operand_p () const override { return false; }
rtx expand (function_expander &e) const override
{
return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
}
};
/* Implements vgmul/vaes*. */
template<int UNSPEC>
class crypto_vv : public function_base
{
public:
bool apply_mask_policy_p () const override { return false; }
bool use_mask_predication_p () const override { return false; }
bool has_merge_operand_p () const override { return false; }
rtx expand (function_expander &e) const override
{
poly_uint64 nunits = 0U;
switch (e.op_info->op)
{
case OP_TYPE_vv:
if (UNSPEC == UNSPEC_VGMUL)
return e.use_exact_insn
(code_for_pred_crypto_vv (UNSPEC, UNSPEC, e.vector_mode ()));
else
return e.use_exact_insn
(code_for_pred_crypto_vv (UNSPEC + 1, UNSPEC + 1, e.vector_mode ()));
case OP_TYPE_vs:
/* Calculate the ratio between arg0 and arg1.  */
gcc_assert (multiple_p (GET_MODE_BITSIZE (e.arg_mode (0)),
GET_MODE_BITSIZE (e.arg_mode (1)), &nunits));
if (maybe_eq (nunits, 1U))
return e.use_exact_insn (code_for_pred_crypto_vvx1_scalar
(UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
else if (maybe_eq (nunits, 2U))
return e.use_exact_insn (code_for_pred_crypto_vvx2_scalar
(UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
else if (maybe_eq (nunits, 4U))
return e.use_exact_insn (code_for_pred_crypto_vvx4_scalar
(UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
else if (maybe_eq (nunits, 8U))
return e.use_exact_insn (code_for_pred_crypto_vvx8_scalar
(UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
else
return e.use_exact_insn (code_for_pred_crypto_vvx16_scalar
(UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
default:
gcc_unreachable ();
}
}
};
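For the vs forms, the ratio between the two operand modes selects one of the x1/x2/x4/x8/x16 instruction patterns. A worked sketch of that ratio; the concrete sizes assume VLEN=128 purely for illustration:

#include <cstdio>

/* Illustrative only.  */
int
main ()
{
  unsigned arg0_bits = 512;  /* e.g. a vuint32m4_t group operand */
  unsigned arg1_bits = 128;  /* e.g. a vuint32m1_t vs2 operand */
  printf ("use the x%u pattern\n", arg0_bits / arg1_bits);  /* x4 */
}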
/* Implements vaeskf1/vsm4k. */
template<int UNSPEC>
class crypto_vi : public function_base
{
public:
bool apply_mask_policy_p () const override { return false; }
bool use_mask_predication_p () const override { return false; }
rtx expand (function_expander &e) const override
{
return e.use_exact_insn
(code_for_pred_crypto_vi_scalar (UNSPEC, e.vector_mode ()));
}
};
/* Implements vaeskf2/vsm3c. */
template<int UNSPEC>
class vaeskf2_vsm3c : public function_base
{
public:
bool apply_mask_policy_p () const override { return false; }
bool use_mask_predication_p () const override { return false; }
bool has_merge_operand_p () const override { return false; }
rtx expand (function_expander &e) const override
{
return e.use_exact_insn
(code_for_pred_vi_nomaskedoff_scalar (UNSPEC, e.vector_mode ()));
}
};
/* Implements vsm3me. */
class vsm3me : public function_base
{
public:
bool apply_mask_policy_p () const override { return false; }
bool use_mask_predication_p () const override { return false; }
rtx expand (function_expander &e) const override
{
return e.use_exact_insn (code_for_pred_vsm3me (e.vector_mode ()));
}
};
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
@ -2384,6 +2590,35 @@ static CONSTEXPR const seg_indexed_store<UNSPEC_UNORDERED> vsuxseg_obj;
static CONSTEXPR const seg_indexed_store<UNSPEC_ORDERED> vsoxseg_obj;
static CONSTEXPR const vlsegff vlsegff_obj;
/* Crypto Vector */
static CONSTEXPR const vandn vandn_obj;
static CONSTEXPR const bitmanip<ROTATE> vrol_obj;
static CONSTEXPR const bitmanip<ROTATERT> vror_obj;
static CONSTEXPR const b_reverse<UNSPEC_VBREV> vbrev_obj;
static CONSTEXPR const b_reverse<UNSPEC_VBREV8> vbrev8_obj;
static CONSTEXPR const b_reverse<UNSPEC_VREV8> vrev8_obj;
static CONSTEXPR const bitmanip<CLZ> vclz_obj;
static CONSTEXPR const bitmanip<CTZ> vctz_obj;
static CONSTEXPR const vwsll vwsll_obj;
static CONSTEXPR const clmul<UNSPEC_VCLMUL> vclmul_obj;
static CONSTEXPR const clmul<UNSPEC_VCLMULH> vclmulh_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VGHSH> vghsh_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VGMUL> vgmul_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESEF> vaesef_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESEM> vaesem_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDF> vaesdf_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDM> vaesdm_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESZ> vaesz_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VAESKF1> vaeskf1_obj;
static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VAESKF2> vaeskf2_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2MS> vsha2ms_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CH> vsha2ch_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CL> vsha2cl_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VSM4K> vsm4k_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VSM4R> vsm4r_obj;
static CONSTEXPR const vsm3me vsm3me_obj;
static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VSM3C> vsm3c_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
#define BASE(NAME) \
@ -2645,5 +2880,32 @@ BASE (vloxseg)
BASE (vsuxseg)
BASE (vsoxseg)
BASE (vlsegff)
/* Crypto vector */
BASE (vandn)
BASE (vbrev)
BASE (vbrev8)
BASE (vrev8)
BASE (vclz)
BASE (vctz)
BASE (vrol)
BASE (vror)
BASE (vwsll)
BASE (vclmul)
BASE (vclmulh)
BASE (vghsh)
BASE (vgmul)
BASE (vaesef)
BASE (vaesem)
BASE (vaesdf)
BASE (vaesdm)
BASE (vaesz)
BASE (vaeskf1)
BASE (vaeskf2)
BASE (vsha2ms)
BASE (vsha2ch)
BASE (vsha2cl)
BASE (vsm4k)
BASE (vsm4r)
BASE (vsm3me)
BASE (vsm3c)
} // end namespace riscv_vector


@ -280,6 +280,34 @@ extern const function_base *const vloxseg;
extern const function_base *const vsuxseg;
extern const function_base *const vsoxseg;
extern const function_base *const vlsegff;
/* Below are the function_base instances for the vector crypto intrinsics.  */
extern const function_base *const vandn;
extern const function_base *const vbrev;
extern const function_base *const vbrev8;
extern const function_base *const vrev8;
extern const function_base *const vclz;
extern const function_base *const vctz;
extern const function_base *const vrol;
extern const function_base *const vror;
extern const function_base *const vwsll;
extern const function_base *const vclmul;
extern const function_base *const vclmulh;
extern const function_base *const vghsh;
extern const function_base *const vgmul;
extern const function_base *const vaesef;
extern const function_base *const vaesem;
extern const function_base *const vaesdf;
extern const function_base *const vaesdm;
extern const function_base *const vaesz;
extern const function_base *const vaeskf1;
extern const function_base *const vaeskf2;
extern const function_base *const vsha2ms;
extern const function_base *const vsha2ch;
extern const function_base *const vsha2cl;
extern const function_base *const vsm4k;
extern const function_base *const vsm4r;
extern const function_base *const vsm3me;
extern const function_base *const vsm3c;
}
} // end namespace riscv_vector


@ -79,8 +79,6 @@ DEF_RVV_FUNCTION (vsoxei64, indexed_loadstore, none_m_preds, all_v_scalar_ptr_ee
// 7.7. Unit-stride Fault-Only-First Loads
DEF_RVV_FUNCTION (vleff, fault_load, full_preds, all_v_scalar_const_ptr_size_ptr_ops)
// TODO: 7.8. Vector Load/Store Segment Instructions
/* 11. Vector Integer Arithmetic Instructions. */
// 11.1. Vector Single-Width Integer Add and Subtract
@ -630,6 +628,8 @@ DEF_RVV_FUNCTION (vset, vset, none_preds, all_v_vset_tuple_ops)
DEF_RVV_FUNCTION (vget, vget, none_preds, all_v_vget_tuple_ops)
DEF_RVV_FUNCTION (vcreate, vcreate, none_preds, all_v_vcreate_tuple_ops)
DEF_RVV_FUNCTION (vundefined, vundefined, none_preds, all_none_void_tuple_ops)
// 7.8. Vector Load/Store Segment Instructions
DEF_RVV_FUNCTION (vlseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ops)
DEF_RVV_FUNCTION (vsseg, seg_loadstore, none_m_preds, tuple_v_scalar_ptr_ops)
DEF_RVV_FUNCTION (vlsseg, seg_loadstore, full_preds, tuple_v_scalar_const_ptr_ptrdiff_ops)
@ -653,4 +653,98 @@ DEF_RVV_FUNCTION (vsoxseg, seg_indexed_loadstore, none_m_preds, tuple_v_scalar_p
DEF_RVV_FUNCTION (vlsegff, seg_fault_load, full_preds, tuple_v_scalar_const_ptr_size_ptr_ops)
#undef REQUIRED_EXTENSIONS
/* Definitions of the crypto vector intrinsic functions.  */
// ZVBB and ZVKB
#define REQUIRED_EXTENSIONS ZVBB_EXT
DEF_RVV_FUNCTION (vbrev, alu, full_preds, u_vv_ops)
DEF_RVV_FUNCTION (vclz, alu, none_m_preds, u_vv_ops)
DEF_RVV_FUNCTION (vctz, alu, none_m_preds, u_vv_ops)
DEF_RVV_FUNCTION (vwsll, alu, full_preds, u_wvv_ops)
DEF_RVV_FUNCTION (vwsll, alu, full_preds, u_shift_wvx_ops)
#undef REQUIRED_EXTENSIONS
#define REQUIRED_EXTENSIONS ZVBB_OR_ZVKB_EXT
DEF_RVV_FUNCTION (vandn, alu, full_preds, u_vvv_ops)
DEF_RVV_FUNCTION (vandn, alu, full_preds, u_vvx_ops)
DEF_RVV_FUNCTION (vbrev8, alu, full_preds, u_vv_ops)
DEF_RVV_FUNCTION (vrev8, alu, full_preds, u_vv_ops)
DEF_RVV_FUNCTION (vrol, alu, full_preds, u_vvv_ops)
DEF_RVV_FUNCTION (vror, alu, full_preds, u_vvv_ops)
DEF_RVV_FUNCTION (vror, alu, full_preds, u_shift_vvx_ops)
DEF_RVV_FUNCTION (vrol, alu, full_preds, u_shift_vvx_ops)
#undef REQUIRED_EXTENSIONS
//ZVBC
#define REQUIRED_EXTENSIONS ZVBC_EXT
DEF_RVV_FUNCTION (vclmul, alu, full_preds, u_vvv_crypto_sew64_ops)
DEF_RVV_FUNCTION (vclmul, alu, full_preds, u_vvx_crypto_sew64_ops)
DEF_RVV_FUNCTION (vclmulh, alu, full_preds, u_vvv_crypto_sew64_ops)
DEF_RVV_FUNCTION (vclmulh, alu, full_preds, u_vvx_crypto_sew64_ops)
#undef REQUIRED_EXTENSIONS
//ZVKG
#define REQUIRED_EXTENSIONS ZVKG_EXT
DEF_RVV_FUNCTION(vghsh, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
DEF_RVV_FUNCTION(vgmul, no_mask_policy, none_tu_preds, u_vvv_crypto_sew32_ops)
#undef REQUIRED_EXTENSIONS
//ZVKNED
#define REQUIRED_EXTENSIONS ZVKNED_EXT
DEF_RVV_FUNCTION (vaesef, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesef, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesef, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
DEF_RVV_FUNCTION (vaesef, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
DEF_RVV_FUNCTION (vaesef, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
DEF_RVV_FUNCTION (vaesef, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
DEF_RVV_FUNCTION (vaesem, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesem, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesem, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
DEF_RVV_FUNCTION (vaesem, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
DEF_RVV_FUNCTION (vaesem, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
DEF_RVV_FUNCTION (vaesem, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
DEF_RVV_FUNCTION (vaesdf, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesdf, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesdf, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
DEF_RVV_FUNCTION (vaesdf, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
DEF_RVV_FUNCTION (vaesdf, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
DEF_RVV_FUNCTION (vaesdf, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
DEF_RVV_FUNCTION (vaesdm, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesdm, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesdm, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
DEF_RVV_FUNCTION (vaesdm, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
DEF_RVV_FUNCTION (vaesdm, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
DEF_RVV_FUNCTION (vaesdm, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
DEF_RVV_FUNCTION (vaesz, crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaesz, crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
DEF_RVV_FUNCTION (vaesz, crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
DEF_RVV_FUNCTION (vaesz, crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
DEF_RVV_FUNCTION (vaesz, crypto_vv_no_op_type, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
DEF_RVV_FUNCTION (vaeskf1, crypto_vi, none_tu_preds, u_vv_size_crypto_sew32_ops)
DEF_RVV_FUNCTION (vaeskf2, crypto_vi, none_tu_preds, u_vvv_size_crypto_sew32_ops)
#undef REQUIRED_EXTENSIONS
//ZVKNHA
//ZVKNHA and ZVKNHB
#define REQUIRED_EXTENSIONS ZVKNHA_OR_ZVKNHB_EXT
DEF_RVV_FUNCTION (vsha2ms, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vsha2ch, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vsha2cl, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew32_ops)
#undef REQUIRED_EXTENSIONS
#define REQUIRED_EXTENSIONS ZVKNHB_EXT
DEF_RVV_FUNCTION (vsha2ms, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew64_ops)
DEF_RVV_FUNCTION (vsha2ch, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew64_ops)
DEF_RVV_FUNCTION (vsha2cl, no_mask_policy, none_tu_preds, u_vvvv_crypto_sew64_ops)
#undef REQUIRED_EXTENSIONS
//Zvksed
#define REQUIRED_EXTENSIONS ZVKSED_EXT
DEF_RVV_FUNCTION (vsm4k, crypto_vi, none_tu_preds, u_vv_size_crypto_sew32_ops)
DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_ops)
DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x2_ops)
DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x4_ops)
DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x8_ops)
DEF_RVV_FUNCTION (vsm4r, crypto_vv, none_tu_preds, u_vvs_crypto_sew32_lmul_x16_ops)
#undef REQUIRED_EXTENSIONS
//Zvksh
#define REQUIRED_EXTENSIONS ZVKSH_EXT
DEF_RVV_FUNCTION (vsm3me, no_mask_policy, none_tu_preds, u_vvv_crypto_sew32_ops)
DEF_RVV_FUNCTION (vsm3c, crypto_vi, none_tu_preds, u_vvv_size_crypto_sew32_ops)
#undef REQUIRED_EXTENSIONS
#undef DEF_RVV_FUNCTION


@ -984,6 +984,89 @@ struct seg_fault_load_def : public build_base
}
};
/* vsm4r/vaes* class. */
struct crypto_vv_def : public build_base
{
char *get_name (function_builder &b, const function_instance &instance,
bool overloaded_p) const override
{
/* Return nullptr if it can not be overloaded. */
if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
return nullptr;
b.append_base_name (instance.base_name);
b.append_name (operand_suffixes[instance.op_info->op]);
if (!overloaded_p)
{
if (instance.op_info->op == OP_TYPE_vv)
b.append_name (type_suffixes[instance.type.index].vector);
else
{
vector_type_index arg0_type_idx
= instance.op_info->args[1].get_function_type_index
(instance.type.index);
b.append_name (type_suffixes[arg0_type_idx].vector);
vector_type_index ret_type_idx
= instance.op_info->ret.get_function_type_index
(instance.type.index);
b.append_name (type_suffixes[ret_type_idx].vector);
}
}
b.append_name (predication_suffixes[instance.pred]);
return b.finish_name ();
}
};
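The shape above appends one type suffix for the vv form and two (argument type, then return type) for the vs form. A sketch of the resulting non-overloaded spellings; illustrative only, the exact strings come from the suffix tables:

#include <cstdio>
#include <string>

/* Illustrative only.  */
int
main ()
{
  std::string vv = std::string ("__riscv_") + "vaesef" + "_vv" + "_u32m1";
  std::string vs = std::string ("__riscv_") + "vaesef" + "_vs" + "_u32m1"
                   + "_u32m4";
  printf ("%s\n%s\n", vv.c_str (), vs.c_str ());
}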
/* vaeskf1/vaeskf2/vsm4k/vsm3c class. */
struct crypto_vi_def : public build_base
{
char *get_name (function_builder &b, const function_instance &instance,
bool overloaded_p) const override
{
/* Return nullptr if it can not be overloaded. */
if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
return nullptr;
b.append_base_name (instance.base_name);
if (!overloaded_p)
{
b.append_name (operand_suffixes[instance.op_info->op]);
b.append_name (type_suffixes[instance.type.index].vector);
}
b.append_name (predication_suffixes[instance.pred]);
return b.finish_name ();
}
};
/* vaesz class. */
struct crypto_vv_no_op_type_def : public build_base
{
char *get_name (function_builder &b, const function_instance &instance,
bool overloaded_p) const override
{
/* Return nullptr if it can not be overloaded. */
if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
return nullptr;
b.append_base_name (instance.base_name);
if (!overloaded_p)
{
b.append_name (operand_suffixes[instance.op_info->op]);
vector_type_index arg0_type_idx
= instance.op_info->args[1].get_function_type_index
(instance.type.index);
b.append_name (type_suffixes[arg0_type_idx].vector);
vector_type_index ret_type_idx
= instance.op_info->ret.get_function_type_index
(instance.type.index);
b.append_name (type_suffixes[ret_type_idx].vector);
}
b.append_name (predication_suffixes[instance.pred]);
return b.finish_name ();
}
};
SHAPE(vsetvl, vsetvl)
SHAPE(vsetvl, vsetvlmax)
SHAPE(loadstore, loadstore)
@ -1012,5 +1095,7 @@ SHAPE(vlenb, vlenb)
SHAPE(seg_loadstore, seg_loadstore)
SHAPE(seg_indexed_loadstore, seg_indexed_loadstore)
SHAPE(seg_fault_load, seg_fault_load)
SHAPE(crypto_vv, crypto_vv)
SHAPE(crypto_vi, crypto_vi)
SHAPE(crypto_vv_no_op_type, crypto_vv_no_op_type)
} // end namespace riscv_vector


@ -52,6 +52,10 @@ extern const function_shape *const vlenb;
extern const function_shape *const seg_loadstore;
extern const function_shape *const seg_indexed_loadstore;
extern const function_shape *const seg_fault_load;
/* Below are the function_shape instances for the vector crypto intrinsics.  */
extern const function_shape *const crypto_vv;
extern const function_shape *const crypto_vi;
extern const function_shape *const crypto_vv_no_op_type;
}
} // end namespace riscv_vector


@ -339,6 +339,18 @@ along with GCC; see the file COPYING3. If not see
#define DEF_RVV_TUPLE_OPS(TYPE, REQUIRE)
#endif
/* Use "DEF_RVV_CRYPTO_SEW32_OPS" macro include all SEW=32 types
which will be iterated and registered as intrinsic functions. */
#ifndef DEF_RVV_CRYPTO_SEW32_OPS
#define DEF_RVV_CRYPTO_SEW32_OPS(TYPE, REQUIRE)
#endif
/* Use "DEF_RVV_CRYPTO_SEW64_OPS" macro include all SEW=64 types
which will be iterated and registered as intrinsic functions. */
#ifndef DEF_RVV_CRYPTO_SEW64_OPS
#define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE)
#endif
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
@ -1355,6 +1367,17 @@ DEF_RVV_TUPLE_OPS (vfloat64m2x3_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_TUPLE_OPS (vfloat64m2x4_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32mf2_t, RVV_REQUIRE_MIN_VLEN_64)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m1_t, 0)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m2_t, 0)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m4_t, 0)
DEF_RVV_CRYPTO_SEW32_OPS (vuint32m8_t, 0)
DEF_RVV_CRYPTO_SEW64_OPS (vuint64m1_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CRYPTO_SEW64_OPS (vuint64m2_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CRYPTO_SEW64_OPS (vuint64m4_t, RVV_REQUIRE_ELEN_64)
DEF_RVV_CRYPTO_SEW64_OPS (vuint64m8_t, RVV_REQUIRE_ELEN_64)
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
@ -1406,3 +1429,5 @@ DEF_RVV_TUPLE_OPS (vfloat64m4x2_t, RVV_REQUIRE_ELEN_FP_64)
#undef DEF_RVV_LMUL2_OPS
#undef DEF_RVV_LMUL4_OPS
#undef DEF_RVV_TUPLE_OPS
#undef DEF_RVV_CRYPTO_SEW32_OPS
#undef DEF_RVV_CRYPTO_SEW64_OPS


@ -521,6 +521,19 @@ static const rvv_type_info tuple_ops[] = {
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
/* The types below will be registered for the vector-crypto intrinsic
   functions.  */
/* A list of SEW=32 types, registered for the vector-crypto intrinsic
   functions.  */
static const rvv_type_info crypto_sew32_ops[] = {
#define DEF_RVV_CRYPTO_SEW32_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
/* A list of SEW=64 types, registered for the vector-crypto intrinsic
   functions.  */
static const rvv_type_info crypto_sew64_ops[] = {
#define DEF_RVV_CRYPTO_SEW64_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
static CONSTEXPR const rvv_arg_type_info rvv_arg_type_info_end
= rvv_arg_type_info (NUM_BASE_TYPES);
@ -754,6 +767,11 @@ static CONSTEXPR const rvv_arg_type_info v_size_args[]
= {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_size),
rvv_arg_type_info_end};
/* A list of args for vector_type func (double demote_type, size_t) function. */
static CONSTEXPR const rvv_arg_type_info wv_size_args[]
= {rvv_arg_type_info (RVV_BASE_double_trunc_vector),
rvv_arg_type_info (RVV_BASE_size), rvv_arg_type_info_end};
/* A list of args for vector_type func (vector_type, vector_type, size)
* function. */
static CONSTEXPR const rvv_arg_type_info vv_size_args[]
@ -1044,6 +1062,14 @@ static CONSTEXPR const rvv_op_info u_v_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
end_args /* Args */};
/* A static operand information for vector_type func (vector_type)
* function registration. */
static CONSTEXPR const rvv_op_info u_vv_ops
= {u_ops, /* Types */
OP_TYPE_v, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
v_args /* Args */};
/* A static operand information for unsigned long func (vector_type)
* function registration. */
static CONSTEXPR const rvv_op_info b_ulong_m_ops
@ -2174,6 +2200,14 @@ static CONSTEXPR const rvv_op_info u_wvv_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
wvv_args /* Args */};
/* A static operand information for vector_type func (double demote type, size type)
* function registration. */
static CONSTEXPR const rvv_op_info u_shift_wvx_ops
= {wextu_ops, /* Types */
OP_TYPE_vx, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
wv_size_args /* Args */};
/* A static operand information for vector_type func (double demote type, double
* demote scalar_type) function registration. */
static CONSTEXPR const rvv_op_info i_wvx_ops
@ -2604,6 +2638,101 @@ static CONSTEXPR const rvv_op_info all_v_vcreate_lmul4_x2_ops
rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
ext_vcreate_args /* Args */};
/* A static operand information for vector_type func (vector_type)
   function registration.  Some instructions only support SEW=32,
   such as the crypto vector Zvkg extension.  */
static CONSTEXPR const rvv_arg_type_info vs_lmul_x2_args[]
= {rvv_arg_type_info (RVV_BASE_vlmul_ext_x2),
rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
static CONSTEXPR const rvv_arg_type_info vs_lmul_x4_args[]
= {rvv_arg_type_info (RVV_BASE_vlmul_ext_x4),
rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
static CONSTEXPR const rvv_arg_type_info vs_lmul_x8_args[]
= {rvv_arg_type_info (RVV_BASE_vlmul_ext_x8),
rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
static CONSTEXPR const rvv_arg_type_info vs_lmul_x16_args[]
= {rvv_arg_type_info (RVV_BASE_vlmul_ext_x16),
rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
static CONSTEXPR const rvv_op_info u_vvv_crypto_sew32_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vv, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vv_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew32_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vv, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vvv_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvv_size_crypto_sew32_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vi, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vv_size_args /* Args */};
static CONSTEXPR const rvv_op_info u_vv_size_crypto_sew32_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vi, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
v_size_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vs, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vv_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x2_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vs, /* Suffix */
rvv_arg_type_info (RVV_BASE_vlmul_ext_x2), /* Return type */
vs_lmul_x2_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x4_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vs, /* Suffix */
rvv_arg_type_info (RVV_BASE_vlmul_ext_x4), /* Return type */
vs_lmul_x4_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x8_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vs, /* Suffix */
rvv_arg_type_info (RVV_BASE_vlmul_ext_x8), /* Return type */
vs_lmul_x8_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvs_crypto_sew32_lmul_x16_ops
= {crypto_sew32_ops, /* Types */
OP_TYPE_vs, /* Suffix */
rvv_arg_type_info (RVV_BASE_vlmul_ext_x16), /* Return type */
vs_lmul_x16_args /* Args */};
/* A static operand information for vector_type func (vector_type)
   function registration.  Some instructions only support SEW=64,
   such as vclmul.vv and vclmul.vx from the crypto vector Zvbc extension.  */
static CONSTEXPR const rvv_op_info u_vvv_crypto_sew64_ops
= {crypto_sew64_ops, /* Types */
OP_TYPE_vv, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vv_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvx_crypto_sew64_ops
= {crypto_sew64_ops, /* Types */
OP_TYPE_vx, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vx_args /* Args */};
static CONSTEXPR const rvv_op_info u_vvvv_crypto_sew64_ops
= {crypto_sew64_ops, /* Types */
OP_TYPE_vv, /* Suffix */
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vvv_args /* Args */};
/* A list of all RVV base function types. */
static CONSTEXPR const function_type_info function_types[] = {
#define DEF_RVV_TYPE_INDEX( \
@ -4176,7 +4305,9 @@ registered_function::overloaded_hash (const vec<tree, va_gc> &arglist)
__riscv_vset(vint8m2_t dest, size_t index, vint8m1_t value); The reason
is the same as above. */
if ((instance.base == bases::vget && (i == (len - 1)))
|| (instance.base == bases::vset && (i == (len - 2))))
|| ((instance.base == bases::vset
|| instance.shape == shapes::crypto_vi)
&& (i == (len - 2))))
argument_types.safe_push (size_type_node);
/* Vector fixed-point arithmetic instructions requiring argument vxrm.
For example: vuint32m4_t __riscv_vaaddu(vuint32m4_t vs2,


@ -558,6 +558,7 @@ DEF_RVV_TYPE (vfloat64m8_t, 17, __rvv_float64m8_t, double, RVVM8DF, _f64m8,
DEF_RVV_OP_TYPE (vv)
DEF_RVV_OP_TYPE (vx)
DEF_RVV_OP_TYPE (vi)
DEF_RVV_OP_TYPE (v)
DEF_RVV_OP_TYPE (wv)
DEF_RVV_OP_TYPE (wx)


@ -241,7 +241,7 @@ loop_invariant_op_p (class loop *loop,
if (SSA_NAME_IS_DEFAULT_DEF (op)
|| !flow_bb_inside_loop_p (loop, gimple_bb (SSA_NAME_DEF_STMT (op))))
return true;
return gimple_uid (SSA_NAME_DEF_STMT (op)) & 1;
return false;
}
/* Return true if the variable should be counted into liveness. */


@ -10105,6 +10105,7 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_UPDATE:
case OMP_ARRAY_SECTION:
/* GCC internal stuff. */
case VA_ARG_EXPR:
case TRANSACTION_EXPR:


@ -7069,6 +7069,7 @@ extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree,
vec<tree, va_gc> **, tsubst_flags_t);
extern tree grok_omp_array_section (location_t, tree, tree, tree);
extern tree delete_sanity (location_t, tree, tree, bool,
int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
@ -8172,6 +8173,7 @@ inline tree build_x_binary_op (const op_location_t &loc,
}
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_omp_array_section (location_t, tree, tree, tree);
extern tree build_x_unary_op (location_t,
enum tree_code, cp_expr,
tree, tsubst_flags_t);


@ -617,6 +617,43 @@ grok_array_decl (location_t loc, tree array_expr, tree index_exp,
return expr;
}
/* Build an OMP_ARRAY_SECTION expression, handling usage in template
definitions, etc. */
tree
grok_omp_array_section (location_t loc, tree array_expr, tree index,
tree length)
{
tree orig_array_expr = array_expr;
tree orig_index = index;
tree orig_length = length;
if (error_operand_p (array_expr)
|| error_operand_p (index)
|| error_operand_p (length))
return error_mark_node;
if (processing_template_decl
&& (type_dependent_expression_p (array_expr)
|| type_dependent_expression_p (index)
|| type_dependent_expression_p (length)))
return build_min_nt_loc (loc, OMP_ARRAY_SECTION, array_expr, index, length);
index = fold_non_dependent_expr (index);
length = fold_non_dependent_expr (length);
/* NOTE: We can pass through invalidly-typed index/length fields
here (e.g. if the user tries to use a floating-point index/length).
This is diagnosed later in semantics.cc:handle_omp_array_sections_1. */
tree expr = build_omp_array_section (loc, array_expr, index, length);
if (processing_template_decl)
expr = build_min_non_dep (OMP_ARRAY_SECTION, expr, orig_array_expr,
orig_index, orig_length);
return expr;
}
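For reference, the syntax this routine builds trees for is the OpenMP array section base[low-bound : length] as it appears in mapping clauses; a minimal usage sketch:

/* Illustrative only.  */
void
saxpy_target (float *x, float *y, int n, float a)
{
#pragma omp target map(to: x[0:n]) map(tofrom: y[0:n])
  for (int i = 0; i < n; i++)
    y[i] = a * x[i] + y[i];
}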
/* Given the cast expression EXP, checking out its validity. Either return
an error_mark_node if there was an unavoidable error, return a cast to
void for trying to delete a pointer w/ the value 0, or return the


@ -2497,6 +2497,15 @@ dump_expr (cxx_pretty_printer *pp, tree t, int flags)
pp_cxx_right_bracket (pp);
break;
case OMP_ARRAY_SECTION:
dump_expr (pp, TREE_OPERAND (t, 0), flags);
pp_cxx_left_bracket (pp);
dump_expr (pp, TREE_OPERAND (t, 1), flags);
pp_colon (pp);
dump_expr (pp, TREE_OPERAND (t, 2), flags);
pp_cxx_right_bracket (pp);
break;
case UNARY_PLUS_EXPR:
dump_unary_op (pp, "+", t, flags);
break;


@ -4459,6 +4459,9 @@ cp_parser_new (cp_lexer *lexer)
parser->omp_declare_simd = NULL;
parser->oacc_routine = NULL;
/* Disallow OpenMP array sections in expressions. */
parser->omp_array_section_p = false;
/* Not declaring an implicit function template. */
parser->auto_is_implicit_function_template_parm_p = false;
parser->fully_implicit_function_template_p = false;
@ -5462,6 +5465,7 @@ static cp_expr
cp_parser_statement_expr (cp_parser *parser)
{
cp_token_position start = cp_parser_start_tentative_firewall (parser);
auto oas = make_temp_override (parser->omp_array_section_p, false);
/* Consume the '('. */
location_t start_loc = cp_lexer_peek_token (parser->lexer)->location;
@ -8299,6 +8303,7 @@ cp_parser_postfix_open_square_expression (cp_parser *parser,
releasing_vec expression_list = NULL;
location_t loc = cp_lexer_peek_token (parser->lexer)->location;
bool saved_greater_than_is_operator_p;
bool saved_colon_corrects_to_scope_p;
/* Consume the `[' token. */
cp_lexer_consume_token (parser->lexer);
@ -8306,6 +8311,10 @@ cp_parser_postfix_open_square_expression (cp_parser *parser,
saved_greater_than_is_operator_p = parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = true;
saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
if (parser->omp_array_section_p)
parser->colon_corrects_to_scope_p = false;
/* Parse the index expression. */
/* ??? For offsetof, there is a question of what to allow here. If
offsetof is not being used in an integral constant expression context,
@ -8316,7 +8325,8 @@ cp_parser_postfix_open_square_expression (cp_parser *parser,
constant expressions here. */
if (for_offsetof)
index = cp_parser_constant_expression (parser);
else
else if (!parser->omp_array_section_p
|| cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
{
if (cxx_dialect >= cxx23
&& cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_SQUARE))
@ -8372,6 +8382,68 @@ cp_parser_postfix_open_square_expression (cp_parser *parser,
parser->greater_than_is_operator_p = saved_greater_than_is_operator_p;
if (cxx_dialect >= cxx23
&& parser->omp_array_section_p
&& expression_list.get () != NULL
&& vec_safe_length (expression_list) > 1)
{
error_at (loc, "cannot use multidimensional subscript in OpenMP array "
"section");
index = error_mark_node;
}
if (parser->omp_array_section_p
&& cp_lexer_next_token_is (parser->lexer, CPP_COLON))
{
cp_lexer_consume_token (parser->lexer);
tree length = NULL_TREE;
if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
{
if (cxx_dialect >= cxx23)
{
cp_expr expr
= cp_parser_parenthesized_expression_list_elt (parser,
/*cast_p=*/
false,
/*allow_exp_p=*/
true,
/*non_cst_p=*/
NULL);
if (expr == error_mark_node)
length = error_mark_node;
else
length = expr.get_value ();
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
{
error_at (loc, "cannot use multidimensional subscript in "
"OpenMP array section");
length = error_mark_node;
}
}
else
length
= cp_parser_expression (parser, NULL, /*cast_p=*/false,
/*decltype_p=*/false,
/*warn_comma_p=*/warn_comma_subscript);
}
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
if (index == error_mark_node || length == error_mark_node)
{
cp_parser_skip_to_closing_square_bracket (parser);
return error_mark_node;
}
else
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
return grok_omp_array_section (input_location, postfix_expression, index,
length);
}
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
/* Look for the closing `]'. */
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
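In C++23 a comma inside the brackets forms a multidimensional subscript, which the checks above reject inside an array section, while nested sections remain fine. A hedged sketch of both situations:

/* Illustrative only.  */
void
f (int a[4][10])
{
#pragma omp target map(tofrom: a[0:2][0:10])  /* nested sections: accepted */
  a[0][0] = 1;
  /* map(tofrom: a[0, 1]) would be rejected in C++23 mode with "cannot use
     multidimensional subscript in OpenMP array section".  */
}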
@ -8700,6 +8772,7 @@ cp_parser_parenthesized_expression_list (cp_parser* parser,
{
vec<tree, va_gc> *expression_list;
bool saved_greater_than_is_operator_p;
bool saved_omp_array_section_p;
/* Assume all the expressions will be constant. */
if (non_constant_p)
@ -8717,6 +8790,9 @@ cp_parser_parenthesized_expression_list (cp_parser* parser,
= parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = true;
saved_omp_array_section_p = parser->omp_array_section_p;
parser->omp_array_section_p = false;
cp_expr expr (NULL_TREE);
/* Consume expressions until there are no more. */
@ -8783,12 +8859,14 @@ cp_parser_parenthesized_expression_list (cp_parser* parser,
{
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
parser->omp_array_section_p = saved_omp_array_section_p;
return NULL;
}
}
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
parser->omp_array_section_p = saved_omp_array_section_p;
return expression_list;
}
@ -11299,6 +11377,7 @@ cp_parser_lambda_expression (cp_parser* parser)
cp_binding_level* implicit_template_scope = parser->implicit_template_scope;
bool auto_is_implicit_function_template_parm_p
= parser->auto_is_implicit_function_template_parm_p;
bool saved_omp_array_section_p = parser->omp_array_section_p;
parser->num_template_parameter_lists = 0;
parser->in_statement = 0;
@ -11307,6 +11386,7 @@ cp_parser_lambda_expression (cp_parser* parser)
parser->implicit_template_parms = 0;
parser->implicit_template_scope = 0;
parser->auto_is_implicit_function_template_parm_p = false;
parser->omp_array_section_p = false;
/* The body of a lambda in a discarded statement is not discarded. */
bool discarded = in_discarded_stmt;
@ -11357,6 +11437,7 @@ cp_parser_lambda_expression (cp_parser* parser)
parser->implicit_template_scope = implicit_template_scope;
parser->auto_is_implicit_function_template_parm_p
= auto_is_implicit_function_template_parm_p;
parser->omp_array_section_p = saved_omp_array_section_p;
}
/* This field is only used during parsing of the lambda. */
@ -25917,6 +25998,7 @@ cp_parser_braced_list (cp_parser *parser, bool *non_constant_p /*=nullptr*/)
{
tree initializer;
location_t start_loc = cp_lexer_peek_token (parser->lexer)->location;
auto oas = make_temp_override (parser->omp_array_section_p, false);
/* Consume the `{' token. */
matching_braces braces;
@ -37862,7 +37944,7 @@ struct omp_dim
static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
tree list, bool *colon,
bool allow_deref = false)
bool map_lvalue = false)
{
auto_vec<omp_dim> dims;
bool array_section_p;
@ -37879,6 +37961,104 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
if (kind == OMP_CLAUSE_DEPEND || kind == OMP_CLAUSE_AFFINITY)
cp_parser_parse_tentatively (parser);
/* This condition doesn't include OMP_CLAUSE_DEPEND or
OMP_CLAUSE_AFFINITY since lvalue ("locator list") parsing for those is
handled further down the function. */
else if (map_lvalue
&& (kind == OMP_CLAUSE_MAP
|| kind == OMP_CLAUSE_TO
|| kind == OMP_CLAUSE_FROM))
{
auto s = make_temp_override (parser->omp_array_section_p, true);
token = cp_lexer_peek_token (parser->lexer);
location_t loc = token->location;
decl = cp_parser_assignment_expression (parser);
/* This code rewrites a parsed expression containing various tree
codes used to represent array accesses into a more uniform nest of
OMP_ARRAY_SECTION nodes before it is processed by
semantics.cc:handle_omp_array_sections_1. It might be more
efficient to move this logic to that function instead, analysing
the parsed expression directly rather than this preprocessed
form. */
dims.truncate (0);
if (TREE_CODE (decl) == OMP_ARRAY_SECTION)
{
while (TREE_CODE (decl) == OMP_ARRAY_SECTION)
{
tree low_bound = TREE_OPERAND (decl, 1);
tree length = TREE_OPERAND (decl, 2);
dims.safe_push (omp_dim (low_bound, length, loc, false));
decl = TREE_OPERAND (decl, 0);
}
while (TREE_CODE (decl) == ARRAY_REF
|| TREE_CODE (decl) == INDIRECT_REF
|| TREE_CODE (decl) == COMPOUND_EXPR)
{
if (REFERENCE_REF_P (decl))
break;
if (TREE_CODE (decl) == COMPOUND_EXPR)
{
decl = TREE_OPERAND (decl, 1);
STRIP_NOPS (decl);
}
else if (TREE_CODE (decl) == INDIRECT_REF)
{
dims.safe_push (omp_dim (integer_zero_node,
integer_one_node, loc, true));
decl = TREE_OPERAND (decl, 0);
}
else /* ARRAY_REF. */
{
tree index = TREE_OPERAND (decl, 1);
dims.safe_push (omp_dim (index, integer_one_node, loc,
true));
decl = TREE_OPERAND (decl, 0);
}
}
/* Bare references have their own special handling, so remove
the explicit dereference added by convert_from_reference. */
if (REFERENCE_REF_P (decl))
decl = TREE_OPERAND (decl, 0);
for (int i = dims.length () - 1; i >= 0; i--)
decl = grok_omp_array_section (loc, decl, dims[i].low_bound,
dims[i].length);
}
else if (TREE_CODE (decl) == INDIRECT_REF)
{
bool ref_p = REFERENCE_REF_P (decl);
/* If we have "*foo" and
- it's an indirection of a reference, "unconvert" it, i.e.
strip the indirection (to just "foo").
- it's an indirection of a pointer, turn it into
"foo[0:1]". */
decl = TREE_OPERAND (decl, 0);
STRIP_NOPS (decl);
if (!ref_p)
decl = grok_omp_array_section (loc, decl, integer_zero_node,
integer_one_node);
}
else if (TREE_CODE (decl) == ARRAY_REF)
{
tree idx = TREE_OPERAND (decl, 1);
decl = TREE_OPERAND (decl, 0);
STRIP_NOPS (decl);
decl = grok_omp_array_section (loc, decl, idx, integer_one_node);
}
else if (TREE_CODE (decl) == NON_LVALUE_EXPR
|| CONVERT_EXPR_P (decl))
decl = TREE_OPERAND (decl, 0);
goto build_clause;
}
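The net effect of these rewrites, as described in the comments above, is that a dereferenced pointer in a map clause behaves like a one-element section, and an array element likewise. A usage sketch:

/* Illustrative only.  */
void
bump (int *p, int i)
{
#pragma omp target map(tofrom: *p)    /* handled like p[0:1] */
  *p += 1;
#pragma omp target map(tofrom: p[i])  /* handled like p[i:1] */
  p[i] += 1;
}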
token = cp_lexer_peek_token (parser->lexer);
if (kind != 0
&& cp_parser_is_keyword (token, RID_THIS))
@ -37957,8 +38137,7 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
case OMP_CLAUSE_TO:
start_component_ref:
while (cp_lexer_next_token_is (parser->lexer, CPP_DOT)
|| (allow_deref
&& cp_lexer_next_token_is (parser->lexer, CPP_DEREF)))
|| cp_lexer_next_token_is (parser->lexer, CPP_DEREF))
{
cpp_ttype ttype
= cp_lexer_next_token_is (parser->lexer, CPP_DOT)
@ -38044,9 +38223,7 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
|| kind == OMP_CLAUSE_TO)
&& !array_section_p
&& (cp_lexer_next_token_is (parser->lexer, CPP_DOT)
|| (allow_deref
&& cp_lexer_next_token_is (parser->lexer,
CPP_DEREF))))
|| cp_lexer_next_token_is (parser->lexer, CPP_DEREF)))
{
for (unsigned i = 0; i < dims.length (); i++)
{
@ -38058,8 +38235,9 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
}
else
for (unsigned i = 0; i < dims.length (); i++)
decl = tree_cons (dims[i].low_bound, dims[i].length, decl);
decl = build_omp_array_section (input_location, decl,
dims[i].low_bound,
dims[i].length);
break;
default:
break;
@ -38080,6 +38258,7 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
cp_parser_parse_definitely (parser);
}
build_clause:
tree u = build_omp_clause (token->location, kind);
OMP_CLAUSE_DECL (u) = decl;
OMP_CLAUSE_CHAIN (u) = list;
@ -38129,7 +38308,7 @@ cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
static tree
cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list,
bool allow_deref = false)
bool map_lvalue = false)
{
if (parser->lexer->in_omp_decl_attribute)
{
@ -38148,7 +38327,7 @@ cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list,
if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
return cp_parser_omp_var_list_no_open (parser, kind, list, NULL,
allow_deref);
map_lvalue);
return list;
}
@ -38217,7 +38396,7 @@ cp_parser_oacc_data_clause (cp_parser *parser, pragma_omp_clause c_kind,
gcc_unreachable ();
}
tree nl, c;
nl = cp_parser_omp_var_list (parser, OMP_CLAUSE_MAP, list, true);
nl = cp_parser_omp_var_list (parser, OMP_CLAUSE_MAP, list, false);
for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
OMP_CLAUSE_SET_MAP_KIND (c, kind);
@@ -41157,8 +41336,13 @@ cp_parser_omp_clause_map (cp_parser *parser, tree list)
cp_lexer_consume_token (parser->lexer);
}
/* We introduce a scope here so that errors parsing e.g. "always", "close"
tokens do not propagate to later directives that might use them
legally. */
begin_scope (sk_omp, NULL);
nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_MAP, list,
NULL, true);
finish_scope ();
for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
OMP_CLAUSE_SET_MAP_KIND (c, kind);


@@ -412,6 +412,9 @@ struct GTY(()) cp_parser {
appear. */
bool omp_attrs_forbidden_p;
/* TRUE if an OpenMP array section is allowed. */
bool omp_array_section_p;
/* Tracks the function's template parameter list when declaring a function
using generic type parameters. This is either a new chain in the case of a
fully implicit function template or an extension of the function's existing


@@ -16838,6 +16838,7 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl)
case CALL_EXPR:
case ARRAY_REF:
case SCOPE_REF:
case OMP_ARRAY_SECTION:
/* We should use one of the expression tsubsts for these codes. */
gcc_unreachable ();
@@ -17432,6 +17433,21 @@ tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain,
= OMP_CLAUSE_DOACROSS_SINK_NEGATIVE (decl);
return ret;
}
else if (TREE_CODE (decl) == OMP_ARRAY_SECTION)
{
tree low_bound
= tsubst_stmt (TREE_OPERAND (decl, 1), args, complain, in_decl);
tree length = tsubst_stmt (TREE_OPERAND (decl, 2), args, complain,
in_decl);
tree base = tsubst_omp_clause_decl (TREE_OPERAND (decl, 0), args,
complain, in_decl, NULL);
if (TREE_OPERAND (decl, 0) == base
&& TREE_OPERAND (decl, 1) == low_bound
&& TREE_OPERAND (decl, 2) == length)
return decl;
return build3 (OMP_ARRAY_SECTION, TREE_TYPE (base), base, low_bound,
length);
}
tree ret = tsubst_stmt (decl, args, complain, in_decl);
/* Undo convert_from_reference tsubst_expr could have called. */
if (decl
@@ -20230,6 +20246,27 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
RECUR (TREE_OPERAND (t, 1)),
complain|decltype_flag));
case OMP_ARRAY_SECTION:
{
tree op0 = RECUR (TREE_OPERAND (t, 0));
tree op1 = NULL_TREE, op2 = NULL_TREE;
if (op0 == error_mark_node)
RETURN (error_mark_node);
if (TREE_OPERAND (t, 1))
{
op1 = RECUR (TREE_OPERAND (t, 1));
if (op1 == error_mark_node)
RETURN (error_mark_node);
}
if (TREE_OPERAND (t, 2))
{
op2 = RECUR (TREE_OPERAND (t, 2));
if (op2 == error_mark_node)
RETURN (error_mark_node);
}
RETURN (build_omp_array_section (EXPR_LOCATION (t), op0, op1, op2));
}
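      /* Illustrative only: the kind of template this case exists for.
	 Instantiating the hypothetical f<int> below must substitute into the
	 base, low bound and length operands of the OMP_ARRAY_SECTION in the
	 map clause:

	   template <typename T>
	   void f (T *p, int n)
	   {
	   #pragma omp target map(tofrom: p[0:n])
	     for (int i = 0; i < n; i++)
	       p[i] += 1;
	   }
	   template void f<int> (int *, int);  */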
case SIZEOF_EXPR:
if (PACK_EXPANSION_P (TREE_OPERAND (t, 0))
|| ARGUMENT_PACK_P (TREE_OPERAND (t, 0)))


@@ -5426,7 +5426,7 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
{
tree ret, low_bound, length, type;
bool openacc = (ort & C_ORT_ACC) != 0;
if (TREE_CODE (t) != TREE_LIST)
if (TREE_CODE (t) != OMP_ARRAY_SECTION)
{
if (error_operand_p (t))
return error_mark_node;
@@ -5448,7 +5448,9 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
ret = t_refto;
if (TREE_CODE (t) == FIELD_DECL)
ret = finish_non_static_data_member (t, NULL_TREE, NULL_TREE);
else if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
else if (!VAR_P (t)
&& (openacc || !EXPR_P (t))
&& TREE_CODE (t) != PARM_DECL)
{
if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
return NULL_TREE;
@@ -5481,16 +5483,16 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
&& (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
&& TREE_CODE (TREE_CHAIN (t)) == FIELD_DECL)
TREE_CHAIN (t) = omp_privatize_field (TREE_CHAIN (t), false);
ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
&& TREE_CODE (TREE_OPERAND (t, 0)) == FIELD_DECL)
TREE_OPERAND (t, 0) = omp_privatize_field (TREE_OPERAND (t, 0), false);
ret = handle_omp_array_sections_1 (c, TREE_OPERAND (t, 0), types,
maybe_zero_len, first_non_one, ort);
if (ret == error_mark_node || ret == NULL_TREE)
return ret;
type = TREE_TYPE (ret);
low_bound = TREE_PURPOSE (t);
length = TREE_VALUE (t);
low_bound = TREE_OPERAND (t, 1);
length = TREE_OPERAND (t, 2);
if ((low_bound && type_dependent_expression_p (low_bound))
|| (length && type_dependent_expression_p (length)))
return NULL_TREE;
@@ -5696,7 +5698,7 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
tree lb = cp_save_expr (low_bound);
if (lb != low_bound)
{
TREE_PURPOSE (t) = lb;
TREE_OPERAND (t, 1) = lb;
low_bound = lb;
}
}
@@ -5727,14 +5729,14 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
array-section-subscript, the array section could be non-contiguous. */
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_AFFINITY
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
&& TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
&& TREE_CODE (TREE_OPERAND (t, 0)) == OMP_ARRAY_SECTION)
{
/* If any prior dimension has a non-one length, then deem this
array section as non-contiguous. */
for (tree d = TREE_CHAIN (t); TREE_CODE (d) == TREE_LIST;
d = TREE_CHAIN (d))
for (tree d = TREE_OPERAND (t, 0); TREE_CODE (d) == OMP_ARRAY_SECTION;
d = TREE_OPERAND (d, 0))
{
tree d_length = TREE_VALUE (d);
tree d_length = TREE_OPERAND (d, 2);
if (d_length == NULL_TREE || !integer_onep (d_length))
{
error_at (OMP_CLAUSE_LOCATION (c),
@@ -5757,7 +5759,7 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
tree lb = cp_save_expr (low_bound);
if (lb != low_bound)
{
TREE_PURPOSE (t) = lb;
TREE_OPERAND (t, 1) = lb;
low_bound = lb;
}
/* Temporarily disable -fstrong-eval-order for array reductions.
@@ -5835,10 +5837,12 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
return false;
for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
t = TREE_CHAIN (t))
t = TREE_OPERAND (t, 0))
{
tree low_bound = TREE_PURPOSE (t);
tree length = TREE_VALUE (t);
gcc_assert (TREE_CODE (t) == OMP_ARRAY_SECTION);
tree low_bound = TREE_OPERAND (t, 1);
tree length = TREE_OPERAND (t, 2);
i--;
if (low_bound
@@ -6951,8 +6955,8 @@ cp_oacc_check_attachments (tree c)
tree t = OMP_CLAUSE_DECL (c);
tree type;
while (TREE_CODE (t) == TREE_LIST)
t = TREE_CHAIN (t);
while (TREE_CODE (t) == OMP_ARRAY_SECTION)
t = TREE_OPERAND (t, 0);
type = TREE_TYPE (t);
@@ -7059,7 +7063,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
case OMP_CLAUSE_TASK_REDUCTION:
field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP);
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
if (handle_omp_array_sections (c, ort))
{
@@ -7075,10 +7079,10 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
remove = true;
break;
}
if (TREE_CODE (t) == TREE_LIST)
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
while (TREE_CODE (t) == TREE_LIST)
t = TREE_CHAIN (t);
while (TREE_CODE (t) == OMP_ARRAY_SECTION)
t = TREE_OPERAND (t, 0);
}
else
{
@@ -8102,7 +8106,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
else
last_iterators = NULL_TREE;
if (TREE_CODE (t) == TREE_LIST)
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
if (handle_omp_array_sections (c, ort))
remove = true;
@@ -8262,7 +8266,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
auto_vec<omp_addr_token *, 10> addr_tokens;
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
grp_start_p = pc;
grp_sentinel = OMP_CLAUSE_CHAIN (c);
@@ -8272,7 +8276,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
else
{
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != TREE_LIST
if (TREE_CODE (t) != OMP_ARRAY_SECTION
&& !type_dependent_expression_p (t)
&& !omp_mappable_type (TREE_TYPE (t)))
{
@@ -8455,7 +8459,8 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH))
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH
|| (!openacc && EXPR_P (t))))
break;
if (DECL_P (t))
error_at (OMP_CLAUSE_LOCATION (c),
@@ -8854,15 +8859,15 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
case OMP_CLAUSE_HAS_DEVICE_ADDR:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == TREE_LIST)
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
if (handle_omp_array_sections (c, ort))
remove = true;
else
{
t = OMP_CLAUSE_DECL (c);
while (TREE_CODE (t) == TREE_LIST)
t = TREE_CHAIN (t);
while (TREE_CODE (t) == OMP_ARRAY_SECTION)
t = TREE_OPERAND (t, 0);
while (INDIRECT_REF_P (t)
|| TREE_CODE (t) == ARRAY_REF)
t = TREE_OPERAND (t, 0);
@@ -9234,10 +9239,10 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
if (DECL_P (t))
bitmap_clear_bit (&aligned_head, DECL_UID (t));
}
else if (TREE_CODE (t) == TREE_LIST)
else if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
while (TREE_CODE (t) == TREE_LIST)
t = TREE_CHAIN (t);
while (TREE_CODE (t) == OMP_ARRAY_SECTION)
t = TREE_OPERAND (t, 0);
if (DECL_P (t))
bitmap_clear_bit (&aligned_head, DECL_UID (t));
t = OMP_CLAUSE_DECL (c);


@@ -4796,6 +4796,56 @@ build_x_array_ref (location_t loc, tree arg1, tree arg2,
return expr;
}
/* Build an OpenMP array section reference, creating an exact type for the
resulting expression based on the element type and bounds if possible. If
we have variable bounds, create an incomplete array type for the result
instead. */
tree
build_omp_array_section (location_t loc, tree array_expr, tree index,
tree length)
{
tree type = TREE_TYPE (array_expr);
gcc_assert (type);
type = non_reference (type);
tree sectype, eltype = TREE_TYPE (type);
/* It's not an array or pointer type. Just reuse the type of the
original expression as the type of the array section (an error will be
raised anyway, later). */
if (eltype == NULL_TREE)
sectype = TREE_TYPE (array_expr);
else
{
tree idxtype = NULL_TREE;
/* If we know the integer bounds, create an index type with exact
low/high (or zero/length) bounds. Otherwise, create an incomplete
array type. (This mostly only affects diagnostics.) */
if (index != NULL_TREE
&& length != NULL_TREE
&& TREE_CODE (index) == INTEGER_CST
&& TREE_CODE (length) == INTEGER_CST)
{
tree low = fold_convert (sizetype, index);
tree high = fold_convert (sizetype, length);
high = size_binop (PLUS_EXPR, low, high);
high = size_binop (MINUS_EXPR, high, size_one_node);
idxtype = build_range_type (sizetype, low, high);
}
else if ((index == NULL_TREE || integer_zerop (index))
&& length != NULL_TREE
&& TREE_CODE (length) == INTEGER_CST)
idxtype = build_index_type (length);
sectype = build_array_type (eltype, idxtype);
}
return build3_loc (loc, OMP_ARRAY_SECTION, sectype, array_expr, index,
length);
}
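/* Worked example (illustrative): for "int a[16]" the section "a[2:8]" gets
   the exact array type int[2..9] from build_range_type (low 2, high
   2 + 8 - 1), whereas "a[2:n]" with a non-constant length gets an incomplete
   array of int.  In both cases operand 0 is the base "a", operand 1 the low
   bound and operand 2 the length.  */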
/* Return whether OP is an expression of enum type cast to integer
type. In C++ even unsigned enum types are cast to signed integer
types. We do not want to issue warnings about comparisons between


@@ -23103,7 +23103,7 @@ Permissible names are: @samp{arm7tdmi}, @samp{arm7tdmi-s}, @samp{arm710t},
@samp{cortex-r7}, @samp{cortex-r8}, @samp{cortex-r52}, @samp{cortex-r52plus},
@samp{cortex-m0}, @samp{cortex-m0plus}, @samp{cortex-m1}, @samp{cortex-m3},
@samp{cortex-m4}, @samp{cortex-m7}, @samp{cortex-m23}, @samp{cortex-m33},
@samp{cortex-m35p}, @samp{cortex-m55}, @samp{cortex-m85}, @samp{cortex-x1},
@samp{cortex-m35p}, @samp{cortex-m52}, @samp{cortex-m55}, @samp{cortex-m85}, @samp{cortex-x1},
@samp{cortex-x1c}, @samp{cortex-m1.small-multiply}, @samp{cortex-m0.small-multiply},
@samp{cortex-m0plus.small-multiply}, @samp{exynos-m1}, @samp{marvell-pj4},
@samp{neoverse-n1}, @samp{neoverse-n2}, @samp{neoverse-v1}, @samp{xscale},
@@ -23169,34 +23169,34 @@ The following extension options are common to the listed CPUs:
@table @samp
@item +nodsp
Disable the DSP instructions on @samp{cortex-m33}, @samp{cortex-m35p},
@samp{cortex-m55} and @samp{cortex-m85}. Also disable the M-Profile Vector
Extension (MVE) integer and single precision floating-point instructions on
@samp{cortex-m55} and @samp{cortex-m85}.
@samp{cortex-m52}, @samp{cortex-m55} and @samp{cortex-m85}.
Also disable the M-Profile Vector Extension (MVE) integer and
single precision floating-point instructions on
@samp{cortex-m52}, @samp{cortex-m55} and @samp{cortex-m85}.
@item +nopacbti
Disable the Pointer Authentication and Branch Target Identification Extension
on @samp{cortex-m85}.
on @samp{cortex-m52} and @samp{cortex-m85}.
@item +nomve
Disable the M-Profile Vector Extension (MVE) integer and single precision
floating-point instructions on @samp{cortex-m55} and @samp{cortex-m85}.
floating-point instructions on @samp{cortex-m52}, @samp{cortex-m55} and @samp{cortex-m85}.
@item +nomve.fp
Disable the M-Profile Vector Extension (MVE) single precision floating-point
instructions on @samp{cortex-m55} and @samp{cortex-m85}.
instructions on @samp{cortex-m52}, @samp{cortex-m55} and @samp{cortex-m85}.
@item +cdecp0, +cdecp1, ... , +cdecp7
Enable the Custom Datapath Extension (CDE) on selected coprocessors according
to the numbers given in the options in the range 0 to 7 on @samp{cortex-m55}.
to the numbers given in the options in the range 0 to 7 on @samp{cortex-m52} and @samp{cortex-m55}.
@item +nofp
Disables the floating-point instructions on @samp{arm9e},
@samp{arm946e-s}, @samp{arm966e-s}, @samp{arm968e-s}, @samp{arm10e},
@samp{arm1020e}, @samp{arm1022e}, @samp{arm926ej-s},
@samp{arm1026ej-s}, @samp{cortex-r5}, @samp{cortex-r7}, @samp{cortex-r8},
@samp{cortex-m4}, @samp{cortex-m7}, @samp{cortex-m33}, @samp{cortex-m35p}
@samp{cortex-m4}, @samp{cortex-m7}, @samp{cortex-m33}, @samp{cortex-m35p},
@samp{cortex-m55} and @samp{cortex-m85}.
@samp{cortex-m52}, @samp{cortex-m55} and @samp{cortex-m85}.
Disables the floating-point and SIMD instructions on
@samp{generic-armv7-a}, @samp{cortex-a5}, @samp{cortex-a7},
@samp{cortex-a8}, @samp{cortex-a9}, @samp{cortex-a12},
@@ -23539,9 +23539,9 @@ Development Tools Engineering Specification", which can be found on
Mitigate against a potential security issue with the @code{VLLDM} instruction
in some M-profile devices when using CMSE (CVE-2021-35465). This option is
enabled by default when the option @option{-mcpu=} is used with
@code{cortex-m33}, @code{cortex-m35p}, @code{cortex-m55}, @code{cortex-m85}
or @code{star-mc1}. The option @option{-mno-fix-cmse-cve-2021-35465} can be used
to disable the mitigation.
@code{cortex-m33}, @code{cortex-m35p}, @code{cortex-m52}, @code{cortex-m55},
@code{cortex-m85} or @code{star-mc1}. The option @option{-mno-fix-cmse-cve-2021-35465}
can be used to disable the mitigation.
@opindex mstack-protector-guard
@opindex mstack-protector-guard-offset
@@ -34708,8 +34708,8 @@ see @ref{Other Builtins} for details.
@opindex mmovbe
@item -mmovbe
This option enables use of the @code{movbe} instruction to implement
@code{__builtin_bswap32} and @code{__builtin_bswap64}.
This option enables use of the @code{movbe} instruction to optimize
byte swapping of four and eight byte entities.
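For instance, byte swaps written with the @code{__builtin_bswap32} and
@code{__builtin_bswap64} built-ins can then be emitted as single
@code{movbe} loads or stores (illustrative sketch):

@smallexample
#include <stdint.h>

/* With -mmovbe these byte swaps can become single movbe instructions.  */
uint32_t
load_be32 (const uint32_t *p)
@{
  return __builtin_bswap32 (*p);
@}

uint64_t
load_be64 (const uint64_t *p)
@{
  return __builtin_bswap64 (*p);
@}
@end smallexample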
@opindex mshstk
@item -mshstk


@@ -1,3 +1,9 @@
2024-01-08 Harald Anlauf <anlauf@gmx.de>
PR fortran/113245
* trans-intrinsic.cc (gfc_conv_intrinsic_size): Use
gfc_conv_expr_present() for proper check of optional DIM argument.
2024-01-06 Harald Anlauf <anlauf@gmx.de>
José Rui Faustino de Sousa <jrfsousa@gmail.com>


@@ -8025,9 +8025,6 @@ gfc_conv_intrinsic_size (gfc_se * se, gfc_expr * expr)
argse.data_not_needed = 1;
gfc_conv_expr (&argse, actual->expr);
gfc_add_block_to_block (&se->pre, &argse.pre);
cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
argse.expr, null_pointer_node);
cond = gfc_evaluate_now (cond, &se->pre);
/* 'block2' contains the arg2 absent case, 'block' the arg2 present
case; size_var can be used in both blocks. */
tree size_var = gfc_create_var (TREE_TYPE (size), "size");
@@ -8038,6 +8035,7 @@ gfc_conv_intrinsic_size (gfc_se * se, gfc_expr * expr)
tmp = fold_build2_loc (input_location, MODIFY_EXPR,
TREE_TYPE (size_var), size_var, size);
gfc_add_expr_to_block (&block2, tmp);
cond = gfc_conv_expr_present (actual->expr->symtree->n.sym);
tmp = build3_v (COND_EXPR, cond, gfc_finish_block (&block),
gfc_finish_block (&block2));
gfc_add_expr_to_block (&se->pre, tmp);


@@ -305,6 +305,7 @@ optimizable_arith_overflow (gimple *stmt)
imm_use_iterator ui;
use_operand_p use_p;
int seen = 0;
gimple *realpart = NULL, *cast = NULL;
FOR_EACH_IMM_USE_FAST (use_p, ui, lhs)
{
gimple *g = USE_STMT (use_p);
@@ -317,6 +318,7 @@ optimizable_arith_overflow (gimple *stmt)
if ((seen & 1) != 0)
return 0;
seen |= 1;
realpart = g;
}
else if (gimple_assign_rhs_code (g) == IMAGPART_EXPR)
{
@@ -338,13 +340,35 @@ optimizable_arith_overflow (gimple *stmt)
if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs2))
|| TREE_CODE (TREE_TYPE (lhs2)) == BITINT_TYPE)
return 0;
cast = use_stmt;
}
else
return 0;
}
if ((seen & 2) == 0)
return 0;
return seen == 3 ? 2 : 1;
if (seen == 3)
{
/* Punt if the cast stmt appears before realpart stmt, because
if both appear, the lowering wants to emit all the code
at the location of realpart stmt. */
gimple_stmt_iterator gsi = gsi_for_stmt (realpart);
unsigned int cnt = 0;
do
{
gsi_prev_nondebug (&gsi);
if (gsi_end_p (gsi) || gsi_stmt (gsi) == cast)
return 0;
if (gsi_stmt (gsi) == stmt)
return 2;
/* If realpart is too far from stmt, punt as well.
Usually it will appear right after it. */
if (++cnt == 32)
return 0;
}
while (1);
}
return 1;
}
/* If STMT is some kind of comparison (GIMPLE_COND, comparison assignment)
@@ -6582,8 +6606,12 @@ gimple_lower_bitint (void)
= build_array_type_nelts (large_huge.m_limb_type,
nelts);
tree ptype = build_pointer_type (TREE_TYPE (v1));
tree off = fold_convert (ptype,
TYPE_SIZE_UNIT (TREE_TYPE (c)));
tree off;
if (c)
off = fold_convert (ptype,
TYPE_SIZE_UNIT (TREE_TYPE (c)));
else
off = build_zero_cst (ptype);
tree vd = build2 (MEM_REF, vtype,
build_fold_addr_expr (v1), off);
g = gimple_build_assign (vd, build_zero_cst (vtype));


@@ -3344,6 +3344,9 @@ recalculate_side_effects (tree t)
return;
default:
if (code == SSA_NAME)
/* No side-effects. */
return;
gcc_unreachable ();
}
}
@@ -13499,7 +13502,11 @@ gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
if (TREE_CODE (dtype) == REFERENCE_TYPE)
dtype = TREE_TYPE (dtype);
/* FIRSTPRIVATE_POINTER doesn't work well if we have a
multiply-indirected pointer. */
multiply-indirected pointer. If we have a reference to a pointer to
a pointer, it's possible that this should really be
GOMP_MAP_FIRSTPRIVATE_REFERENCE -- but that also doesn't work at the
moment, so stick with this. (See PR113279 and testcases
baseptrs-{4,6}.C:ref2ptrptr_offset_decl_member_slice). */
if (TREE_CODE (dtype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (dtype)) == POINTER_TYPE)
OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
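	    /* Illustrative only -- the exact shapes are in the baseptrs-4.C
	       and baseptrs-6.C testcases; a hypothetical reduced form is a
	       map whose base is reached through a reference to a
	       multiply-indirected pointer:

		 void f (int **&r, int n)
		 {
		 #pragma omp target map(tofrom: r[0][0:n])
		   r[0][0] = n;
		 }

	       For such bases the firstprivate-pointer mapping is replaced by
	       GOMP_MAP_POINTER here.  */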
@@ -17756,6 +17763,9 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
case TREE_LIST:
gcc_unreachable ();
case OMP_ARRAY_SECTION:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;

File diff suppressed because it is too large.


@@ -34,11 +34,6 @@ DEFINITION MODULE Sets ;
FROM SymbolKey IMPORT PerformOperation ;
EXPORT QUALIFIED Set,
InitSet, KillSet,
IncludeElementIntoSet, ExcludeElementFromSet,
NoOfElementsInSet, IsElementInSet,
ForeachElementInSetDo, DuplicateSet ;
TYPE
Set ;
@@ -101,4 +96,11 @@ PROCEDURE ExcludeElementFromSet (s: Set; i: CARDINAL) ;
PROCEDURE IncludeElementIntoSet (s: Set; i: CARDINAL) ;
(*
EqualSet - return TRUE if left = right.
*)
PROCEDURE EqualSet (left, right: Set) : BOOLEAN ;
END Sets.


@@ -31,9 +31,9 @@ FROM Assertion IMPORT Assert ;
CONST
BitsetSize = SIZE(BITSET) ;
MaxBitset = MAX(BITSET) ;
BitsPerByte = (MaxBitset+1) DIV BitsetSize ;
BitsetSize = SIZE (BITSET) ;
MaxBitset = MAX (BITSET) ;
BitsPerByte = (MaxBitset + 1) DIV BitsetSize ;
Debugging = FALSE ;
TYPE
@@ -315,4 +315,59 @@
END IncludeElementIntoSet ;
(*
EqualSet - return TRUE if left = right.
*)
PROCEDURE EqualSet (left, right: Set) : BOOLEAN ;
VAR
v : PtrToByte ;
lptr,
rptr: PtrToBitset ;
last,
el : CARDINAL ;
BEGIN
IF (left^.init = right^.init) AND
(left^.start = right^.start) AND
(left^.end = right^.end) AND
(left^.elements = right^.elements)
THEN
(* Now check contents. *)
el := left^.start ;
last := left^.end ;
WHILE el <= last DO
lptr := findPos (left^.pb, el) ;
rptr := findPos (right^.pb, el) ;
IF el + BitsetSize < last
THEN
(* We can check complete bitset, *)
IF lptr^ # rptr^
THEN
RETURN FALSE
END ;
INC (el, BitsetSize) ;
v := PtrToByte (lptr) ;
INC (v, BitsetSize) ; (* Avoid implications of C address arithmetic in mc PtrToByte *)
lptr := PtrToBitset (v) ;
v := PtrToByte (rptr) ;
INC (v, BitsetSize) ; (* Avoid implications of C address arithmetic in mc PtrToByte *)
rptr := PtrToBitset (v)
ELSE
(* We must check remaining bits only. *)
WHILE (el <= last) AND (el >= left^.init) DO
IF IsElementInSet (left, el) # IsElementInSet (right, el)
THEN
RETURN FALSE
END ;
INC (el)
END ;
RETURN TRUE
END
END ;
RETURN TRUE
END ;
RETURN FALSE
END EqualSet ;
END Sets.


@@ -2641,7 +2641,8 @@ fill_slots_from_thread (rtx_jump_insn *insn, rtx condition,
arithmetic insn after the jump insn and put the arithmetic insn in the
delay slot. If we can't do this, return. */
if (delay_list->is_empty () && likely
&& new_thread && !ANY_RETURN_P (new_thread)
&& new_thread
&& !ANY_RETURN_P (new_thread)
&& NONJUMP_INSN_P (new_thread)
&& !RTX_FRAME_RELATED_P (new_thread)
&& GET_CODE (PATTERN (new_thread)) != ASM_INPUT
@@ -2729,14 +2730,16 @@ fill_slots_from_thread (rtx_jump_insn *insn, rtx condition,
gcc_assert (thread_if_true);
if (new_thread && simplejump_or_return_p (new_thread)
if (new_thread
&& simplejump_or_return_p (new_thread)
&& redirect_with_delay_list_safe_p (insn,
JUMP_LABEL (new_thread),
*delay_list))
new_thread = follow_jumps (JUMP_LABEL (new_thread), insn,
&crossing);
new_thread = follow_jumps (JUMP_LABEL (new_thread), insn, &crossing);
if (ANY_RETURN_P (new_thread))
if (!new_thread)
label = find_end_label (simple_return_rtx);
else if (ANY_RETURN_P (new_thread))
label = find_end_label (new_thread);
else if (LABEL_P (new_thread))
label = new_thread;

Some files were not shown because too many files have changed in this diff.