-#include <cstdio>
-#include <cstring>
 #include <unordered_set>
 #include <unordered_map>
-#include <fstream>
-#include <cstdlib>
-#include <sstream>
-#include <limits>
 #include <queue>
 
 #include "vtr_assert.h"
 
 #include "read_xml_arch_file.h"
 #include "globals.h"
-#include "atom_netlist.h"
 #include "prepack.h"
 #include "pack_types.h"
 #include "pack.h"
-#include "read_blif.h"
 #include "cluster.h"
 #include "SetupGrid.h"
 #include "re_cluster.h"
+#include "noc_aware_cluster_util.h"
 
 /* #define DUMP_PB_GRAPH 1 */
 /* #define DUMP_BLIF_INPUT 1 */
@@ -42,127 +35,6 @@ static bool try_size_device_grid(const t_arch& arch,
  */
 static int count_models(const t_model* user_models);
 
-static std::vector<AtomBlockId> find_noc_router_atoms() {
-    const auto& atom_ctx = g_vpr_ctx.atom();
-
-    // NoC router atoms are expected to have a specific blif model
-    const std::string noc_router_blif_model_name = "noc_router_adapter_block";
-
-    // stores found NoC router atoms
-    std::vector<AtomBlockId> noc_router_atoms;
-
-    // iterate over all atoms and find those whose blif model matches
-    for (auto atom_id : atom_ctx.nlist.blocks()) {
-        const t_model* model = atom_ctx.nlist.block_model(atom_id);
-        if (noc_router_blif_model_name == model->name) {
-            noc_router_atoms.push_back(atom_id);
-        }
-    }
-
-    return noc_router_atoms;
-}
-
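-/* Groups each NoC router atom, together with every atom reachable from it through
- * low-fanout nets, into a floorplan partition that carries a unique exclusivity index. */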
-static void update_noc_reachability_partitions(const std::vector<AtomBlockId>& noc_atoms) {
-    const auto& atom_ctx = g_vpr_ctx.atom();
-    auto& constraints = g_vpr_ctx.mutable_floorplanning().constraints;
-    const auto& high_fanout_thresholds = g_vpr_ctx.cl_helper().high_fanout_thresholds;
-    const auto& device_ctx = g_vpr_ctx.device();
-    const auto& grid = device_ctx.grid;
-
-    t_logical_block_type_ptr logic_block_type = infer_logic_block_type(grid);
-    const size_t high_fanout_threshold = high_fanout_thresholds.get_threshold(logic_block_type->name);
-
-    // get the total number of atoms
-    const size_t n_atoms = atom_ctx.nlist.blocks().size();
-
-    vtr::vector<AtomBlockId, bool> atom_visited(n_atoms, false);
-
-    int exclusivity_cnt = 0;
-
-    RegionRectCoord unconstrained_rect{0,
-                                       0,
-                                       std::numeric_limits<int>::max(),
-                                       std::numeric_limits<int>::max(),
-                                       0};
-    Region unconstrained_region;
-    unconstrained_region.set_region_rect(unconstrained_rect);
-
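-    // flood outward from each NoC router and pull every reached atom into that router's partition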
-    for (auto noc_atom_id : noc_atoms) {
-        // check if this NoC router has already been visited
-        if (atom_visited[noc_atom_id]) {
-            continue;
-        }
-
-        exclusivity_cnt++;
-
-        PartitionRegion associated_noc_partition_region;
-        associated_noc_partition_region.set_exclusivity_index(exclusivity_cnt);
-        associated_noc_partition_region.add_to_part_region(unconstrained_region);
-
-        Partition associated_noc_partition;
-        associated_noc_partition.set_name(atom_ctx.nlist.block_name(noc_atom_id));
-        associated_noc_partition.set_part_region(associated_noc_partition_region);
-        auto associated_noc_partition_id = (PartitionId)constraints.get_num_partitions();
-        constraints.add_partition(associated_noc_partition);
-
-        const PartitionId noc_partition_id = constraints.get_atom_partition(noc_atom_id);
-
-        if (noc_partition_id == PartitionId::INVALID()) {
-            constraints.add_constrained_atom(noc_atom_id, associated_noc_partition_id);
-        } else { // noc atom is already in a partition
-            auto& noc_partition = constraints.get_mutable_partition(noc_partition_id);
-            auto& noc_partition_region = noc_partition.get_mutable_part_region();
-            VTR_ASSERT(noc_partition_region.get_exclusivity_index() < 0);
-            noc_partition_region.set_exclusivity_index(exclusivity_cnt);
-        }
-
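-        // breadth-first traversal seeded with this NoC router atom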
-        std::queue<AtomBlockId> q;
-        q.push(noc_atom_id);
-        atom_visited[noc_atom_id] = true;
-
-        while (!q.empty()) {
-            AtomBlockId current_atom = q.front();
-            q.pop();
-
-            PartitionId atom_partition_id = constraints.get_atom_partition(current_atom);
-            if (atom_partition_id == PartitionId::INVALID()) {
-                constraints.add_constrained_atom(current_atom, associated_noc_partition_id);
-            } else {
-                auto& atom_partition = constraints.get_mutable_partition(atom_partition_id);
-                auto& atom_partition_region = atom_partition.get_mutable_part_region();
-                VTR_ASSERT(atom_partition_region.get_exclusivity_index() < 0 || current_atom == noc_atom_id);
-                atom_partition_region.set_exclusivity_index(exclusivity_cnt);
-            }
-
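-            // expand through this atom's pins, skipping nets at or above the high-fanout threshold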
-            for (auto pin : atom_ctx.nlist.block_pins(current_atom)) {
-                AtomNetId net_id = atom_ctx.nlist.pin_net(pin);
-                size_t net_fanout = atom_ctx.nlist.net_sinks(net_id).size();
-
-                if (net_fanout >= high_fanout_threshold) {
-                    continue;
-                }
-
-                AtomBlockId driver_atom_id = atom_ctx.nlist.net_driver_block(net_id);
-                if (!atom_visited[driver_atom_id]) {
-                    q.push(driver_atom_id);
-                    atom_visited[driver_atom_id] = true;
-                }
-
-                for (auto sink_pin : atom_ctx.nlist.net_sinks(net_id)) {
-                    AtomBlockId sink_atom_id = atom_ctx.nlist.pin_block(sink_pin);
-                    if (!atom_visited[sink_atom_id]) {
-                        q.push(sink_atom_id);
-                        atom_visited[sink_atom_id] = true;
-                    }
-                }
-
-            }
-        }
-
-    }
-}
-
-
 bool try_pack(t_packer_opts* packer_opts,
               const t_analysis_opts* analysis_opts,
               const t_arch* arch,