freedreno/ir3: new pre-RA scheduler

This replaces the depth-first search scheduler with a more traditional
ready-list scheduler.  It primarily tries to reduce register pressure
(the number of live values), with one exception: it tries to schedule
kills as early as possible.  (Earlier iterations of this scheduler had
a tendency to push kills later, in particular moving texture fetches,
which may turn out to be unnecessary, ahead of kills.)
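
To illustrate the heuristic described above, here is a minimal,
self-contained sketch of a ready-list selection loop that prefers kills
and otherwise picks the candidate that reduces the number of live values
the most.  It is not the ir3 implementation; the struct layout and the
names used (sched_node, live_delta, better_candidate, sched_block) are
assumptions made for the example, and live_delta is assumed to be
precomputed per candidate rather than derived from real liveness info.

#include <stdbool.h>

struct sched_node {
	bool is_kill;                /* kill instructions are scheduled ASAP */
	bool scheduled;              /* already emitted into the schedule */
	int live_delta;              /* (#defs made live) - (#srcs that die) */
	int unscheduled_preds;       /* dependencies not yet scheduled */
	int num_succs;
	struct sched_node **succs;   /* nodes that depend on this one */
};

/* true if 'a' is a better pick than 'b' for the next slot: */
static bool
better_candidate(const struct sched_node *a, const struct sched_node *b)
{
	if (a->is_kill != b->is_kill)
		return a->is_kill;                 /* kills as early as possible */
	return a->live_delta < b->live_delta;      /* then minimize pressure */
}

/* Schedule one basic block: 'nodes' holds the block's dependency DAG,
 * 'order' receives the chosen instruction order.  O(n^2) for brevity;
 * a real implementation would maintain an explicit ready list.
 */
static void
sched_block(struct sched_node **nodes, int n, struct sched_node **order)
{
	for (int emitted = 0; emitted < n; emitted++) {
		struct sched_node *best = NULL;

		/* "ready" == not yet scheduled and all dependencies satisfied;
		 * a well-formed DAG always has at least one ready node here.
		 */
		for (int i = 0; i < n; i++) {
			struct sched_node *cand = nodes[i];
			if (cand->scheduled || cand->unscheduled_preds > 0)
				continue;
			if (!best || better_candidate(cand, best))
				best = cand;
		}

		order[emitted] = best;
		best->scheduled = true;

		/* retire this node from its successors' dependency counts: */
		for (int s = 0; s < best->num_succs; s++)
			best->succs[s]->unscheduled_preds--;
	}
}

The real pass would also weigh things like instruction latency, but the
overall shape is the same: pick from the ready set, update liveness, and
unblock successors.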

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4440>
Rob Clark, 2019-11-22 11:14:25 -08:00, committed by Marge Bot
parent 0f22f85fe7
commit d2f4d332db
3 changed files with 443 additions and 413 deletions

src/freedreno/ir3/ir3.h

@@ -1190,7 +1190,6 @@ void ir3_remove_nops(struct ir3 *ir);
 
 /* depth calculation: */
 struct ir3_shader_variant;
-void ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list);
 void ir3_depth(struct ir3 *ir, struct ir3_shader_variant *so);
 
 /* fp16 conversion folding */
src/freedreno/ir3/ir3_depth.c

@@ -48,23 +48,6 @@
  * blocks depth sorted list, which is used by the scheduling pass.
  */
 
-void
-ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list)
-{
-	/* remove from existing spot in list: */
-	list_delinit(&instr->node);
-
-	/* find where to re-insert instruction: */
-	foreach_instr (pos, list) {
-		if (pos->depth > instr->depth) {
-			list_add(&instr->node, &pos->node);
-			return;
-		}
-	}
-	/* if we get here, we didn't find an insertion spot: */
-	list_addtail(&instr->node, list);
-}
-
 static void
 ir3_instr_depth(struct ir3_instruction *instr, unsigned boost, bool falsedep)
 {
@@ -97,8 +80,6 @@ ir3_instr_depth(struct ir3_instruction *instr, unsigned boost, bool falsedep)
 
 	if (!is_meta(instr))
 		instr->depth++;
-
-	ir3_insert_by_depth(instr, &instr->block->instr_list);
 }
 
 static bool

src/freedreno/ir3/ir3_sched.c: file diff suppressed because it is too large