-rw-r--r-- | gcl/o/alloc.c |  5
-rwxr-xr-x | gcl/o/gbc.c   | 27
-rwxr-xr-x | gcl/o/main.c  |  4
-rwxr-xr-x | gcl/o/sgbc.c  | 23
diff --git a/gcl/o/alloc.c b/gcl/o/alloc.c
index a160667e0..8bc99738e 100644
--- a/gcl/o/alloc.c
+++ b/gcl/o/alloc.c
@@ -618,8 +618,11 @@ add_pages(struct typemanager *tm,fixnum m) {

   case t_relocatable:
-    if (rb_pointer>rb_end)
+    if (rb_pointer>rb_end) {
+      fprintf(stderr,"Moving relblock low before expanding relblock pages\n");
+      fflush(stderr);
       GBC(t_relocatable);
+    }
     nrbpage+=m;
     rb_end+=m*PAGESIZE;
     rb_limit+=m*PAGESIZE;
diff --git a/gcl/o/gbc.c b/gcl/o/gbc.c
index 3fe73e959..a5236e427 100755
--- a/gcl/o/gbc.c
+++ b/gcl/o/gbc.c
@@ -1292,22 +1292,31 @@ GBC(enum type t) {
       sgc_enabled ? rb_start :
 #endif
       heap_end+holepage*PAGESIZE,*new_end=new_start+nrbpage*PAGESIZE;
+    char *start=rb_pointer<rb_end ? rb_start : rb_end;
+    ufixnum size=rb_pointer-start;

     rb_pointer=(rb_pointer<rb_end) ? rb_end : rb_start;
     rb_limit=rb_pointer+(new_end-new_start)-2*RB_GETA;
     relb_shift=0;
-    if (new_start<rb_start) {
-      if (rb_pointer==rb_start)
-        rb_pointer=new_start;
+    if (new_start!=rb_start) {
+      if ((new_start<start && new_start+size>=start) ||
+          (new_start<start+size && new_start+size>=start+size))
+        relb_shift=new_start-rb_pointer;
       else
-        relb_shift=new_start-rb_pointer;
-    } else if (new_start>rb_start) {
-      if (rb_pointer==rb_end)
-        rb_pointer=new_end;
-      else
-        relb_shift=new_end-rb_pointer;
+        rb_pointer=new_start;
     }
+    /* if (new_start<rb_start) { */
+    /*   if (rb_pointer==rb_start) */
+    /*     rb_pointer=new_start; */
+    /*   else */
+    /*     relb_shift=new_start-rb_pointer; */
+    /* } else if (new_start>rb_start) { */
+    /*   if (rb_pointer==rb_end) */
+    /*     rb_pointer=new_start; */
+    /*   else */
+    /*     relb_shift=new_end-rb_pointer; */
+    /* } */

     alloc_page(-(holepage+2*nrbpage));
diff --git a/gcl/o/main.c b/gcl/o/main.c
index 0589e8a42..e34dacdb0 100755
--- a/gcl/o/main.c
+++ b/gcl/o/main.c
@@ -270,6 +270,10 @@ minimize_image(void) {
   holepage=new_holepage=1;
   GBC(t_relocatable);
   if (in_sgc) sgc_start();
+  if (rb_pointer>rb_end) {
+    fprintf(stderr,"Moving relblock low before image save\n");
+    fflush(stderr);
+  }
   new = (void *)(((((ufixnum)rb_pointer)+ PAGESIZE-1)/PAGESIZE)*PAGESIZE);
   core_end = new;
   rb_end=rb_limit=new;
diff --git a/gcl/o/sgbc.c b/gcl/o/sgbc.c
index 9e0f53a53..61b2c587a 100755
--- a/gcl/o/sgbc.c
+++ b/gcl/o/sgbc.c
@@ -1196,18 +1196,25 @@ sgc_start(void) {

 /* Now allocate the sgc relblock.  We do this as the tail end of the ordinary rb.  */
   {
-    char *new;
+    /* char *new; */
     tm=tm_of(t_relocatable);
     {
+      if (rb_pointer>rb_end) {
+        fprintf(stderr,"Moving relblock low at sgc start\n");
+        fflush(stderr);
+        GBC(t_relocatable);
+      }
       old_rb_start=rb_start;
-      if(((unsigned long)WSGC(tm)) && allocate_more_pages) {
-        new=alloc_relblock(((unsigned long)WSGC(tm))*PAGESIZE);
-        /* the above may cause a gc, shifting the relblock */
-        old_rb_start=rb_start;
-        new= PAGE_ROUND_UP(new);
-      } else new=PAGE_ROUND_UP(rb_pointer);
-      rb_start=rb_pointer=new;
+      rb_start=rb_pointer=PAGE_ROUND_UP(rb_pointer);
+
+      /* if(((unsigned long)WSGC(tm)) && allocate_more_pages) { */
+      /*   new=alloc_relblock(((unsigned long)WSGC(tm))*PAGESIZE); */
+      /*   /\* the above may cause a gc, shifting the relblock *\/ */
+      /*   old_rb_start=rb_start; */
+      /*   new= PAGE_ROUND_UP(new); */
+      /* } else new=PAGE_ROUND_UP(rb_pointer); */
+      /* rb_start=rb_pointer=new; */
     }
   }
 /* the relblock has been allocated */