Path: utzoo!utgpu!jarvis.csri.toronto.edu!rutgers!tut.cis.ohio-state.edu!LAB.ULTRA.NYU.EDU!andersnb
From: andersnb@LAB.ULTRA.NYU.EDU (Brian Anderson)
Newsgroups: gnu.g++.bug
Subject: Compiler catches signal 6 ...
Message-ID: <8909272041.AA04876@lab.ultra.nyu.edu>
Date: 27 Sep 89 20:41:56 GMT
Sender: daemon@tut.cis.ohio-state.edu
Distribution: gnu
Organization: GNUs Not Usenet
Lines: 445

Date: Wed, 27 Sep 89 16:14:38 EDT
From: Mail Delivery Subsystem 
Subject: Returned mail: User unknown
To: andersnb@shasha.cs.nyu.edu

   ----- Transcript of session follows -----
550 bug-g++... User unknown

   ----- Unsent message follows -----
Received:  by shasha.cs.nyu.edu (3.2/25-eef)
	id AA04631; Wed, 27 Sep 89 16:14:38 EDT
Date: Wed, 27 Sep 89 16:14:38 EDT
From: Brian G. Anderson 
Full-Name: Brian G. Anderson
Message-Id: <8909272014.AA04631@shasha.cs.nyu.edu>
To: bug-g++
Subject: g++ reports that cc1plus caught signal 6...

The two files included after this mail text (files.cc files.h) cause
g++ to report that cc1plus caught signal 6 when run with the following
command line:

g++ -c files.cc

The code has many coding errors (I was attempting to debug the code at
the time), but no other errors are reported except the signal error.
I run this on a SUN 3/50 with SUN OS 3.5.  My g++ compiler version is
"1.35.1-".  I poked around a little: I preprocessed the file seperatly
and ran cc1plus on the preprocessed file.  The program reported the
errors in the code.  After running cc1plus under gdb I could catch no
signal 6; cc1plus ran fine.  Could you see what's going on..

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Brian G. Anderson                                       |
NYU Ultracomputer Research Project                     |||
715 Broadway  Rm. 1006                                |||||
New York, NY  10003                                   |||||
(212) 998-3346                                     --- //\ ---
arpa:	andersnb@cmcl2                             ----/ \----
uucp:	{ihnp4,seismo}!cmcl2!andersnb              ----   ----


*************files.h**************
/* Files.h
   contains the declarations for the memory buffer class and the tuple
file layout routines.
*/

#include 
#include "errors.h"

/* The in memory page structures.  Memory pages are variable length,
but must have enough room for
sizeof(weight_val)+sizeof(int)+sizeof(owner_val).  Any remaining space
is referenced through the data pointer.  A disk page must fit into the
data space of a memory page.  Each page has an owner and a weight when
it is in use.  The greater the weight the greater the difficulty in
"lifting" the pages contents out of memory to make room.  */

typedef int weight_val;
typedef int owner_val;
typedef int page_idx;

/* Header of a single in-memory buffer page.  The payload begins at
   'data' (zero-length-array idiom) and runs to the pool's configured
   page size.  A page is "free" when its owner id is 0; 'heavy' is the
   replacement weight — the greater the weight, the harder the page is
   to evict. */
class mem_page {
 private:
  // Bit positions within 'flags'.
  enum {dirty=0, locked = 1, pinned = 2};
  weight_val heavy;    // eviction weight while the page is in use
  long flags;          // bit set of dirty/locked/pinned
  owner_val own_id;    // owning object id; 0 == page is free

 public:
  char data[0];        // payload; extent is page_size - sizeof(mem_page)

  // Claim a free page for owner 'own' with eviction weight 'wght'.
  // BUG FIX: the original tested the *incoming* owner ('own != 0')
  // instead of the page's current owner, so claiming an already-owned
  // page was never detected.
  inline void in_use(const owner_val own, const weight_val wght)
    {if(own_id != 0) fatal("mem_page::in_use","page is not free");
     own_id = own; heavy = wght;}
  inline void clearflags() {flags=0;}
  inline void soiled() {setbit(flags,dirty);}
  inline void clean() {clearbit(flags,dirty);}
  inline int issoiled() {return testbit(flags, dirty);}
  inline void lock() {setbit(flags,locked);}
  inline void unlock() {clearbit(flags,locked);}
  inline int islocked() {return testbit(flags,locked);}
  inline void free() {own_id=0;}
  inline owner_val owner() {return own_id;}
  inline weight_val weight() {return heavy;}
};

/* This is the memory pages class.  The programmer specifies the
number of pages and the size at compiler time.  This class is meant to
be inherited for use by objects that will use the memory pages buffer
pool to bring things in and out of memory from disk.  On shared
everything machine architectures, objects derived from mem_pages are
guaranteed to be in shared memory.  When this is working fully,
concurrent access will be supported.*/

/* Buffer pool of n_pages fixed-size pages held in one contiguous
   allocation.  Size and count are fixed at construction.  Meant to be
   inherited by objects that stage data between disk and memory. */
class mem_pages: errors {
 private:
  int p_size;     // bytes per page (mem_page header + payload)
  int n_pages;    // number of pages in the pool
  char *pages;    // contiguous backing store, n_pages * p_size bytes

 public:
  inline mem_pages(int size, int number) {
    pages = new char[size*number];
    p_size= size;
    n_pages = number;
  }

  // BUG FIX: storage comes from new[]; the original used scalar delete.
  inline ~mem_pages() { delete[] pages;}

  // Return a reference to page 'idx'.
  // BUG FIX: valid indices are 0..n_pages-1; the original accepted
  // idx == n_pages (off by one) and any negative index.
  inline mem_page& operator[](page_idx idx) {
    // check if the index is in the range of the object;
    if(idx < 0 || idx >= n_pages) error("mem_pages::[]","invalid index");

    return *((mem_page*)(&pages[idx*p_size]));
  }

  // Map a pointer into the pool back to its page index.
  // BUG FIXES: the original subtracted in the wrong order
  // (pages - ptr), always producing a non-positive index, did
  // arithmetic on void*, and accepted the one-past-the-end offset.
  inline int page(void *ptr) {
    if(ptr < (void*) pages || (char*)ptr - pages >= n_pages*p_size)
      fatal("mem_pages::lock","pointer not in range of memory pages");
    return ((long)ptr - (long)pages)/p_size;
  }
  inline void lock(int idx) {(*this)[idx].lock();}
  inline void lock(void *ptr) {(*this)[page(ptr)].lock();}
  inline void unlock(int idx) {(*this)[idx].unlock();}
  inline void unlock(void *ptr) {(*this)[page(ptr)].unlock();}
  // BUG FIX: the original fell off the end without returning a value.
  inline int islocked(int idx) {return (*this)[idx].islocked();}
  inline int islocked(void *ptr) {return (*this)[page(ptr)].islocked();}

  inline int number_pages() {return n_pages;}
  inline int page_size() {return p_size;}

  // Find a free page, claiming it for the given owner and weight.
  page_idx find_free(int, int);

//  int save_page()=0;
};


**********
******files.cc**********
#include "files.h"

/* Memory Pages Methods */

/* Claim the first free page for owner 'own' with weight 'wgt'.
   NOTE(review): from here on the posted text is mangled in transit —
   everything between a '<' and the following '>' was stripped, so the
   loop condition/body of find_free and the opening of a struct
   declaration (see the stray "};" below, and the hash()/slots_used
   references later, suggesting a hash_header struct) are lost.
   Recover the body from the original source; do not attempt to
   compile this fragment as-is. */
int mem_pages::find_free(int own, int wgt) {
  for(int i=0; i slots_used -1 ) t_idx & ~(1 << (t_sig -1));
  }
};

/* One bucket page of the hash table.  Buckets hashing to the same
   slot form a chain linked through 'link' (a page index; 0 ends the
   chain). */
struct hash_bucket {
  page_idx link;   // page index of the next bucket in the chain; 0 = end
  int n_elem;      // count of elements stored in this bucket
                   // NOTE(review): code below writes 'n_elements' —
                   // field-name mismatch; one of the two is wrong.
  char data[0];    // element storage begins here (zero-length array idiom)
};

/* Hash pointer methods */

/* Look up 'index' in the extendible hash table: return a contents_ptr
   to the matching element, or claim a free slot in the proper bucket
   chain if absent, splitting an overfull chain first.
   NOTE(review): this is the reporter's deliberately broken code (the
   cc1plus signal-6 trigger); defects are flagged inline but the code
   itself is preserved byte-for-byte. */
contents_ptr hash_ptr::operator[](int index){
  // NOTE(review): page_size is a member function; needs '()' here.
  const int max_on_page =  (mem->page_size - sizeof(struct hash_header))/
    sizeof(page_idx);
  
  // lock the head of the hash table in memory
  // int keep = mem->islocked(idx);
  // mem->lock(idx);
  // 'idx' is presumably a hash_ptr member naming the table's head page.
  struct hash_header *h_head = (struct hash_header *) (*mem)[idx].data;
  
  // does the header have any elements in it?
  if(!h_head->slots_used) {
    // the table is completely empty
    h_head->slots_used = 1;
    h_head->data[0] = mem->find_free(1,1);
    // NOTE(review): '=' should be '=='; as written this zeroes the
    // slot just assigned and the error branch can never be taken.
    if(h_head->data[0] = 0)
      fatal("hash_ptr::[]","find_free unable to find free page");
    
    struct hash_bucket *b_ptr = (struct hash_bucket*)(*mem)[idx].data;
    // NOTE(review): 'h_bucket' is undeclared — presumably 'b_ptr' was
    // intended on the next two lines.
    h_bucket->n_elem = 0;
    h_bucket->link = 0;
    // new page so it's dirty
    (*mem)[idx].soiled();
    // NOTE(review): bare 'return' in a function returning contents_ptr.
    return;
  }
  
  // find the appropriate bucket chain.  first find the number of significant
  // digits and mask all but those from index.  If index is still greater than
  // the number of entries in the table the most significant bit of the index
  // will be removed to yield a valid index.  This index finds
  // the bucket pointer to the first bucket in the chain of buckets for 
  // this hash value.
  int e_idx = h_head->hash(index);
  // NOTE(review): 'hash_table' is not declared anywhere in this file
  // and the initializer is a hash_header* — type mismatch here and in
  // the parallel declarations below.
  struct hash_table* e_table = h_head;
  int o_e_idx = e_idx;
  
  // move to the appropriate header page
  struct hash_bucket* e_ptr = find_index(e_table, o_e_idx);
  struct hash_bucket* b_ptr = e_ptr;
  
  // we have a bucket chain, follow it looking for desired index
  int ff= 0, pages_seen = 0;
  
  while(1){
    // NOTE(review): 'content_ptr' — presumably 'contents_ptr'.
    content_ptr c_ptr(*this);
    
    // Check if contents of bucket is empty
    if(!first(mem->page(b_ptr), c_ptr))
      fatal("hash_ptr::[]","found empty bucket in chain");
    
    // start searching for element in the bucket
    do {
      // NOTE(review): 'index' is the int parameter; 'index(c_ptr)'
      // cannot compile — a same-named helper was probably intended.
      if(index(c_ptr) == index)
	return c_ptr;
    } while(next(c_ptr));
    
    // didn't find any element with the desired index
    // is there any space that could be used later?
    // NOTE(review): XOR-accumulating a free count looks suspicious;
    // '+=' or '|=' may have been intended — confirm against design.
    ff ^= n_free(c_ptr);
    pages_seen++;
    
    // follow link to next bucket
    if(b_ptr->link)
      b_ptr = (struct hash_bucket *)(*mem)[b_ptr->link].data;
    else break;
  }
  
  // The index wasn't found. Check if there are too many buckets on this chain
  // or the element we will add is going to overflow
  
  if(pages_seen > bucket_max || (pages_seen + 1 == bucket_max && !ff)){
    // calculate the bucket indices
    int s_l_idx = h_head->hash(h_head->slots_used);
    int s_h_idx = ++(h_head->slots_used);
    
    // are we splitting the bucket we will add to?
    int split_add = (e_idx == s_h_idx);
    
    // move to low bucket
    struct hash_table* s_l_table = h_head;
    struct hash_bucket* s_l_ptr = find_index(s_l_table, s_l_idx);
    if(s_l_idx == -1)
      fatal("hash_ptr::[]","unable to find low element in hash table");
    
    // move to high bucket, but we might have to create it first
    struct hash_table* s_h_table = s_l_table;
    struct hash_bucket* s_h_ptr = find_index(s_h_table, s_h_idx);
    if(s_h_idx == -1){
      s_h_table->link = mem->find_free(1,1);
      s_h_table = (struct hash_bucket*)(*mem)[s_h_table->link].data;
      s_h_table->link = 0;
      s_h_idx = 0;
    }
    
    // set up for splitting the buckets
    struct hash_bucket* source_ptr = s_l_ptr;
    s_l_table->data[s_l_idx] = mem->find_free(1,1);
    s_l_ptr = (struct hash_bucket*)(*mem)[s_l_table->data[s_l_idx]].data;
    s_l_ptr->link = 0;
    // NOTE(review): the struct field is 'n_elem', not 'n_elements',
    // here and in every other assignment below.
    s_l_ptr->n_elements = 0;
    s_h_table->data[s_h_idx] = mem->find_free(1,1);
    s_h_ptr = (struct hash_bucket*)(*mem)[s_h_table->data[s_h_idx]].data;
    // NOTE(review): these two lines re-initialize s_l_ptr; the parallel
    // code just above suggests s_h_ptr was intended.
    s_l_ptr->link = 0;
    s_l_ptr->n_elements = 0;
    
    // if added element will move then point to high bucket if that is new
    // hash bucket.
    if(split_add && h_head->hash(index) == s_h_idx){
      e_table = s_h_table;
      e_ptr = s_h_ptr;
      o_e_idx = s_h_idx;
    }
    
    // move down source chain moving to appropriate high/low bucket according
    // to the new hashed index.
    contents_ptr s_c_ptr(*this);
    if(!first(mem->page(source_ptr), s_c_ptr))
      fatal("hash_ptr::[]","found empty bucket in chain");
    contents_ptr h_c_ptr(*this);
    contents_ptr l_c_ptr(*this);
    // NOTE(review): 'while()' has no condition — not valid C++; a
    // likely candidate for what crashed cc1plus (signal 6).
    while(){
      // NOTE(review): 'd_c_ptr' and 'd_ptr' are used throughout this
      // loop but never declared.
      if(h_head->hash(s_c_ptr.index) == s_h_idx){
	d_c_ptr = &h_c_ptr;
	d_ptr = &s_h_ptr;
      } else if(h_head->hash(s_c_ptr.index) == s_l_idx){
	d_c_ptr = &l_c_ptr;
	d_ptr = &s_l_ptr;
      } else
	fatal("hash_ptr::[]","index doesn't agree with any split index");

      // NOTE(review): 'h_head->get_free(...)' here vs a plain
      // 'get_free(...)' call near the end of this function — one of
      // the two call forms is presumably wrong.
      if(!h_head->get_free(mem->page(*d_ptr),*d_c_ptr)){
	// no more space in this bucket so make new one
	(*d_ptr)->link = mem->find_free(1, 1);
	*d_ptr = (struct hash_bucket*)(*mem)[(*d_ptr)->link].data;
	(*d_ptr)->link = 0;
	(*d_ptr)->n_elements = 0;
      }

      // move the item
      move(s_c_ptr,*d_c_ptr);

      // move to next item
      if(!s_c_ptr.next()){
	// no more items in this unit so move to next in bucket
	if(!source_ptr->link){
	  (*mem)[mem->page(source_ptr)].free();
	  break;
	}
	if(!first(source_ptr->link, s_c_ptr))
	  fatal("hash_ptr::[]","empty bucket on chain");
	
	// move to next bucket and free the old one
	struct hash_bucket* old = source_ptr;
	source_ptr = (struct hash_bucket*)(*mem)[source_ptr->link].data;
	(*mem)[mem->page(old)].free();
      }
}}
  
// everything is kosher now so get a free element from the proper bucket
// NOTE(review): indentation drops to column 0 here but this code is
// still inside operator[] — the braces above do balance it.
while(1){
  // NOTE(review): 'content_ptr' — presumably 'contents_ptr'.
  content_ptr c_ptr(*this);
  
  // didn't find any element with the desired index
  // is there any space that could be used later?
  if(get_free(mem->page(e_ptr),c_ptr))
    return c_ptr;
  
  // didn't find any thing, follow link to next bucket
  if(e_ptr->link)
    e_ptr = (struct hash_bucket *)(*mem)[e_ptr->link].data;
  else {
    // ran out of buckets so make a new one
    // NOTE(review): the fresh bucket's element count is never
    // initialized here (cf. the split path, which zeroes it).
    e_ptr->link = mem->find_free(1 , 1);
    e_ptr = (struct hash_bucket *)(*mem)[e_ptr->link].data;
    e_ptr->link = 0;
  }
}
}

/* Walk the chain of hash-header pages to locate the header page and
   in-page offset for logical slot 'idx'; on success set idx to the
   in-page offset and return the bucket that slot points at.  If the
   chain is too short, set idx to -1 and return 0.
   NOTE(review): 'h_table' is declared as a reference but is used with
   '->' and reassigned from a pointer cast below — the parameter was
   presumably meant to be 'struct hash_header*&' (callers pass a
   pointer variable they expect to be advanced).  As written this
   cannot compile. */
struct hash_bucket *hash_ptr::find_index(struct hash_header& h_table,
					 int& idx){
  // NOTE(review): page_size is a member function; needs '()' here.
  const int max_on_page =  (mem->page_size - sizeof(struct hash_header))/
    sizeof(page_idx);

  int skip = idx / max_on_page;     // how many header pages to skip
  int offset = idx % max_on_page;   // slot within the final header page

  // skip to the appropriate header page
  for(;skip;skip--){
    // check if there is a link to follow
    if(!h_table->link){
      idx = -1;
      return 0;
    }

    h_table = (struct hash_header *) (*mem)[h_table->link].data;
  }

  idx = offset;

  // return the pointer to the bucket
  return (struct hash_bucket *)(*mem)[h_table->data[offset]].data;
}