Initial import of gofrontend repository.

Immediately preceding revision history may be found at
svn://gcc.gnu.org/svn/gcc/branches/gccgo, in the gcc/go and libgo
subdirectories.
diff --git a/libgo/runtime/array.h b/libgo/runtime/array.h
new file mode 100644
index 0000000..9ffbf6b
--- /dev/null
+++ b/libgo/runtime/array.h
@@ -0,0 +1,25 @@
+/* array.h -- the open array type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+/* An open array is an instance of this structure.  */
+
+struct __go_open_array
+{
+  /* The elements of the array.  In use in the compiler this is a
+     pointer to the element type.  */
+  void* __values;
+  /* The number of elements in the array.  Note that this is "int",
+     not "size_t".  The language definition says that "int" is large
+     enough to hold the size of any allocated object.  Using "int"
+     saves 8 bytes per slice header on a 64-bit system with 32-bit
+     ints.  */
+  int __count;
+  /* The capacity of the array--the number of elements that can fit in
+     the __VALUES field.  */
+  int __capacity;
+};
diff --git a/libgo/runtime/cgo2c.c b/libgo/runtime/cgo2c.c
new file mode 100644
index 0000000..a5f794e
--- /dev/null
+++ b/libgo/runtime/cgo2c.c
@@ -0,0 +1,730 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/* Translate a .cgo file into a .c file.  A .cgo file is a combination
+   of a limited form of Go with C.  */
+
+/*
+   package PACKAGENAME
+   {# line}
+   func NAME([NAME TYPE { , NAME TYPE }]) [(NAME TYPE { , NAME TYPE })] \{
+     C code with proper brace nesting
+   \}
+*/
+
+/* We generate C code which implements the function such that it can
+   be called from Go and executes the C code.  */
+
+#include <assert.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+/* Whether we're emitting for gcc */
+static int gcc;
+
+/* Package prefix to use; only meaningful for gcc */
+static const char *prefix;
+
+/* File and line number */
+static const char *file;
+static unsigned int lineno;
+
+/* List of names and types.  */
+struct params {
+	struct params *next;
+	char *name;
+	char *type;
+};
+
+/* index into type_table */
+enum {
+	Bool,
+	Float,
+	Int,
+	Uint,
+	Uintptr,
+	String,
+	Slice,
+};
+
+static struct {
+	char *name;
+	int size;
+} type_table[] = {
+	/* variable sized first, for easy replacement */
+	/* order matches enum above */
+	/* default is 32-bit architecture sizes */
+	"bool",		1,
+	"float",	4,
+	"int",		4,
+	"uint",		4,
+	"uintptr",	4,
+	"String",	8,
+	"Slice",	12,
+
+	/* fixed size */
+	"float32",	4,
+	"float64",	8,
+	"byte",		1,
+	"int8",		1,
+	"uint8",	1,
+	"int16",	2,
+	"uint16",	2,
+	"int32",	4,
+	"uint32",	4,
+	"int64",	8,
+	"uint64",	8,
+
+	NULL,
+};
+
+/* Fixed structure alignment (non-gcc only) */
+int structround = 4;
+
+/* Unexpected EOF.  */
+static void
+bad_eof(void)
+{
+	fprintf(stderr, "%s:%u: unexpected EOF\n", file, lineno);
+	exit(1);
+}
+
+/* Out of memory.  */
+static void
+bad_mem(void)
+{
+	fprintf(stderr, "%s:%u: out of memory\n", file, lineno);
+	exit(1);
+}
+
+/* Allocate memory without fail.  */
+static void *
+xmalloc(unsigned int size)
+{
+	void *ret = malloc(size);
+	if (ret == NULL)
+		bad_mem();
+	return ret;
+}
+
+/* Reallocate memory without fail.  */
+static void*
+xrealloc(void *buf, unsigned int size)
+{
+	void *ret = realloc(buf, size);
+	if (ret == NULL)
+		bad_mem();
+	return ret;
+}
+
+/* Free a list of parameters.  */
+static void
+free_params(struct params *p)
+{
+	while (p != NULL) {
+		struct params *next;
+
+		next = p->next;
+		free(p->name);
+		free(p->type);
+		free(p);
+		p = next;
+	}
+}
+
+/* Read a character, tracking lineno.  */
+static int
+getchar_update_lineno(void)
+{
+	int c;
+
+	c = getchar();
+	if (c == '\n')
+		++lineno;
+	return c;
+}
+
+/* Read a character, giving an error on EOF, tracking lineno.  */
+static int
+getchar_no_eof(void)
+{
+	int c;
+
+	c = getchar_update_lineno();
+	if (c == EOF)
+		bad_eof();
+	return c;
+}
+
+/* Read a character, skipping comments.  */
+static int
+getchar_skipping_comments(void)
+{
+	int c;
+
+	while (1) {
+		c = getchar_update_lineno();
+		if (c != '/')
+			return c;
+
+		c = getchar();
+		if (c == '/') {
+			do {
+				c = getchar_update_lineno();
+			} while (c != EOF && c != '\n');
+			return c;
+		} else if (c == '*') {
+			while (1) {
+				c = getchar_update_lineno();
+				if (c == EOF)
+					return EOF;
+				if (c == '*') {
+					do {
+						c = getchar_update_lineno();
+					} while (c == '*');
+					if (c == '/')
+						break;
+				}
+			}
+		} else {
+			ungetc(c, stdin);
+			return '/';
+		}
+	}
+}
+
+/* Read and return a token.  Tokens are delimited by whitespace or by
+   [(),{}].  The latter are all returned as single characters.  */
+static char *
+read_token(void)
+{
+	int c;
+	char *buf;
+	unsigned int alc, off;
+	const char* delims = "(),{}";
+
+	while (1) {
+		c = getchar_skipping_comments();
+		if (c == EOF)
+			return NULL;
+		if (!isspace(c))
+			break;
+	}
+	alc = 16;
+	buf = xmalloc(alc + 1);
+	off = 0;
+	if (strchr(delims, c) != NULL) {
+		buf[off] = c;
+		++off;
+	} else {
+		while (1) {
+			if (off >= alc) {
+				alc *= 2;
+				buf = xrealloc(buf, alc + 1);
+			}
+			buf[off] = c;
+			++off;
+			c = getchar_skipping_comments();
+			if (c == EOF)
+				break;
+			if (isspace(c) || strchr(delims, c) != NULL) {
+				if (c == '\n')
+					lineno--;
+				ungetc(c, stdin);
+				break;
+			}
+		}
+	}
+	buf[off] = '\0';
+	return buf;
+}
+
+/* Read a token, giving an error on EOF.  */
+static char *
+read_token_no_eof(void)
+{
+	char *token = read_token();
+	if (token == NULL)
+		bad_eof();
+	return token;
+}
+
+/* Read the package clause, and return the package name.  */
+static char *
+read_package(void)
+{
+	char *token;
+
+	token = read_token_no_eof();
+	if (strcmp(token, "package") != 0) {
+		fprintf(stderr,
+			"%s:%u: expected \"package\", got \"%s\"\n",
+			file, lineno, token);
+		exit(1);
+	}
+	return read_token_no_eof();
+}
+
+/* Read and copy preprocessor lines.  */
+static void
+read_preprocessor_lines(void)
+{
+	while (1) {
+		int c;
+
+		do {
+			c = getchar_skipping_comments();
+		} while (isspace(c));
+		if (c != '#') {
+			ungetc(c, stdin);
+			break;
+		}
+		putchar(c);
+		do {
+			c = getchar_update_lineno();
+			putchar(c);
+		} while (c != '\n');
+	}
+}
+
+/* Read a type in Go syntax and return a type in C syntax.  We only
+   permit basic types and pointers.  */
+static char *
+read_type(void)
+{
+	char *p, *op, *q;
+	int pointer_count;
+	unsigned int len;
+
+	p = read_token_no_eof();
+	if (*p != '*')
+		return p;
+	op = p;
+	pointer_count = 0;
+	while (*p == '*') {
+		++pointer_count;
+		++p;
+	}
+	len = strlen(p);
+	q = xmalloc(len + pointer_count + 1);
+	memcpy(q, p, len);
+	while (pointer_count > 0) {
+		q[len] = '*';
+		++len;
+		--pointer_count;
+	}
+	q[len] = '\0';
+	free(op);
+	return q;
+}
+
+/* Return the size of the given type. */
+static int
+type_size(char *p)
+{
+	int i;
+
+	if(p[strlen(p)-1] == '*')
+		return type_table[Uintptr].size;
+
+	for(i=0; type_table[i].name; i++)
+		if(strcmp(type_table[i].name, p) == 0)
+			return type_table[i].size;
+	fprintf(stderr, "%s:%u: unknown type %s\n", file, lineno, p);
+	exit(1);
+	return 0;
+}
+
+/* Read a list of parameters.  Each parameter is a name and a type.
+   The list ends with a ')'.  We have already read the '('.  */
+static struct params *
+read_params(int *poffset)
+{
+	char *token;
+	struct params *ret, **pp, *p;
+	int offset, size, rnd;
+
+	ret = NULL;
+	pp = &ret;
+	token = read_token_no_eof();
+	offset = 0;
+	if (strcmp(token, ")") != 0) {
+		while (1) {
+			p = xmalloc(sizeof(struct params));
+			p->name = token;
+			p->type = read_type();
+			p->next = NULL;
+			*pp = p;
+			pp = &p->next;
+
+			size = type_size(p->type);
+			rnd = size;
+			if(rnd > structround)
+				rnd = structround;
+			if(offset%rnd)
+				offset += rnd - offset%rnd;
+			offset += size;
+
+			token = read_token_no_eof();
+			if (strcmp(token, ",") != 0)
+				break;
+			token = read_token_no_eof();
+		}
+	}
+	if (strcmp(token, ")") != 0) {
+		fprintf(stderr, "%s:%u: expected ')'\n",
+			file, lineno);
+		exit(1);
+	}
+	if (poffset != NULL)
+		*poffset = offset;
+	return ret;
+}
+
+/* Read a function header.  This reads up to and including the initial
+   '{' character.  Returns 1 if it read a header, 0 at EOF.  */
+static int
+read_func_header(char **name, struct params **params, int *paramwid, struct params **rets)
+{
+	int lastline;
+	char *token;
+
+	lastline = -1;
+	while (1) {
+		token = read_token();
+		if (token == NULL)
+			return 0;
+		if (strcmp(token, "func") == 0) {
+			if(lastline != -1)
+				printf("\n");
+			break;
+		}
+		if (lastline != lineno) {
+			if (lastline == lineno-1)
+				printf("\n");
+			else
+				printf("\n#line %d \"%s\"\n", lineno, file);
+			lastline = lineno;
+		}
+		printf("%s ", token);
+	}
+
+	*name = read_token_no_eof();
+
+	token = read_token();
+	if (token == NULL || strcmp(token, "(") != 0) {
+		fprintf(stderr, "%s:%u: expected \"(\"\n",
+			file, lineno);
+		exit(1);
+	}
+	*params = read_params(paramwid);
+
+	token = read_token();
+	if (token == NULL || strcmp(token, "(") != 0)
+		*rets = NULL;
+	else {
+		*rets = read_params(NULL);
+		token = read_token();
+	}
+	if (token == NULL || strcmp(token, "{") != 0) {
+		fprintf(stderr, "%s:%u: expected \"{\"\n",
+			file, lineno);
+		exit(1);
+	}
+	return 1;
+}
+
+/* Write out parameters.  */
+static void
+write_params(struct params *params, int *first)
+{
+	struct params *p;
+
+	for (p = params; p != NULL; p = p->next) {
+		if (*first)
+			*first = 0;
+		else
+			printf(", ");
+		printf("%s %s", p->type, p->name);
+	}
+}
+
+/* Write a 6g function header.  */
+static void
+write_6g_func_header(char *package, char *name, struct params *params,
+		     int paramwid, struct params *rets)
+{
+	int first, n;
+
+	printf("void\n%s·%s(", package, name);
+	first = 1;
+	write_params(params, &first);
+
+	/* insert padding to align output struct */
+	if(rets != NULL && paramwid%structround != 0) {
+		n = structround - paramwid%structround;
+		if(n & 1)
+			printf(", uint8");
+		if(n & 2)
+			printf(", uint16");
+		if(n & 4)
+			printf(", uint32");
+	}
+
+	write_params(rets, &first);
+	printf(")\n{\n");
+}
+
+/* Write a 6g function trailer.  */
+static void
+write_6g_func_trailer(struct params *rets)
+{
+	struct params *p;
+
+	for (p = rets; p != NULL; p = p->next)
+		printf("\tFLUSH(&%s);\n", p->name);
+	printf("}\n");
+}
+
+/* Define the gcc function return type if necessary.  */
+static void
+define_gcc_return_type(char *package, char *name, struct params *rets)
+{
+	struct params *p;
+
+	if (rets == NULL || rets->next == NULL)
+		return;
+	printf("struct %s_%s_ret {\n", package, name);
+	for (p = rets; p != NULL; p = p->next)
+		printf("  %s %s;\n", p->type, p->name);
+	printf("};\n");
+}
+
+/* Write out the gcc function return type.  */
+static void
+write_gcc_return_type(char *package, char *name, struct params *rets)
+{
+	if (rets == NULL)
+		printf("void");
+	else if (rets->next == NULL)
+		printf("%s", rets->type);
+	else
+		printf("struct %s_%s_ret", package, name);
+}
+
+/* Write out a gcc function header.  */
+static void
+write_gcc_func_header(char *package, char *name, struct params *params,
+		      struct params *rets)
+{
+	int first;
+	struct params *p;
+
+	define_gcc_return_type(package, name, rets);
+	write_gcc_return_type(package, name, rets);
+	printf(" %s_%s(", package, name);
+	first = 1;
+	write_params(params, &first);
+	printf(") asm (\"");
+	if (prefix != NULL)
+	  printf("%s.", prefix);
+	printf("%s.%s\");\n", package, name);
+	write_gcc_return_type(package, name, rets);
+	printf(" %s_%s(", package, name);
+	first = 1;
+	write_params(params, &first);
+	printf(")\n{\n");
+	for (p = rets; p != NULL; p = p->next)
+		printf("  %s %s;\n", p->type, p->name);
+}
+
+/* Write out a gcc function trailer.  */
+static void
+write_gcc_func_trailer(char *package, char *name, struct params *rets)
+{
+	if (rets == NULL)
+		;
+	else if (rets->next == NULL)
+		printf("return %s;\n", rets->name);
+	else {
+		struct params *p;
+
+		printf("  {\n    struct %s_%s_ret __ret;\n", package, name);
+		for (p = rets; p != NULL; p = p->next)
+			printf("    __ret.%s = %s;\n", p->name, p->name);
+		printf("    return __ret;\n  }\n");
+	}
+	printf("}\n");
+}
+
+/* Write out a function header.  */
+static void
+write_func_header(char *package, char *name,
+		  struct params *params, int paramwid,
+		  struct params *rets)
+{
+	if (gcc)
+		write_gcc_func_header(package, name, params, rets);
+	else
+		write_6g_func_header(package, name, params, paramwid, rets);
+	printf("#line %d \"%s\"\n", lineno, file);
+}
+
+/* Write out a function trailer.  */
+static void
+write_func_trailer(char *package, char *name,
+		   struct params *rets)
+{
+	if (gcc)
+		write_gcc_func_trailer(package, name, rets);
+	else
+		write_6g_func_trailer(rets);
+}
+
+/* Read and write the body of the function, ending in an unnested }
+   (which is read but not written).  */
+static void
+copy_body(void)
+{
+	int nesting = 0;
+	while (1) {
+		int c;
+
+		c = getchar_no_eof();
+		if (c == '}' && nesting == 0)
+			return;
+		putchar(c);
+		switch (c) {
+		default:
+			break;
+		case '{':
+			++nesting;
+			break;
+		case '}':
+			--nesting;
+			break;
+		case '/':
+			c = getchar_update_lineno();
+			putchar(c);
+			if (c == '/') {
+				do {
+					c = getchar_no_eof();
+					putchar(c);
+				} while (c != '\n');
+			} else if (c == '*') {
+				while (1) {
+					c = getchar_no_eof();
+					putchar(c);
+					if (c == '*') {
+						do {
+							c = getchar_no_eof();
+							putchar(c);
+						} while (c == '*');
+						if (c == '/')
+							break;
+					}
+				}
+			}
+			break;
+		case '"':
+		case '\'':
+			{
+				int delim = c;
+				do {
+					c = getchar_no_eof();
+					putchar(c);
+					if (c == '\\') {
+						c = getchar_no_eof();
+						putchar(c);
+						c = '\0';
+					}
+				} while (c != delim);
+			}
+			break;
+		}
+	}
+}
+
+/* Process the entire file.  */
+static void
+process_file(void)
+{
+	char *package, *name;
+	struct params *params, *rets;
+	int paramwid;
+
+	package = read_package();
+	read_preprocessor_lines();
+	while (read_func_header(&name, &params, &paramwid, &rets)) {
+		write_func_header(package, name, params, paramwid, rets);
+		copy_body();
+		write_func_trailer(package, name, rets);
+		free(name);
+		free_params(params);
+		free_params(rets);
+	}
+	free(package);
+}
+
+static void
+usage(void)
+{
+	fprintf(stderr, "Usage: cgo2c [--6g | --gcc] [--go-prefix PREFIX] [file]\n");
+	exit(1);
+}
+
+int
+main(int argc, char **argv)
+{
+	char *goarch;
+
+	while(argc > 1 && argv[1][0] == '-') {
+		if(strcmp(argv[1], "-") == 0)
+			break;
+		if(strcmp(argv[1], "--6g") == 0)
+			gcc = 0;
+		else if(strcmp(argv[1], "--gcc") == 0)
+			gcc = 1;
+		else if (strcmp(argv[1], "--go-prefix") == 0 && argc > 2) {
+			prefix = argv[2];
+			argc--;
+			argv++;
+		} else
+			usage();
+		argc--;
+		argv++;
+	}
+
+	if(argc <= 1 || strcmp(argv[1], "-") == 0) {
+		file = "<stdin>";
+		process_file();
+		return 0;
+	}
+
+	if(argc > 2)
+		usage();
+
+	file = argv[1];
+	if(freopen(file, "r", stdin) == 0) {
+		fprintf(stderr, "open %s: %s\n", file, strerror(errno));
+		exit(1);
+	}
+
+	if(!gcc) {
+		// 6g etc; update size table
+		goarch = getenv("GOARCH");
+		if(goarch != NULL && strcmp(goarch, "amd64") == 0) {
+			type_table[Uintptr].size = 8;
+			type_table[String].size = 16;
+			type_table[Slice].size = 8+4+4;
+			structround = 8;
+		}
+	}
+
+	process_file();
+	return 0;
+}
diff --git a/libgo/runtime/chan.cgo b/libgo/runtime/chan.cgo
new file mode 100644
index 0000000..bfc9533
--- /dev/null
+++ b/libgo/runtime/chan.cgo
@@ -0,0 +1,38 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "config.h"
+#include "channel.h"
+
+typedef _Bool bool;
+typedef unsigned char byte;
+typedef struct __go_channel chan;
+
+/* Do a nonblocking channel receive.  */
+
+func chanrecv2(c *chan, val *byte) (pres bool) {
+	if (c->element_size > 8) {
+		return __go_receive_nonblocking_big(c, val);
+	} else {
+		struct __go_receive_nonblocking_small rs;
+		union {
+			char b[8];
+			uint64_t v;
+		} u;
+
+		rs = __go_receive_nonblocking_small (c);
+		if (!rs.__success) {
+			return 0;
+		}
+		u.v = rs.__val;
+#ifndef WORDS_BIGENDIAN
+		__builtin_memcpy(val, u.b, c->element_size);
+#else
+		__builtin_memcpy(val, u.b + 8 - c->element_size,
+				 c->element_size);
+#endif
+		return 1;
+	}
+}
diff --git a/libgo/runtime/channel.h b/libgo/runtime/channel.h
new file mode 100644
index 0000000..b0d1347
--- /dev/null
+++ b/libgo/runtime/channel.h
@@ -0,0 +1,147 @@
+/* channel.h -- the channel type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <pthread.h>
+
+/* This structure is used when a select is waiting for a synchronous
+   channel.  */
+
+struct __go_channel_select
+{
+  /* A pointer to the next select waiting for this channel.  */
+  struct __go_channel_select *next;
+  /* A pointer to the channel which this select will use.  This starts
+     out as NULL and is set to the first channel which synchs up with
+     this one.  The variable to which this points may only be
+     accessed when __go_select_data_mutex is held.  */
+  struct __go_channel **selected;
+  /* A pointer to a variable which must be set to true if the
+     goroutine which sets *SELECTED wants to read from the channel,
+     false if it wants to write to it.  */
+  _Bool *is_read;
+};
+
+/* A channel is a pointer to this structure.  */
+
+struct __go_channel
+{
+  /* A mutex to control access to the channel.  */
+  pthread_mutex_t lock;
+  /* A condition variable.  This is signalled when data is added to
+     the channel and when data is removed from the channel.  */
+  pthread_cond_t cond;
+  /* The size of elements on this channel.  */
+  size_t element_size;
+  /* Number of operations on closed channel.  */
+  unsigned short closed_op_count;
+  /* True if a goroutine is waiting to send on a synchronous
+     channel.  */
+  _Bool waiting_to_send;
+  /* True if a goroutine is waiting to receive on a synchronous
+     channel.  */
+  _Bool waiting_to_receive;
+  /* True if this channel was selected for send in a select statement.
+     This locks out all other sends.  */
+  _Bool selected_for_send;
+  /* True if this channel was selected for receive in a select
+     statement.  This locks out all other receives.  */
+  _Bool selected_for_receive;
+  /* True if this channel has been closed.  */
+  _Bool is_closed;
+  /* True if at least one null value has been read from a closed
+     channel.  */
+  _Bool saw_close;
+  /* The list of select statements waiting to send on a synchronous
+     channel.  */
+  struct __go_channel_select *select_send_queue;
+  /* The list of select statements waiting to receive on a synchronous
+     channel.  */
+  struct __go_channel_select *select_receive_queue;
+  /* If a select statement is waiting for this channel, it sets these
+     pointers.  When something happens on the channel, the channel
+     locks the mutex, signals the condition, and unlocks the
+     mutex.  */
+  pthread_mutex_t *select_mutex;
+  pthread_cond_t *select_cond;
+  /* The number of entries in the circular buffer.  */
+  unsigned int num_entries;
+  /* Where to store the next value.  */
+  unsigned int next_store;
+  /* Where to fetch the next value.  If next_fetch == next_store, the
+     buffer is empty.  If next_store + 1 == next_fetch, the buffer is
+     full.  */
+  unsigned int next_fetch;
+  /* The circular buffer.  */
+  uint64_t data[];
+};
+
+/* The mutex used to control access to the value pointed to by the
+   __go_channel_select selected field.  No additional mutexes may be
+   acquired while this mutex is held.  */
+extern pthread_mutex_t __go_select_data_mutex;
+
+/* Maximum permitted number of operations on a closed channel.  */
+#define MAX_CLOSED_OPERATIONS (0x100)
+
+extern struct __go_channel *__go_new_channel (size_t, size_t);
+
+extern _Bool __go_synch_with_select (struct __go_channel *, _Bool);
+
+extern void __go_broadcast_to_select (struct __go_channel *);
+
+extern _Bool __go_send_acquire (struct __go_channel *, _Bool);
+
+#define SEND_NONBLOCKING_ACQUIRE_SPACE 0
+#define SEND_NONBLOCKING_ACQUIRE_NOSPACE 1
+#define SEND_NONBLOCKING_ACQUIRE_CLOSED 2
+
+extern int __go_send_nonblocking_acquire (struct __go_channel *);
+
+extern void __go_send_release (struct __go_channel *);
+
+extern void __go_send_small (struct __go_channel *, uint64_t, _Bool);
+
+extern _Bool __go_send_nonblocking_small (struct __go_channel *, uint64_t);
+
+extern void __go_send_big (struct __go_channel *, const void *, _Bool);
+
+extern _Bool __go_send_nonblocking_big (struct __go_channel *, const void *);
+
+extern _Bool __go_receive_acquire (struct __go_channel *, _Bool);
+
+#define RECEIVE_NONBLOCKING_ACQUIRE_DATA 0
+#define RECEIVE_NONBLOCKING_ACQUIRE_NODATA 1
+#define RECEIVE_NONBLOCKING_ACQUIRE_CLOSED 2
+
+extern int __go_receive_nonblocking_acquire (struct __go_channel *);
+
+extern uint64_t __go_receive_small (struct __go_channel *, _Bool);
+
+extern void __go_receive_release (struct __go_channel *);
+
+struct __go_receive_nonblocking_small
+{
+  uint64_t __val;
+  _Bool __success;
+};
+
+extern struct __go_receive_nonblocking_small
+__go_receive_nonblocking_small (struct __go_channel *);
+
+extern void __go_receive_big (struct __go_channel *, void *, _Bool);
+
+extern _Bool __go_receive_nonblocking_big (struct __go_channel *, void *);
+
+extern void __go_unlock_and_notify_selects (struct __go_channel *);
+
+extern _Bool __go_builtin_closed (struct __go_channel *);
+
+extern void __go_builtin_close (struct __go_channel *);
+
+extern size_t __go_chan_len (struct __go_channel *);
+
+extern size_t __go_chan_cap (struct __go_channel *);
diff --git a/libgo/runtime/defs.h b/libgo/runtime/defs.h
new file mode 100644
index 0000000..67ad212
--- /dev/null
+++ b/libgo/runtime/defs.h
@@ -0,0 +1,12 @@
+/* defs.h -- runtime definitions for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* The gc library uses this file for system defines, and generates it
+   automatically using the godefs program.  The logical thing to put
+   here for gccgo would be #include statements for system header
+   files.  We can't do that, though, because runtime.h #define's the
+   standard types.  So we #include the system headers from runtime.h
+   instead.  */
diff --git a/libgo/runtime/go-alloc.h b/libgo/runtime/go-alloc.h
new file mode 100644
index 0000000..c880a04
--- /dev/null
+++ b/libgo/runtime/go-alloc.h
@@ -0,0 +1,11 @@
+/* go-alloc.h -- allocate memory for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern void *__go_alloc (unsigned int __attribute__ ((mode (pointer))));
+extern void __go_free (void *);
diff --git a/libgo/runtime/go-bad-index.c b/libgo/runtime/go-bad-index.c
new file mode 100644
index 0000000..f0652f4
--- /dev/null
+++ b/libgo/runtime/go-bad-index.c
@@ -0,0 +1,15 @@
+/* go-bad-index.c -- bad array/string index error in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-panic.h"
+
+extern void __go_bad_index () __attribute__ ((noreturn));
+
+void
+__go_bad_index ()
+{
+  __go_panic_msg ("index out of range");
+}
diff --git a/libgo/runtime/go-breakpoint.c b/libgo/runtime/go-breakpoint.c
new file mode 100644
index 0000000..bb6eddc
--- /dev/null
+++ b/libgo/runtime/go-breakpoint.c
@@ -0,0 +1,15 @@
+/* go-breakpoint.c -- the runtime.Breakpoint function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <sched.h>
+
+void Breakpoint (void) asm ("libgo_runtime.runtime.Breakpoint");
+
+void
+Breakpoint (void)
+{
+  __builtin_trap ();
+}
diff --git a/libgo/runtime/go-byte-array-to-string.c b/libgo/runtime/go-byte-array-to-string.c
new file mode 100644
index 0000000..cf67fa1
--- /dev/null
+++ b/libgo/runtime/go-byte-array-to-string.c
@@ -0,0 +1,21 @@
+/* go-byte-array-to-string.c -- convert an array of bytes to a string in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-string.h"
+
+const struct __go_string *
+__go_byte_array_to_string (const void* p, size_t len)
+{
+  const unsigned char *bytes;
+  struct __go_string *ret;
+
+  bytes = (const unsigned char *) p;
+  ret = __go_alloc (sizeof (struct __go_string) + len);
+  ret->__length = len;
+  __builtin_memcpy (ret->__data, bytes, len);
+  return ret;
+}
diff --git a/libgo/runtime/go-caller.c b/libgo/runtime/go-caller.c
new file mode 100644
index 0000000..2df6d5b
--- /dev/null
+++ b/libgo/runtime/go-caller.c
@@ -0,0 +1,40 @@
+/* go-caller.c -- runtime.Caller for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* Implement runtime.Caller.  */
+
+#include <stdint.h>
+
+#include "go-string.h"
+
+/* The values returned by runtime.Caller.  */
+
+struct caller_ret
+{
+  uintptr_t pc;
+  const struct __go_string *file;
+  int line;
+  _Bool ok;
+};
+
+/* Implement runtime.Caller.  */
+
+struct caller_ret Caller (int n) asm ("libgo_runtime.runtime.Caller");
+
+struct caller_ret
+Caller (int n __attribute__ ((unused)))
+{
+  struct caller_ret ret;
+
+  /* A proper implementation needs to dig through the debugging
+     information.  */
+  ret.pc = (uint64_t) (uintptr_t) __builtin_return_address (1);
+  ret.file = NULL;
+  ret.line = 0;
+  ret.ok = 0;
+
+  return ret;
+}
diff --git a/libgo/runtime/go-can-convert-interface.c b/libgo/runtime/go-can-convert-interface.c
new file mode 100644
index 0000000..3e6ad89
--- /dev/null
+++ b/libgo/runtime/go-can-convert-interface.c
@@ -0,0 +1,76 @@
+/* go-can-convert-interface.c -- can we convert to an interface?
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "go-type.h"
+#include "interface.h"
+
+/* Return whether we can convert from the type in FROM_DESCRIPTOR to
+   the interface in TO_DESCRIPTOR.  This is used for type
+   switches.  */
+
+_Bool
+__go_can_convert_to_interface (
+    const struct __go_type_descriptor *to_descriptor,
+    const struct __go_type_descriptor *from_descriptor)
+{
+  const struct __go_interface_type *to_interface;
+  int to_method_count;
+  const struct __go_interface_method *to_method;
+  const struct __go_uncommon_type *from_uncommon;
+  int from_method_count;
+  const struct __go_method *from_method;
+  int i;
+
+  /* In a type switch FROM_DESCRIPTOR can be NULL.  */
+  if (from_descriptor == NULL)
+    return 0;
+
+  assert (to_descriptor->__code == GO_INTERFACE);
+  to_interface = (const struct __go_interface_type *) to_descriptor;
+  to_method_count = to_interface->__methods.__count;
+  to_method = ((const struct __go_interface_method *)
+	       to_interface->__methods.__values);
+
+  from_uncommon = from_descriptor->__uncommon;
+  if (from_uncommon == NULL)
+    {
+      from_method_count = 0;
+      from_method = NULL;
+    }
+  else
+    {
+      from_method_count = from_uncommon->__methods.__count;
+      from_method = ((const struct __go_method *)
+		     from_uncommon->__methods.__values);
+    }
+
+  for (i = 0; i < to_method_count; ++i)
+    {
+      while (from_method_count > 0
+	     && (!__go_ptr_strings_equal (from_method->__name,
+					  to_method->__name)
+		 || !__go_ptr_strings_equal (from_method->__pkg_path,
+					     to_method->__pkg_path)))
+	{
+	  ++from_method;
+	  --from_method_count;
+	}
+
+      if (from_method_count == 0)
+	return 0;
+
+      if (from_method->__hash != to_method->__hash)
+	return 0;
+
+      ++to_method;
+      ++from_method;
+      --from_method_count;
+    }
+
+  return 1;
+}
diff --git a/libgo/runtime/go-chan-cap.c b/libgo/runtime/go-chan-cap.c
new file mode 100644
index 0000000..287d55e
--- /dev/null
+++ b/libgo/runtime/go-chan-cap.c
@@ -0,0 +1,41 @@
+/* go-chan-cap.c -- the cap function applied to a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "channel.h"
+
+/* Return the cap function applied to a channel--the size of the
+   buffer.  This could be done inline but I'm doing it as a function
+   for now to make it easy to change the channel structure.  */
+
+size_t
+__go_chan_cap (struct __go_channel *channel)
+{
+  int i;
+  size_t ret;
+
+  if (channel == NULL)
+    return 0;
+
+  i = pthread_mutex_lock (&channel->lock);
+  assert (i == 0);
+
+  if (channel->num_entries == 0)
+    ret = 0;
+  else
+    {
+      /* One slot is always unused.  We added 1 when we created the
+	 channel.  */
+      ret = channel->num_entries - 1;
+    }
+
+  i = pthread_mutex_unlock (&channel->lock);
+  assert  (i == 0);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-chan-len.c b/libgo/runtime/go-chan-len.c
new file mode 100644
index 0000000..f02166f
--- /dev/null
+++ b/libgo/runtime/go-chan-len.c
@@ -0,0 +1,41 @@
+/* go-chan-len.c -- the len function applied to a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "channel.h"
+
+/* Return the len function applied to a channel--the number of
+   elements in the buffer.  This could be done inline but I'm doing it
+   as a function for now to make it easy to change the channel
+   structure.  */
+
+size_t
+__go_chan_len (struct __go_channel *channel)
+{
+  int i;
+  size_t ret;
+
+  if (channel == NULL)
+    return 0;
+
+  i = pthread_mutex_lock (&channel->lock);
+  assert (i == 0);
+
+  if (channel->num_entries == 0)
+    ret = 0;
+  else if (channel->next_fetch == channel->next_store)
+    ret = 0;
+  else
+    ret = ((channel->next_store + channel->num_entries - channel->next_fetch)
+	   % channel->num_entries);
+
+  i = pthread_mutex_unlock (&channel->lock);
+  assert  (i == 0);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-close.c b/libgo/runtime/go-close.c
new file mode 100644
index 0000000..8d2a951
--- /dev/null
+++ b/libgo/runtime/go-close.c
@@ -0,0 +1,34 @@
+/* go-close.c -- the builtin close function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "channel.h"
+
+/* Close a channel.  After a channel is closed, sends are no longer
+   permitted.  Receives always return zero.  */
+
+void
+__go_builtin_close (struct __go_channel *channel)
+{
+  int err;
+
+  err = pthread_mutex_lock (&channel->lock);
+  assert (err == 0);
+
+  /* Wait until no select has this channel marked for sending.  */
+  while (channel->selected_for_send)
+    {
+      err = pthread_cond_wait (&channel->cond, &channel->lock);
+      assert (err == 0);
+    }
+
+  /* Mark the channel closed; further sends are not permitted and
+     receives will return zero values.  */
+  channel->is_closed = 1;
+
+  /* Wake every waiter so it can observe the close.  */
+  err = pthread_cond_broadcast (&channel->cond);
+  assert (err == 0);
+
+  /* Drops the channel lock and notifies any pending selects.  */
+  __go_unlock_and_notify_selects (channel);
+}
diff --git a/libgo/runtime/go-closed.c b/libgo/runtime/go-closed.c
new file mode 100644
index 0000000..ef2b31a
--- /dev/null
+++ b/libgo/runtime/go-closed.c
@@ -0,0 +1,35 @@
+/* go-closed.c -- the builtin closed function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "channel.h"
+
+/* Return whether a channel is closed.  We only return true after at
+   least one nil value has been read from the channel.  */
+
+_Bool
+__go_builtin_closed (struct __go_channel *channel)
+{
+  _Bool result;
+  int err;
+
+  err = pthread_mutex_lock (&channel->lock);
+  assert (err == 0);
+
+  /* Wait until no select has this channel marked for receiving, so
+     that we report a consistent answer.  */
+  while (channel->selected_for_receive)
+    {
+      err = pthread_cond_wait (&channel->cond, &channel->lock);
+      assert (err == 0);
+    }
+
+  /* SAW_CLOSE is only set after at least one nil value has been read
+     from the closed channel.  */
+  result = channel->saw_close;
+
+  err = pthread_mutex_unlock (&channel->lock);
+  assert (err == 0);
+
+  return result;
+}
diff --git a/libgo/runtime/go-construct-map.c b/libgo/runtime/go-construct-map.c
new file mode 100644
index 0000000..f3f641a
--- /dev/null
+++ b/libgo/runtime/go-construct-map.c
@@ -0,0 +1,33 @@
+/* go-construct-map.c -- construct a map from an initializer.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "map.h"
+
+/* Build a map from an initializer: COUNT entries of ENTRY_SIZE bytes
+   each, the key at offset 0 and the value at VAL_OFFSET.  */
+
+struct __go_map *
+__go_construct_map (const struct __go_map_descriptor *descriptor,
+		    size_t count, size_t entry_size, size_t val_offset,
+		    size_t val_size, const void *ventries)
+{
+  struct __go_map *map;
+  const unsigned char *p;
+  size_t n;
+
+  map = __go_new_map (descriptor, count);
+
+  p = (const unsigned char *) ventries;
+  for (n = 0; n < count; ++n, p += entry_size)
+    {
+      /* Inserting with the key at P yields a pointer to the value
+	 slot; copy the initializer's value into it.  */
+      void *slot = __go_map_index (map, p, 1);
+      __builtin_memcpy (slot, p + val_offset, val_size);
+    }
+
+  return map;
+}
diff --git a/libgo/runtime/go-convert-interface.c b/libgo/runtime/go-convert-interface.c
new file mode 100644
index 0000000..ae8692d
--- /dev/null
+++ b/libgo/runtime/go-convert-interface.c
@@ -0,0 +1,128 @@
+/* go-convert-interface.c -- convert interfaces for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "go-refcount.h"
+#include "interface.h"
+
+/* Convert one interface type into another interface type.
+   LHS_DESCRIPTOR is the type descriptor of the resulting interface.
+   RHS is the interface we are converting, a pointer to struct
+   __go_interface.  We need to build a new set of interface method
+   pointers.  If any interface method is not implemented by the
+   object, the conversion fails.  If SUCCESS is not NULL, we set it to
+   whether or not the conversion succeeds.  If SUCCESS is NULL, and
+   the conversion fails, we panic.  */
+
+struct __go_interface *
+__go_convert_interface (const struct __go_type_descriptor* lhs_descriptor,
+			const void *rhs_arg, _Bool *success)
+{
+  const struct __go_interface *rhs = (const struct __go_interface *) rhs_arg;
+  const struct __go_interface_type *lhs_interface;
+  int lhs_method_count;
+  const struct __go_interface_method* lhs_methods;
+  const void **methods;
+  struct __go_interface *ret;
+
+  /* Converting a nil interface always succeeds and yields nil.  */
+  if (rhs == NULL)
+    {
+      if (success != NULL)
+	*success = 1;
+      return NULL;
+    }
+
+  assert (lhs_descriptor->__code == GO_INTERFACE);
+  lhs_interface = (const struct __go_interface_type *) lhs_descriptor;
+  lhs_method_count = lhs_interface->__methods.__count;
+  lhs_methods = ((const struct __go_interface_method *)
+		 lhs_interface->__methods.__values);
+
+  if (lhs_method_count == 0)
+    methods = NULL;
+  else
+    {
+      const struct __go_uncommon_type *rhs_uncommon;
+      int rhs_method_count;
+      const struct __go_method *p_rhs_method;
+      int i;
+
+      methods = (const void **) __go_alloc (lhs_method_count
+					    * sizeof (void *));
+
+      rhs_uncommon = rhs->__type_descriptor->__uncommon;
+      if (rhs_uncommon == NULL)
+	{
+	  rhs_method_count = 0;
+	  p_rhs_method = NULL;
+	}
+      else
+	{
+	  rhs_method_count = rhs_uncommon->__methods.__count;
+	  p_rhs_method = ((const struct __go_method *)
+			  rhs_uncommon->__methods.__values);
+	}
+
+      /* A single forward scan matches the two method lists; this
+	 presumably relies on both lists being sorted the same way --
+	 the scan never backtracks.  */
+      for (i = 0; i < lhs_method_count; ++i)
+	{
+	  const struct __go_interface_method *p_lhs_method;
+
+	  p_lhs_method = &lhs_methods[i];
+
+	  while (rhs_method_count > 0
+		 && (!__go_ptr_strings_equal (p_lhs_method->__name,
+					      p_rhs_method->__name)
+		     || !__go_ptr_strings_equal (p_lhs_method->__pkg_path,
+						 p_rhs_method->__pkg_path)))
+	    {
+	      ++p_rhs_method;
+	      --rhs_method_count;
+	    }
+
+	  if (rhs_method_count == 0)
+	    {
+	      /* The object is missing a required method.  */
+	      if (success != NULL)
+		{
+		  *success = 0;
+		  /* Release the partially built method table; the
+		     original code leaked it on this path.  */
+		  __go_free (methods);
+		  return NULL;
+		}
+	      __go_print_msg (1, "interface conversion failed: no '");
+	      __go_print_string (1, *p_lhs_method->__name);
+	      __go_panic_msg ("' method");
+	    }
+
+	  if (p_lhs_method->__hash != p_rhs_method->__hash)
+	    {
+	      /* The method exists but its signature does not match.  */
+	      if (success != NULL)
+		{
+		  *success = 0;
+		  /* Release the partially built method table; the
+		     original code leaked it on this path.  */
+		  __go_free (methods);
+		  return NULL;
+		}
+	      __go_print_msg (1, "interface conversion failed: '");
+	      __go_print_string (1, *p_lhs_method->__name);
+	      __go_panic_msg ("' method has wrong type");
+	    }
+
+	  methods[i] = p_rhs_method->__function;
+	}
+    }
+
+  ret = (struct __go_interface *) __go_alloc (sizeof (struct __go_interface));
+  ret->__type_descriptor = rhs->__type_descriptor;
+  ret->__methods = methods;
+  ret->__object = rhs->__object;
+
+  __go_increment_refcount (ret->__object, ret->__type_descriptor);
+
+  if (success != NULL)
+    *success = 1;
+
+  return ret;
+}
diff --git a/libgo/runtime/go-defer.c b/libgo/runtime/go-defer.c
new file mode 100644
index 0000000..8f22e80
--- /dev/null
+++ b/libgo/runtime/go-defer.c
@@ -0,0 +1,56 @@
+/* go-defer.c -- manage the defer stack.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-alloc.h"
+
+/* The defer stack is a list of these structures.  */
+
+struct __defer_stack
+{
+  /* The next entry in the stack.  Entries are pushed by __go_defer
+     and run in LIFO order by __go_undefer.  */
+  struct __defer_stack* __next;
+
+  /* The function to call.  */
+  void (*__pfn) (void *);
+
+  /* The argument to pass to the function.  */
+  void *__arg;
+};
+
+/* This function is called each time we need to defer a call.  */
+
+void *
+__go_defer (void *stack, void (*pfn) (void *), void *arg)
+{
+  struct __defer_stack *entry;
+
+  /* Push a new entry recording PFN/ARG onto the existing stack and
+     return the new stack head.  */
+  entry = (struct __defer_stack *) __go_alloc (sizeof (struct __defer_stack));
+  entry->__next = (struct __defer_stack *) stack;
+  entry->__pfn = pfn;
+  entry->__arg = arg;
+  return (void *) entry;
+}
+
+/* This function is called when we want to undefer the stack.  */
+
+void
+__go_undefer (void *arg)
+{
+  struct __defer_stack *entry = (struct __defer_stack *) arg;
+
+  /* Pop and invoke each deferred call in LIFO order, freeing the
+     entries as we go.  */
+  while (entry != NULL)
+    {
+      struct __defer_stack *next = entry->__next;
+
+      (*entry->__pfn) (entry->__arg);
+      __go_free (entry);
+      entry = next;
+    }
+}
diff --git a/libgo/runtime/go-go.c b/libgo/runtime/go-go.c
new file mode 100644
index 0000000..56785a4
--- /dev/null
+++ b/libgo/runtime/go-go.c
@@ -0,0 +1,86 @@
+/* go-go.c -- the go function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+#include "config.h"
+#include "go-alloc.h"
+#include "runtime.h"
+
+#undef int /* FIXME */
+
+/* What to call.  */
+
+struct call
+{
+  /* The function the new goroutine will run.  */
+  void (*pfn) (void *);
+  /* The argument to pass to PFN.  */
+  void *arg;
+  /* The M structure for the new thread; ownership passes to the
+     thread, which installs it (see start_go_thread).  */
+  struct M *m;
+};
+
+/* Start the thread.  */
+
+static void *
+start_go_thread (void *thread_arg)
+{
+  struct call *pc = (struct call *) thread_arg;
+  void (*pfn) (void *);
+  void *arg;
+
+  /* Copy the fields out so the argument block can be freed before
+     the goroutine body runs.  */
+  pfn = pc->pfn;
+  arg = pc->arg;
+  /* Install this thread's M; `m' presumably is a per-thread variable
+     declared in runtime.h -- TODO confirm.  */
+  m = pc->m;
+  free (pc);
+
+  (*pfn) (arg);
+
+  return NULL;
+}
+
+/* Implement the go statement.  */
+
+void
+__go_go (void (*pfn) (void*), void* arg)
+{
+  int i;
+  pthread_attr_t attr;
+  struct call *pc;
+
+  i = pthread_attr_init (&attr);
+  assert (i == 0);
+  /* Goroutines are never joined, so create the thread detached.  */
+  i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+  assert (i == 0);
+
+#ifdef LINKER_SUPPORTS_SPLIT_STACK
+  /* The linker knows how to handle calls between code which uses
+     -fsplit-stack and code which does not.  That means that we can
+     run with a smaller stack and rely on the -fsplit-stack support to
+     save us.  The GNU/Linux glibc library won't let us have a very
+     small stack, but we make it as small as we can.  */
+#ifndef PTHREAD_STACK_MIN
+#define PTHREAD_STACK_MIN 8192
+#endif
+  i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
+  assert (i == 0);
+#endif
+
+  pc = malloc (sizeof (struct call));
+  /* The original code dereferenced PC without checking the
+     allocation; fail loudly instead, matching the file's assert
+     style.  */
+  assert (pc != NULL);
+  pc->pfn = pfn;
+  pc->arg = arg;
+  /* Give the new thread its own zeroed M with a fresh mcache.  */
+  pc->m = __go_alloc (sizeof (M));
+  __builtin_memset (pc->m, 0, sizeof (M));
+  pc->m->mcache = allocmcache ();
+
+  pthread_t tid;
+  i = pthread_create (&tid, &attr, start_go_thread, pc);
+  assert (i == 0);
+
+  i = pthread_attr_destroy (&attr);
+  assert (i == 0);
+}
diff --git a/libgo/runtime/go-goexit.c b/libgo/runtime/go-goexit.c
new file mode 100644
index 0000000..25f8d05
--- /dev/null
+++ b/libgo/runtime/go-goexit.c
@@ -0,0 +1,17 @@
+/* go-goexit.c -- the runtime.Goexit function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <pthread.h>
+
+void Goexit (void) asm ("libgo_runtime.runtime.Goexit");
+
+void
+Goexit (void)
+{
+  /* Each goroutine runs on its own OS thread here, so exiting the
+     thread terminates just this goroutine.  */
+  pthread_exit (NULL);
+  /* pthread_exit does not return.  */
+  assert (0);
+}
diff --git a/libgo/runtime/go-gomaxprocs.c b/libgo/runtime/go-gomaxprocs.c
new file mode 100644
index 0000000..04dc448
--- /dev/null
+++ b/libgo/runtime/go-gomaxprocs.c
@@ -0,0 +1,15 @@
+/* go-gomaxprocs.c -- runtime.GOMAXPROCS.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* This is the runtime.GOMAXPROCS function.  This currently does
+   nothing, since each goroutine runs in a separate thread anyhow.  */
+
+void GOMAXPROCS (int) asm ("libgo_runtime.runtime.GOMAXPROCS");
+
+void
+GOMAXPROCS (int n __attribute__ ((unused)))
+{
+  /* Deliberately empty: each goroutine already runs on its own OS
+     thread, so there is no scheduler parallelism to configure.  */
+}
diff --git a/libgo/runtime/go-int-array-to-string.c b/libgo/runtime/go-int-array-to-string.c
new file mode 100644
index 0000000..c5e46f2
--- /dev/null
+++ b/libgo/runtime/go-int-array-to-string.c
@@ -0,0 +1,83 @@
+/* go-int-array-to-string.c -- convert an array of ints to a string in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "go-alloc.h"
+#include "go-string.h"
+
+const struct __go_string *
+__go_int_array_to_string (const void* p, size_t len)
+{
+  const int *ints;
+  size_t slen;
+  size_t i;
+  struct __go_string *ret;
+  unsigned char *s;
+
+  ints = (const int *) p;
+
+  /* First pass: compute the UTF-8 length of the result.  The
+     substitution here must stay in sync with the encoding pass
+     below.  */
+  slen = 0;
+  for (i = 0; i < len; ++i)
+    {
+      int v;
+
+      v = ints[i];
+
+      /* Negative values, like values beyond the Unicode range,
+	 cannot be encoded; substitute the replacement character.
+	 (The original code let negative values fall into the
+	 one-byte case, emitting an invalid UTF-8 byte.)  */
+      if (v < 0 || v > 0x10ffff)
+	v = 0xfffd;
+
+      if (v <= 0x7f)
+	slen += 1;
+      else if (v <= 0x7ff)
+	slen += 2;
+      else if (v <= 0xffff)
+	slen += 3;
+      else
+	slen += 4;
+    }
+
+  /* The string bytes are allocated inline after the header.  */
+  ret = __go_alloc (sizeof (struct __go_string) + slen);
+  ret->__length = slen;
+
+  /* Second pass: encode each value as UTF-8.  */
+  s = ret->__data;
+  for (i = 0; i < len; ++i)
+    {
+      int v;
+
+      v = ints[i];
+
+      /* Must match the substitution made in the first pass.  */
+      if (v < 0 || v > 0x10ffff)
+	v = 0xfffd;
+
+      if (v <= 0x7f)
+	*s++ = v;
+      else if (v <= 0x7ff)
+	{
+	  *s++ = 0xc0 | ((v >> 6) & 0x1f);
+	  *s++ = 0x80 | (v & 0x3f);
+	}
+      else if (v <= 0xffff)
+	{
+	  *s++ = 0xe0 | ((v >> 12) & 0xf);
+	  *s++ = 0x80 | ((v >> 6) & 0x3f);
+	  *s++ = 0x80 | (v & 0x3f);
+	}
+      else
+	{
+	  *s++ = 0xf0 | ((v >> 18) & 0x7);
+	  *s++ = 0x80 | ((v >> 12) & 0x3f);
+	  *s++ = 0x80 | ((v >> 6) & 0x3f);
+	  *s++ = 0x80 | (v & 0x3f);
+	}
+    }
+
+  assert ((size_t) (s - ret->__data) == slen);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-int-to-string.c b/libgo/runtime/go-int-to-string.c
new file mode 100644
index 0000000..15f0cdc
--- /dev/null
+++ b/libgo/runtime/go-int-to-string.c
@@ -0,0 +1,57 @@
+/* go-int-to-string.c -- convert an integer to a string in Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-string.h"
+
+const struct __go_string *
+__go_int_to_string (int v)
+{
+  char buf[4];
+  size_t len;
+  struct __go_string *ret;
+
+  /* A negative value, like a value beyond the Unicode range, cannot
+     be encoded in UTF-8; substitute the replacement character.  (The
+     original code checked only v > 0x10ffff, and only after the
+     one- and two-byte cases, so negative values produced a single
+     invalid byte.)  */
+  if (v < 0 || v > 0x10ffff)
+    v = 0xfffd;
+
+  if (v <= 0x7f)
+    {
+      buf[0] = v;
+      len = 1;
+    }
+  else if (v <= 0x7ff)
+    {
+      buf[0] = 0xc0 + (v >> 6);
+      buf[1] = 0x80 + (v & 0x3f);
+      len = 2;
+    }
+  else if (v <= 0xffff)
+    {
+      buf[0] = 0xe0 + (v >> 12);
+      buf[1] = 0x80 + ((v >> 6) & 0x3f);
+      buf[2] = 0x80 + (v & 0x3f);
+      len = 3;
+    }
+  else
+    {
+      buf[0] = 0xf0 + (v >> 18);
+      buf[1] = 0x80 + ((v >> 12) & 0x3f);
+      buf[2] = 0x80 + ((v >> 6) & 0x3f);
+      buf[3] = 0x80 + (v & 0x3f);
+      len = 4;
+    }
+
+  /* The string bytes are allocated inline after the header.  */
+  ret = (struct __go_string *) __go_alloc (sizeof (struct __go_string) + len);
+  ret->__length = len;
+  __builtin_memcpy (ret->__data, buf, len);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-interface-compare.c b/libgo/runtime/go-interface-compare.c
new file mode 100644
index 0000000..c434dc9
--- /dev/null
+++ b/libgo/runtime/go-interface-compare.c
@@ -0,0 +1,37 @@
+/* go-interface-compare.c -- compare two interface values.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "interface.h"
+
+/* Compare two interface values.  Return 0 for equal, not zero for not
+   equal (return value is like strcmp).  */
+
+int
+__go_interface_compare (const void *left_arg, const void* right_arg)
+{
+  const struct __go_interface *li;
+  const struct __go_interface *ri;
+
+  li = (const struct __go_interface *) left_arg;
+  ri = (const struct __go_interface *) right_arg;
+
+  /* Two nil interfaces are equal; nil never equals non-nil.  */
+  if (li == NULL || ri == NULL)
+    return li == ri ? 0 : 1;
+
+  /* Different dynamic types are never equal.  */
+  if (!__go_type_descriptors_equal (li->__type_descriptor,
+				    ri->__type_descriptor))
+    return 1;
+
+  /* Pointer types compare by identity; everything else goes through
+     the type's equality function.  */
+  if (__go_is_pointer_type (li->__type_descriptor))
+    return li->__object == ri->__object ? 0 : 1;
+
+  return (li->__type_descriptor->__equal (li->__object, ri->__object,
+					  li->__type_descriptor->__size)
+	  ? 0 : 1);
+}
diff --git a/libgo/runtime/go-interface-to-object.c b/libgo/runtime/go-interface-to-object.c
new file mode 100644
index 0000000..b499977
--- /dev/null
+++ b/libgo/runtime/go-interface-to-object.c
@@ -0,0 +1,29 @@
+/* go-interface-to-object.c -- get an object from an interface.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "interface.h"
+
+/* Get an object from an interface.  This checks that the types match,
+   and crashes if they don't.  */
+
+void
+__go_interface_to_object (void *result,
+			  const struct __go_type_descriptor *lhs_descriptor,
+			  size_t object_size, const void *rhs_arg)
+{
+  const struct __go_interface *iface;
+  _Bool types_match;
+
+  iface = (const struct __go_interface *) rhs_arg;
+
+  /* This entry point is only for types not represented as a
+     pointer.  */
+  assert (!__go_is_pointer_type (lhs_descriptor));
+
+  types_match = (iface->__type_descriptor == lhs_descriptor
+		 || __go_type_descriptors_equal (iface->__type_descriptor,
+						 lhs_descriptor));
+  if (!types_match || iface->__type_descriptor->__size != object_size)
+    abort ();
+
+  /* Copy the boxed value out of the interface.  */
+  __builtin_memcpy (result, iface->__object, object_size);
+}
diff --git a/libgo/runtime/go-interface-to-pointer.c b/libgo/runtime/go-interface-to-pointer.c
new file mode 100644
index 0000000..d2ed918
--- /dev/null
+++ b/libgo/runtime/go-interface-to-pointer.c
@@ -0,0 +1,28 @@
+/* go-interface-to-pointer.c -- get a pointer from an interface.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "interface.h"
+
+/* Get a pointer from an interface.  This checks that the types match,
+   and crashes if they don't.  */
+
+void *
+__go_interface_to_pointer (const struct __go_type_descriptor *lhs_descriptor,
+			   const void *rhs_arg)
+{
+  const struct __go_interface *iface;
+
+  iface = (const struct __go_interface *) rhs_arg;
+
+  /* This entry point is only for types represented as a pointer.  */
+  assert (__go_is_pointer_type (lhs_descriptor));
+
+  /* The conversion is valid when the descriptors match, or when
+     either side is unsafe.Pointer.  */
+  if (iface->__type_descriptor == lhs_descriptor
+      || __go_type_descriptors_equal (iface->__type_descriptor,
+				      lhs_descriptor)
+      || lhs_descriptor->__code == GO_UNSAFE_POINTER
+      || iface->__type_descriptor->__code == GO_UNSAFE_POINTER)
+    return iface->__object;
+
+  abort ();
+}
diff --git a/libgo/runtime/go-lock-os-thread.c b/libgo/runtime/go-lock-os-thread.c
new file mode 100644
index 0000000..204f11d
--- /dev/null
+++ b/libgo/runtime/go-lock-os-thread.c
@@ -0,0 +1,24 @@
+/* go-lock-os-thread.c -- the LockOSThread and UnlockOSThread functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* The runtime.LockOSThread and runtime.UnlockOSThread functions are
+   meaningless in the current implementation, since for us a goroutine
+   always stays on a single OS thread.  */
+
+extern void LockOSThread (void) __asm__ ("libgo_runtime.runtime.LockOSThread");
+
+void
+LockOSThread (void)
+{
+  /* Deliberately empty: a goroutine never migrates between OS
+     threads in this implementation, so it is always "locked".  */
+}
+
+extern void UnlockOSThread (void)
+  __asm__ ("libgo_runtime.runtime.UnlockOSThread");
+
+void
+UnlockOSThread (void)
+{
+  /* Deliberately empty; see LockOSThread.  */
+}
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c
new file mode 100644
index 0000000..d3c720d
--- /dev/null
+++ b/libgo/runtime/go-main.c
@@ -0,0 +1,87 @@
+/* go-main.c -- the main function for a Go program.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <time.h>
+
+#include "go-alloc.h"
+#include "array.h"
+#include "go-signal.h"
+#include "go-string.h"
+
+#include "runtime.h"
+
+#undef int
+#undef char
+#undef unsigned
+
+/* The main function for a Go program.  This records the command line
+   parameters, calls the real main function, and returns a zero status
+   if the real main function returns.  */
+
+extern char **environ;
+
+extern struct __go_open_array Args asm ("libgo_os.os.Args");
+
+extern struct __go_open_array Envs asm ("libgo_os.os.Envs");
+
+/* These functions are created for the main package.  */
+extern void __go_init_main (void);
+extern void real_main (void) asm ("main.main");
+
+/* The main function.  */
+
+int
+main (int argc, char **argv)
+{
+  int i;
+  struct __go_string **values;
+
+  /* Initialize the memory allocator before any __go_alloc call.  */
+  mallocinit ();
+
+  /* Build os.Args from the C command line.  */
+  Args.__count = argc;
+  Args.__capacity = argc;
+  values = __go_alloc (argc * sizeof (struct __go_string *));
+  for (i = 0; i < argc; ++i)
+    {
+      size_t len;
+      struct __go_string *s;
+
+      /* Each string's bytes are allocated inline after the header.  */
+      len = __builtin_strlen (argv[i]);
+      s = __go_alloc (sizeof (struct __go_string) + len);
+      s->__length = len;
+      __builtin_memcpy (&s->__data[0], argv[i], len);
+      values[i] = s;
+    }
+  Args.__values = values;
+
+  /* Build os.Envs from the environment; first count the entries.  */
+  for (i = 0; environ[i] != NULL; ++i)
+    ;
+  Envs.__count = i;
+  Envs.__capacity = i;
+  values = __go_alloc (i * sizeof (struct __go_string *));
+  for (i = 0; environ[i] != NULL; ++i)
+    {
+      size_t len;
+      struct __go_string *s;
+
+      len = __builtin_strlen (environ[i]);
+      s = __go_alloc (sizeof (struct __go_string) + len);
+      s->__length = len;
+      __builtin_memcpy (&s->__data[0], environ[i], len);
+      values[i] = s;
+    }
+  Envs.__values = values;
+
+  /* Install the runtime signal handlers.  */
+  __initsig ();
+
+  /* Seed the C random number generator for the runtime's use.  */
+  srandom ((unsigned int) time (NULL));
+
+  /* Run package initializers, then the user's main.main.  Returning
+     zero means main.main returned normally.  */
+  __go_init_main ();
+  real_main ();
+
+  return 0;
+}
diff --git a/libgo/runtime/go-map-delete.c b/libgo/runtime/go-map-delete.c
new file mode 100644
index 0000000..d7e35b5
--- /dev/null
+++ b/libgo/runtime/go-map-delete.c
@@ -0,0 +1,52 @@
+/* go-map-delete.c -- delete an entry from a map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "map.h"
+
+/* Delete the entry matching KEY from MAP.  */
+
+void
+__go_map_delete (struct __go_map *map, const void *key)
+{
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  size_t key_offset;
+  _Bool (*equal) (const void*, const void*, size_t);
+  size_t key_hash;
+  size_t key_size;
+  size_t bucket_index;
+  void **pentry;
+
+  descriptor = map->__descriptor;
+
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  key_offset = descriptor->__key_offset;
+  key_size = key_descriptor->__size;
+  /* Sanity check that the key type has a usable size.  Use
+     (size_t) -1 rather than -1U: on LP64 targets -1U is UINT_MAX,
+     not SIZE_MAX, so the original comparison was against the wrong
+     sentinel width.  */
+  assert (key_size != 0 && key_size != (size_t) -1);
+  equal = key_descriptor->__equal;
+
+  key_hash = key_descriptor->__hash (key, key_size);
+  bucket_index = key_hash % map->__bucket_count;
+
+  /* Walk the bucket chain.  Each entry's first word is the link to
+     the next entry; PENTRY points at the link we would rewrite.  */
+  pentry = map->__buckets + bucket_index;
+  while (*pentry != NULL)
+    {
+      char *entry = (char *) *pentry;
+      if (equal (key, entry + key_offset, key_size))
+	{
+	  /* Unlink and free the matching entry.  Deleting a key that
+	     is not present is a no-op.  */
+	  *pentry = *(void **) entry;
+	  __go_free (entry);
+	  map->__element_count -= 1;
+	  break;
+	}
+      pentry = (void **) entry;
+    }
+}
diff --git a/libgo/runtime/go-map-index.c b/libgo/runtime/go-map-index.c
new file mode 100644
index 0000000..9cdb27e
--- /dev/null
+++ b/libgo/runtime/go-map-index.c
@@ -0,0 +1,127 @@
+/* go-map-index.c -- find or insert an entry in a map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "map.h"
+
+/* Rehash MAP to a larger size.  */
+
+static void
+__go_map_rehash (struct __go_map *map)
+{
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  size_t key_offset;
+  size_t key_size;
+  size_t (*hash) (const void *, size_t);
+  size_t old_bucket_count;
+  void **old_buckets;
+  size_t new_bucket_count;
+  void **new_buckets;
+  size_t i;
+
+  descriptor = map->__descriptor;
+
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  key_offset = descriptor->__key_offset;
+  key_size = key_descriptor->__size;
+  hash = key_descriptor->__hash;
+
+  old_bucket_count = map->__bucket_count;
+  old_buckets = map->__buckets;
+
+  /* Grow to the next prime at least double the current size.  */
+  new_bucket_count = __go_map_next_prime (old_bucket_count * 2);
+  new_buckets = (void **) __go_alloc (new_bucket_count * sizeof (void *));
+  __builtin_memset (new_buckets, 0, new_bucket_count * sizeof (void *));
+
+  for (i = 0; i < old_bucket_count; ++i)
+    {
+      char* entry;
+      char* next;
+
+      /* Relink every entry into its new bucket.  The first word of
+	 an entry is the chain link, so entries are moved rather than
+	 copied.  */
+      for (entry = old_buckets[i]; entry != NULL; entry = next)
+	{
+	  size_t key_hash;
+	  size_t new_bucket_index;
+
+	  /* We could speed up rehashing at the cost of memory space
+	     by caching the hash code.  */
+	  key_hash = hash (entry + key_offset, key_size);
+	  new_bucket_index = key_hash % new_bucket_count;
+
+	  next = *(char **) entry;
+	  *(char **) entry = new_buckets[new_bucket_index];
+	  new_buckets[new_bucket_index] = entry;
+	}
+    }
+
+  __go_free (old_buckets);
+
+  map->__bucket_count = new_bucket_count;
+  map->__buckets = new_buckets;
+}
+
+/* Find KEY in MAP, return a pointer to the value.  If KEY is not
+   present, then if INSERT is false, return NULL, and if INSERT is
+   true, insert a new value and zero-initialize it before returning a
+   pointer to it.  */
+
+void *
+__go_map_index (struct __go_map *map, const void *key, _Bool insert)
+{
+  const struct __go_map_descriptor *descriptor;
+  const struct __go_type_descriptor *key_descriptor;
+  size_t key_offset;
+  _Bool (*equal) (const void*, const void*, size_t);
+  size_t key_hash;
+  size_t key_size;
+  size_t bucket_index;
+  char *entry;
+
+  descriptor = map->__descriptor;
+
+  key_descriptor = descriptor->__map_descriptor->__key_type;
+  key_offset = descriptor->__key_offset;
+  key_size = key_descriptor->__size;
+  /* Sanity check that the key type has a usable size.  Use
+     (size_t) -1 rather than -1U: on LP64 targets -1U is UINT_MAX,
+     not SIZE_MAX, so the original comparison was against the wrong
+     sentinel width.  */
+  assert (key_size != 0 && key_size != (size_t) -1);
+  equal = key_descriptor->__equal;
+
+  key_hash = key_descriptor->__hash (key, key_size);
+  bucket_index = key_hash % map->__bucket_count;
+
+  /* Search the bucket chain for an existing entry; the first word of
+     each entry is the link to the next.  */
+  entry = (char *) map->__buckets[bucket_index];
+  while (entry != NULL)
+    {
+      if (equal (key, entry + key_offset, key_size))
+	return entry + descriptor->__val_offset;
+      entry = *(char **) entry;
+    }
+
+  if (!insert)
+    return NULL;
+
+  /* Keep the load factor at or below one by growing before the
+     insertion.  */
+  if (map->__element_count >= map->__bucket_count)
+    {
+      __go_map_rehash (map);
+      bucket_index = key_hash % map->__bucket_count;
+    }
+
+  /* Allocate a zeroed entry, copy in the key, and link it at the
+     head of its bucket.  The value slot stays zero-initialized.  */
+  entry = (char *) __go_alloc (descriptor->__entry_size);
+  __builtin_memset (entry, 0, descriptor->__entry_size);
+
+  __builtin_memcpy (entry + key_offset, key, key_size);
+
+  *(char **) entry = map->__buckets[bucket_index];
+  map->__buckets[bucket_index] = entry;
+
+  map->__element_count += 1;
+
+  return entry + descriptor->__val_offset;
+}
diff --git a/libgo/runtime/go-map-len.c b/libgo/runtime/go-map-len.c
new file mode 100644
index 0000000..75b7473
--- /dev/null
+++ b/libgo/runtime/go-map-len.c
@@ -0,0 +1,21 @@
+/* go-map-len.c -- return the length of a map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "map.h"
+
+/* Return the length of a map.  This could be done inline, of course,
+   but I'm doing it as a function for now to make it easy to change the
+   map structure.  */
+
+size_t
+__go_map_len (struct __go_map *map)
+{
+  /* A nil map has no elements.  */
+  return map == NULL ? 0 : map->__element_count;
+}
diff --git a/libgo/runtime/go-map-range.c b/libgo/runtime/go-map-range.c
new file mode 100644
index 0000000..dc7aac1
--- /dev/null
+++ b/libgo/runtime/go-map-range.c
@@ -0,0 +1,102 @@
+/* go-map-range.c -- implement a range clause over a map.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "map.h"
+
+/* Initialize a range over a map.  */
+
+void
+__go_mapiterinit (const struct __go_map *h, struct __go_hash_iter *it)
+{
+  it->entry = NULL;
+  if (h != NULL)
+    {
+      it->map = h;
+      it->next_entry = NULL;
+      /* Start before bucket 0; the first __go_mapiternext call
+	 advances to the first non-empty bucket.  */
+      it->bucket = -1U;
+      __go_mapiternext(it);
+    }
+  /* NOTE(review): when H is NULL, IT->map is left uninitialized;
+     callers presumably check IT->entry before touching other
+     fields -- confirm.  */
+}
+
+/* Move to the next iteration, updating *HITER.  */
+
+void
+__go_mapiternext (struct __go_hash_iter *it)
+{
+  const void *entry;
+
+  /* If the current bucket chain has another entry, use it; otherwise
+     scan forward for the next non-empty bucket.  */
+  entry = it->next_entry;
+  if (entry == NULL)
+    {
+      const struct __go_map *map;
+      size_t bucket;
+
+      map = it->map;
+      bucket = it->bucket;
+      while (1)
+	{
+	  ++bucket;
+	  if (bucket >= map->__bucket_count)
+	    {
+	      /* Map iteration is complete.  */
+	      it->entry = NULL;
+	      return;
+	    }
+	  entry = map->__buckets[bucket];
+	  if (entry != NULL)
+	    break;
+	}
+      it->bucket = bucket;
+    }
+  it->entry = entry;
+  /* The first word of an entry is the link to the next entry in the
+     same bucket.  */
+  it->next_entry = *(const void * const *) entry;
+}
+
+/* Get the key of the current iteration.  */
+
+void
+__go_mapiter1 (struct __go_hash_iter *it, unsigned char *key)
+{
+  const struct __go_map_descriptor *desc;
+  const char *entry;
+
+  /* Copy the current entry's key into *KEY.  */
+  entry = it->entry;
+  assert(entry != NULL);
+  desc = it->map->__descriptor;
+  __builtin_memcpy (key, entry + desc->__key_offset,
+		    desc->__map_descriptor->__key_type->__size);
+}
+
+/* Get the key and value of the current iteration.  */
+
+void
+__go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
+	       unsigned char *val)
+{
+  const struct __go_map_descriptor *desc;
+  const struct __go_map_type *mt;
+  const char *entry;
+
+  /* Copy the current entry's key and value into *KEY and *VAL.  */
+  entry = it->entry;
+  assert(entry != NULL);
+  desc = it->map->__descriptor;
+  mt = desc->__map_descriptor;
+  __builtin_memcpy (key, entry + desc->__key_offset,
+		    mt->__key_type->__size);
+  __builtin_memcpy (val, entry + desc->__val_offset,
+		    mt->__val_type->__size);
+}
diff --git a/libgo/runtime/go-new-channel.c b/libgo/runtime/go-new-channel.c
new file mode 100644
index 0000000..be1eb78
--- /dev/null
+++ b/libgo/runtime/go-new-channel.c
@@ -0,0 +1,54 @@
+/* go-new-channel.c -- allocate a new channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <assert.h>
+
+#include "channel.h"
+
+#include "go-alloc.h"
+
+struct __go_channel*
+__go_new_channel (size_t element_size, size_t entries)
+{
+  struct __go_channel* ret;
+  size_t alloc_size;
+  int i;
+
+  /* Number of uint64_t units needed per element, so each buffer slot
+     is 64-bit aligned.  */
+  alloc_size = (element_size + sizeof (uint64_t) - 1) / sizeof (uint64_t);
+
+  /* We use a circular buffer which means that when next_fetch ==
+     next_store we don't know whether the buffer is empty or full.  So
+     we allocate an extra space, and always leave a space open.
+     FIXME.  */
+  if (entries != 0)
+    ++entries;
+
+  /* The buffer is allocated inline after the channel header; an
+     unbuffered channel still gets one slot.  */
+  ret = (struct __go_channel*) __go_alloc (sizeof (struct __go_channel)
+					   + ((entries == 0 ? 1 : entries)
+					      * alloc_size
+					      * sizeof (uint64_t)));
+  i = pthread_mutex_init (&ret->lock, NULL);
+  assert (i == 0);
+  i = pthread_cond_init (&ret->cond, NULL);
+  assert (i == 0);
+  ret->element_size = element_size;
+  ret->closed_op_count = 0;
+  ret->waiting_to_send = 0;
+  ret->waiting_to_receive = 0;
+  ret->selected_for_send = 0;
+  ret->selected_for_receive = 0;
+  ret->is_closed = 0;
+  ret->saw_close = 0;
+  ret->select_send_queue = NULL;
+  ret->select_receive_queue = NULL;
+  ret->select_mutex = NULL;
+  ret->select_cond = NULL;
+  /* This stores the incremented count; __go_chan_cap subtracts the
+     always-unused slot when reporting the capacity.  */
+  ret->num_entries = entries;
+  ret->next_store = 0;
+  ret->next_fetch = 0;
+  return ret;
+}
diff --git a/libgo/runtime/go-new-interface-object.c b/libgo/runtime/go-new-interface-object.c
new file mode 100644
index 0000000..56a88b9
--- /dev/null
+++ b/libgo/runtime/go-new-interface-object.c
@@ -0,0 +1,31 @@
+/* go-new-interface-object.c -- make a new interface from a non-pointer.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "go-alloc.h"
+#include "interface.h"
+
+/* Allocate a new interface for a type which is not represented as a
+   pointer.  OBJECT points to the value.  */
+
+struct __go_interface *
+__go_new_interface_object (const struct __go_type_descriptor *type_descriptor,
+			   void *methods, size_t object_size,
+			   const void *object)
+{
+  struct __go_interface *ret;
+
+  assert (!__go_is_pointer_type (type_descriptor));
+  assert (object_size == type_descriptor->__size);
+  ret = __go_alloc (sizeof (struct __go_interface));
+  ret->__type_descriptor = type_descriptor;
+  ret->__methods = methods;
+  ret->__object = __go_alloc (object_size);
+  /* FIXME: Set reference count.  */
+  __builtin_memcpy (ret->__object, object, object_size);
+  return ret;
+}
diff --git a/libgo/runtime/go-new-interface-pointer.c b/libgo/runtime/go-new-interface-pointer.c
new file mode 100644
index 0000000..e39d966
--- /dev/null
+++ b/libgo/runtime/go-new-interface-pointer.c
@@ -0,0 +1,31 @@
+/* go-new-interface-pointer.c -- make a new interface from a pointer value.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+
+#include "go-alloc.h"
+#include "go-refcount.h"
+#include "interface.h"
+
+/* Allocate a new interface for a type which is represented as a
+   pointer.  OBJECT is the value to be stored in the interface.  */
+
+struct __go_interface *
+__go_new_interface_pointer (const struct __go_type_descriptor *type_descriptor,
+			    void *methods, void *object)
+{
+  struct __go_interface *ret;
+
+  assert (__go_is_pointer_type (type_descriptor));
+  ret = __go_alloc (sizeof (struct __go_interface));
+  ret->__type_descriptor = type_descriptor;
+  ret->__methods = methods;
+  ret->__object = object;
+
+  __go_increment_refcount (object, type_descriptor);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-new-map.c b/libgo/runtime/go-new-map.c
new file mode 100644
index 0000000..5ac1a10
--- /dev/null
+++ b/libgo/runtime/go-new-map.c
@@ -0,0 +1,121 @@
+/* go-new-map.c -- allocate a new map.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "map.h"
+
+/* List of prime numbers, copied from libstdc++/src/hashtable.c.  */
+
static const unsigned long prime_list[] = /* 256 + 1 or 256 + 48 + 1 */
{
  2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
  37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
  83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
  157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
  277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
  503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
  953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
  1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
  3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
  5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
  11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
  19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
  33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
  57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
  99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
  159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
  256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
  410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
  658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
  1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
  1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
  2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
  4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
  6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
  11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
  16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
  24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
  36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
  54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
  80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
  118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
  176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
  260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
  386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
  573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
  849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
  1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul, 1725587117ul,
  1866894511ul, 2019773507ul, 2185171673ul, 2364114217ul, 2557710269ul,
  2767159799ul, 2993761039ul, 3238918481ul, 3504151727ul, 3791104843ul,
  4101556399ul, 4294967291ul,
#if __SIZEOF_LONG__ >= 8
  6442450933ul, 8589934583ul, 12884901857ul, 17179869143ul,
  25769803693ul, 34359738337ul, 51539607367ul, 68719476731ul,
  103079215087ul, 137438953447ul, 206158430123ul, 274877906899ul,
  412316860387ul, 549755813881ul, 824633720731ul, 1099511627689ul,
  1649267441579ul, 2199023255531ul, 3298534883309ul, 4398046511093ul,
  6597069766607ul, 8796093022151ul, 13194139533241ul, 17592186044399ul,
  26388279066581ul, 35184372088777ul, 52776558133177ul, 70368744177643ul,
  105553116266399ul, 140737488355213ul, 211106232532861ul, 281474976710597ul,
  562949953421231ul, 1125899906842597ul, 2251799813685119ul,
  4503599627370449ul, 9007199254740881ul, 18014398509481951ul,
  36028797018963913ul, 72057594037927931ul, 144115188075855859ul,
  288230376151711717ul, 576460752303423433ul,
  1152921504606846883ul, 2305843009213693951ul,
  4611686018427387847ul, 9223372036854775783ul,
  18446744073709551557ul
#endif
};

/* Return the next number from PRIME_LIST >= N.  If N is larger than
   every entry in the table, N itself is returned.

   This is a lower-bound binary search for the least prime >= N.  The
   original code computed MID as (low + high / 2) -- missing
   parentheses -- and moved the wrong bound in each comparison branch,
   so it did not implement a correct binary search.  */

unsigned long
__go_map_next_prime (unsigned long n)
{
  size_t low;
  size_t high;

  low = 0;
  high = sizeof prime_list / sizeof prime_list[0];
  while (low < high)
    {
      size_t mid;

      /* Overflow-safe midpoint.  Here LOW <= MID < HIGH.  */
      mid = low + (high - low) / 2;

      if (prime_list[mid] < n)
	low = mid + 1;
      else
	high = mid;
    }

  /* N is larger than the largest prime in the table.  */
  if (low >= sizeof prime_list / sizeof prime_list[0])
    return n;

  /* PRIME_LIST[LOW] is the least entry >= N (and equals N on an exact
     match).  */
  return prime_list[low];
}
+
+/* Allocate a new map.  */
+
+struct __go_map *
+__go_new_map (const struct __go_map_descriptor *descriptor, size_t entries)
+{
+  struct __go_map *ret;
+
+  if (entries == 0)
+    entries = 5;
+  else
+    entries = __go_map_next_prime (entries);
+  ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
+  ret->__descriptor = descriptor;
+  ret->__element_count = 0;
+  ret->__bucket_count = entries;
+  ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
+  __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));
+  return ret;
+}
diff --git a/libgo/runtime/go-new.c b/libgo/runtime/go-new.c
new file mode 100644
index 0000000..a7c8854
--- /dev/null
+++ b/libgo/runtime/go-new.c
@@ -0,0 +1,17 @@
+/* go-new.c -- the generic go new() function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+
/* The generic Go new() built-in: return SIZE bytes of zeroed,
   heap-allocated memory.  */

void *
__go_new (size_t size)
{
  void *p = __go_alloc (size);

  /* memset returns its first argument, so this both clears and
     returns the allocation.  */
  return __builtin_memset (p, 0, size);
}
diff --git a/libgo/runtime/go-note.c b/libgo/runtime/go-note.c
new file mode 100644
index 0000000..0bae55a
--- /dev/null
+++ b/libgo/runtime/go-note.c
@@ -0,0 +1,75 @@
+/* go-note.c -- implement notesleep, notewakeup and noteclear.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+/* A note is a one-time notification.  noteclear clears the note.
+   notesleep waits for a call to notewakeup.  notewakeup wakes up
+   every thread waiting on the note.  */
+
+#include <assert.h>
+
+#include "runtime.h"
+
/* We use a single global lock and condition variable.  It would be
   better to use a futex on Linux.  Because every note shares this
   one pair, a wakeup on any note broadcasts to sleepers on all
   notes; notesleep re-checks its own note's flag after each wakeup,
   so this is merely inefficient, not incorrect.  */

static pthread_mutex_t note_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t note_cond = PTHREAD_COND_INITIALIZER;
+
+/* noteclear is called before any calls to notesleep or
+   notewakeup.  */
+
+void
+noteclear (Note* n)
+{
+  int32 i;
+
+  i = pthread_mutex_lock (&note_lock);
+  assert (i == 0);
+
+  n->woken = 0;
+
+  i = pthread_mutex_unlock (&note_lock);
+  assert (i == 0);
+}
+
+/* Wait until notewakeup is called.  */
+
+void
+notesleep (Note* n)
+{
+  int32 i;
+
+  i = pthread_mutex_lock (&note_lock);
+  assert (i == 0);
+
+  while (!n->woken)
+    {
+      i = pthread_cond_wait (&note_cond, &note_lock);
+      assert (i == 0);
+    }
+
+  i = pthread_mutex_unlock (&note_lock);
+  assert (i == 0);
+}
+
+/* Wake up every thread sleeping on the note.  */
+
+void
+notewakeup (Note *n)
+{
+  int32 i;
+
+  i = pthread_mutex_lock (&note_lock);
+  assert (i == 0);
+
+  n->woken = 1;
+
+  i = pthread_cond_broadcast (&note_cond);
+  assert (i == 0);
+
+  i = pthread_mutex_unlock (&note_lock);
+  assert (i == 0);
+}
diff --git a/libgo/runtime/go-panic.c b/libgo/runtime/go-panic.c
new file mode 100644
index 0000000..7845fdd
--- /dev/null
+++ b/libgo/runtime/go-panic.c
@@ -0,0 +1,35 @@
+/* go-panic.c -- support for the go panic statement.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "go-panic.h"
+
+/* This implements __go_panic which is used for the panic
+   statement.  */
+
/* Implement the Go panic statement: print a panic marker to stderr
   and abort the process.  Never returns (declared noreturn in
   go-panic.h).

   The definition now uses a (void) prototype to match the header's
   declaration; the old empty-parentheses form was an unprototyped
   K&R-style definition.  */

void
__go_panic (void)
{
  fputs ("\npanic\n", stderr);
  abort ();
}
+
+/* These are used by the runtime library.  */
+
/* Print MSG as a panic message (to stderr) and abort via __go_panic.
   Never returns.  */

void
__go_panic_msg (const char* msg)
{
  __go_print_msg (1, msg);
  __go_panic ();
}
+
/* Write MSG to stderr when IS_PANIC is set, otherwise to stdout.  */

void
__go_print_msg (_Bool is_panic, const char* msg)
{
  FILE *out = is_panic ? stderr : stdout;

  fputs (msg, out);
}
diff --git a/libgo/runtime/go-panic.h b/libgo/runtime/go-panic.h
new file mode 100644
index 0000000..a61e98e
--- /dev/null
+++ b/libgo/runtime/go-panic.h
@@ -0,0 +1,16 @@
+/* go-panic.h -- declare the go panic statement.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+extern void __go_panic (void)
+  __attribute__ ((noreturn));
+
+extern void __go_panic_msg (const char* msg)
+  __attribute__ ((noreturn));
+
+extern void __go_print_msg (_Bool is_panic, const char* msg);
+
+struct __go_string;
+extern void __go_print_string (_Bool is_panic, const struct __go_string *);
diff --git a/libgo/runtime/go-print.c b/libgo/runtime/go-print.c
new file mode 100644
index 0000000..e698b52
--- /dev/null
+++ b/libgo/runtime/go-print.c
@@ -0,0 +1,71 @@
+/* go-print.c -- support for the go print statement.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include "array.h"
+#include "go-panic.h"
+#include "go-string.h"
+
+/* This implements the various little functions which are called by
+   the predeclared functions print/println/panic/panicln.  */
+
/* Print the single space that separates print/println arguments.  */

void
__go_print_space (_Bool is_panic)
{
  FILE *out = is_panic ? stderr : stdout;

  putc (' ', out);
}
+
/* Print the trailing newline for println/panicln.  */

void
__go_print_nl (_Bool is_panic)
{
  FILE *out = is_panic ? stderr : stdout;

  putc ('\n', out);
}
+
+void
+__go_print_string (_Bool is_panic, const struct __go_string* val)
+{
+  fprintf (is_panic ? stderr : stdout, "%.*s", (int) val->__length,
+	   (const char *) &val->__data[0]);
+}
+
/* Print an unsigned 64-bit integer.  */

void
__go_print_uint64 (_Bool is_panic, uint64_t val)
{
  FILE *out = is_panic ? stderr : stdout;

  fprintf (out, "%llu", (unsigned long long) val);
}
+
/* Print a signed 64-bit integer.  */

void
__go_print_int64 (_Bool is_panic, int64_t val)
{
  FILE *out = is_panic ? stderr : stdout;

  fprintf (out, "%lld", (long long) val);
}
+
/* Print a float value with enough digits to round-trip a double.  */

void
__go_print_double (_Bool is_panic, double val)
{
  FILE *out = is_panic ? stderr : stdout;

  fprintf (out, "%.24g", val);
}
+
/* Print a boolean as the Go literals "true"/"false".  */

void
__go_print_bool (_Bool is_panic, _Bool val)
{
  FILE *out = is_panic ? stderr : stdout;

  fputs (val ? "true" : "false", out);
}
+
/* Print a pointer value.  */

void
__go_print_pointer (_Bool is_panic, void *val)
{
  FILE *out = is_panic ? stderr : stdout;

  fprintf (out, "%p", val);
}
+
+void
+__go_print_slice (_Bool is_panic, struct __go_open_array val)
+{
+  fprintf (is_panic ? stderr : stdout, "[%d/%d]%p",
+	   val.__count, val.__capacity, val.__values);
+}
diff --git a/libgo/runtime/go-rec-big.c b/libgo/runtime/go-rec-big.c
new file mode 100644
index 0000000..01b6d9a
--- /dev/null
+++ b/libgo/runtime/go-rec-big.c
@@ -0,0 +1,31 @@
+/* go-rec-big.c -- receive something larger than 64 bits on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "channel.h"
+
+void
+__go_receive_big (struct __go_channel *channel, void *val, _Bool for_select)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  if (!__go_receive_acquire (channel, for_select))
+    {
+      __builtin_memset (val, 0, channel->element_size);
+      return;
+    }
+
+  offset = channel->next_fetch * alloc_size;
+  __builtin_memcpy (val, &channel->data[offset], channel->element_size);
+
+  __go_receive_release (channel);
+}
diff --git a/libgo/runtime/go-rec-nb-big.c b/libgo/runtime/go-rec-nb-big.c
new file mode 100644
index 0000000..17f6128
--- /dev/null
+++ b/libgo/runtime/go-rec-nb-big.c
@@ -0,0 +1,42 @@
+/* go-rec-nb-big.c -- nonblocking receive of something big on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "channel.h"
+
+_Bool
+__go_receive_nonblocking_big (struct __go_channel* channel, void *val)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  assert (channel->element_size > sizeof (uint64_t));
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  int data = __go_receive_nonblocking_acquire (channel);
+  if (data != RECEIVE_NONBLOCKING_ACQUIRE_DATA)
+    {
+      if (data == RECEIVE_NONBLOCKING_ACQUIRE_NODATA)
+	return 0;
+      else
+	{
+	  /* Channel is closed.  */
+	  __builtin_memset (val, 0, channel->element_size);
+	  return 1;
+	}
+    }
+
+  offset = channel->next_store * alloc_size;
+  __builtin_memcpy (val, &channel->data[offset], channel->element_size);
+
+  __go_receive_release (channel);
+
+  return 1;
+}
diff --git a/libgo/runtime/go-rec-nb-small.c b/libgo/runtime/go-rec-nb-small.c
new file mode 100644
index 0000000..27aaae9
--- /dev/null
+++ b/libgo/runtime/go-rec-nb-small.c
@@ -0,0 +1,123 @@
/* go-rec-nb-small.c -- nonblocking receive of something small on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "go-panic.h"
+#include "channel.h"
+
+/* Prepare to receive something on a nonblocking channel.  */
+
/* Prepare to receive something on a nonblocking channel.  Returns one
   of the RECEIVE_NONBLOCKING_ACQUIRE_* codes: DATA means a value is
   ready and the channel lock is still held (the caller must release
   it via __go_receive_release); NODATA and CLOSED mean the lock has
   already been released.  */

int
__go_receive_nonblocking_acquire (struct __go_channel *channel)
{
  int i;
  _Bool has_data;

  i = pthread_mutex_lock (&channel->lock);
  assert (i == 0);

  /* Wait while a select has this channel reserved for receiving.  */
  while (channel->selected_for_receive)
    {
      i = pthread_cond_wait (&channel->cond, &channel->lock);
      assert (i == 0);
    }

  /* A closed channel only reports closure once it is drained: a
     synchronous channel must have no value in flight (next_store ==
     0) and a buffered one must be empty.  */
  if (channel->is_closed
      && (channel->num_entries == 0
	  ? channel->next_store == 0
	  : channel->next_fetch == channel->next_store))
    {
      if (channel->saw_close)
	{
	  /* Each operation after the first on a closed channel counts
	     toward the panic limit.  */
	  ++channel->closed_op_count;
	  if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
	    __go_panic_msg ("too many operations on closed channel");
	}
      channel->saw_close = 1;
      __go_unlock_and_notify_selects (channel);
      return RECEIVE_NONBLOCKING_ACQUIRE_CLOSED;
    }

  if (channel->num_entries > 0)
    has_data = channel->next_fetch != channel->next_store;
  else
    {
      if (channel->waiting_to_receive)
	{
	  /* Some other goroutine is already waiting for data on this
	     channel, so we can't pick it up.  */
	  has_data = 0;
	}
      else if (channel->next_store > 0)
	{
	  /* There is data on the channel.  */
	  has_data = 1;
	}
      else if (__go_synch_with_select (channel, 0))
	{
	  /* We synched up with a select sending data, so there will
	     be data for us shortly.  Tell the select to go, and then
	     wait for the data.  */
	  __go_broadcast_to_select (channel);

	  while (channel->next_store == 0)
	    {
	      i = pthread_cond_wait (&channel->cond, &channel->lock);
	      assert (i == 0);
	    }

	  has_data = 1;
	}
      else
	{
	  /* Otherwise there is no data.  */
	  has_data = 0;
	}

      if (has_data)
	{
	  /* Reserve the pending value for this receiver; a
	     synchronous channel with data always has next_store ==
	     1.  */
	  channel->waiting_to_receive = 1;
	  assert (channel->next_store == 1);
	}
    }

  if (!has_data)
    {
      i = pthread_mutex_unlock (&channel->lock);
      assert (i == 0);
      return RECEIVE_NONBLOCKING_ACQUIRE_NODATA;
    }

  return RECEIVE_NONBLOCKING_ACQUIRE_DATA;
}
+
+/* Receive something 64 bits or smaller on a nonblocking channel.  */
+
+struct __go_receive_nonblocking_small
+__go_receive_nonblocking_small (struct __go_channel *channel)
+{
+  struct __go_receive_nonblocking_small ret;
+
+  assert (channel->element_size <= sizeof (uint64_t));
+
+  int data = __go_receive_nonblocking_acquire (channel);
+  if (data != RECEIVE_NONBLOCKING_ACQUIRE_DATA)
+    {
+      ret.__val = 0;
+      ret.__success = data == RECEIVE_NONBLOCKING_ACQUIRE_CLOSED;
+      return ret;
+    }
+
+  ret.__val = channel->data[channel->next_fetch];
+
+  __go_receive_release (channel);
+
+  ret.__success = 1;
+
+  return ret;
+}
diff --git a/libgo/runtime/go-rec-small.c b/libgo/runtime/go-rec-small.c
new file mode 100644
index 0000000..a8a61a4
--- /dev/null
+++ b/libgo/runtime/go-rec-small.c
@@ -0,0 +1,286 @@
+/* go-rec-small.c -- receive something smaller than 64 bits on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "go-panic.h"
+#include "channel.h"
+
/* This mutex controls access to the selected field of struct
   __go_channel_select.  While this mutex is held, no other mutexes
   may be acquired.  That ordering rule is what lets
   __go_synch_with_select take this mutex while holding a channel
   lock without risking deadlock.  */

pthread_mutex_t __go_select_data_mutex = PTHREAD_MUTEX_INITIALIZER;
+
/* Try to synchronize with a select waiting on a synchronized channel.
   This is used by a send or receive.  The channel is locked.  This
   returns true if it was able to synch.  */

_Bool
__go_synch_with_select (struct __go_channel *channel, _Bool is_send)
{
  struct __go_channel_select *p;
  int i;

  /* Only synchronous (unbuffered) channels rendezvous with selects
     this way.  */
  assert (channel->num_entries == 0);

  i = pthread_mutex_lock (&__go_select_data_mutex);
  assert (i == 0);

  /* Walk the queue of selects waiting for the opposite operation,
     looking for one that has not yet committed to a channel.  */
  for (p = (is_send
	    ? channel->select_receive_queue
	    : channel->select_send_queue);
       p != NULL;
       p = p->next)
    {
      if (*p->selected == NULL)
	{
	  /* Claim this select: record the winning channel and
	     direction, and reserve the channel for the select's half
	     of the rendezvous.  */
	  *p->selected = channel;
	  *p->is_read = !is_send;
	  if (is_send)
	    channel->selected_for_receive = 1;
	  else
	    channel->selected_for_send = 1;
	  break;
	}
    }

  i = pthread_mutex_unlock (&__go_select_data_mutex);
  assert (i == 0);

  /* The caller is responsible for signalling the select condition
     variable so that the other select knows that something has
     changed.  We can't signal it here because we can't acquire the
     select mutex while we hold a channel lock.  */

  return p != NULL;
}
+
+/* If we synch with a select, then we need to signal the select that
+   something has changed.  This requires grabbing the select mutex,
+   which can only be done when the channel is unlocked.  This routine
+   does the signalling.  It is called with the channel locked.  It
+   unlocks the channel, broadcasts the signal and relocks the
+   channel.  */
+
void
__go_broadcast_to_select (struct __go_channel *channel)
{
  pthread_mutex_t *select_mutex;
  pthread_cond_t *select_cond;
  int i;

  /* Capture the select fields while we still hold the channel lock;
     the channel may change once it is unlocked.  */
  select_mutex = channel->select_mutex;
  select_cond = channel->select_cond;

  i = pthread_mutex_unlock (&channel->lock);
  assert (i == 0);

  assert (select_mutex != NULL && select_cond != NULL);

  i = pthread_mutex_lock (select_mutex);
  assert (i == 0);

  i = pthread_cond_broadcast (select_cond);
  assert (i == 0);

  i = pthread_mutex_unlock (select_mutex);
  assert (i == 0);

  /* Reacquire the channel lock before returning; callers expect the
     channel to be locked across this call.  */
  i = pthread_mutex_lock (&channel->lock);
  assert (i == 0);
}
+
+/* Prepare to receive something on a channel.  Return true if the
+   channel is acquired, false if it is closed.  */
+
/* Prepare to receive something on a channel.  Returns true with the
   channel lock held and a value ready to fetch; returns false (with
   the lock released) if the channel is closed and drained.  */

_Bool
__go_receive_acquire (struct __go_channel *channel, _Bool for_select)
{
  int i;
  _Bool my_wait_lock;
  _Bool synched_with_select;

  /* MY_WAIT_LOCK records that it was this call that set the
     channel's waiting_to_receive flag, so we may keep looping even
     though the flag is set.  */
  my_wait_lock = 0;
  synched_with_select = 0;

  i = pthread_mutex_lock (&channel->lock);
  assert (i == 0);

  while (1)
    {
      _Bool need_broadcast;

      need_broadcast = 0;

      /* Check whether the channel is closed.  */
      if (channel->is_closed
	  && (channel->num_entries == 0
	      ? channel->next_store == 0
	      : channel->next_fetch == channel->next_store))
	{
	  if (channel->saw_close)
	    {
	      ++channel->closed_op_count;
	      if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
		__go_panic_msg ("too many operations on closed channel");
	    }
	  channel->saw_close = 1;
	  channel->selected_for_receive = 0;
	  __go_unlock_and_notify_selects (channel);
	  return 0;
	}

      /* If somebody else has the channel locked for receiving, we
	 have to wait.  If FOR_SELECT is true, then we are the one
	 with the lock.  */
      if (!channel->selected_for_receive || for_select)
	{
	  if (channel->num_entries == 0)
	    {
	      /* If somebody else is waiting to receive, we have to
		 wait.  */
	      if (!channel->waiting_to_receive || my_wait_lock)
		{
		  _Bool was_marked;

		  /* Lock the channel so that we get to receive
		     next.  */
		  was_marked = channel->waiting_to_receive;
		  channel->waiting_to_receive = 1;
		  my_wait_lock = 1;

		  /* See if there is a value to receive.  */
		  if (channel->next_store > 0)
		    return 1;

		  /* If we haven't already done so, try to synch with
		     a select waiting to send on this channel.  If we
		     have already synched with a select, we are just
		     looping until the select eventually causes
		     something to be sent.  */
		  if (!synched_with_select && !for_select)
		    {
		      if (__go_synch_with_select (channel, 0))
			{
			  synched_with_select = 1;
			  need_broadcast = 1;
			}
		    }

		  /* If we marked the channel as waiting, we need to
		     signal, because something changed.  It needs to
		     be a broadcast since there might be other
		     receivers waiting.  */
		  if (!was_marked)
		    {
		      i = pthread_cond_broadcast (&channel->cond);
		      assert (i == 0);
		    }
		}
	    }
	  else
	    {
	      /* If there is a value on the channel, we are OK.  */
	      if (channel->next_fetch != channel->next_store)
		return 1;
	    }
	}

      /* If we just synched with a select, then we need to signal the
	 select condition variable.  We can only do that if we unlock
	 the channel.  So we need to unlock, signal, lock, and go
	 around the loop again without waiting.  */
      if (need_broadcast)
	{
	  __go_broadcast_to_select (channel);
	  continue;
	}

      /* Wait for something to change, then loop around and try
	 again.  */

      i = pthread_cond_wait (&channel->cond, &channel->lock);
      assert (i == 0);
    }
}
+
+/* Finished receiving something on a channel.  */
+
/* Finished receiving something on a channel.  Called with the
   channel lock held (as left by a successful acquire); consumes the
   value just fetched, wakes interested threads, and releases the
   lock.  */

void
__go_receive_release (struct __go_channel *channel)
{
  int i;

  if (channel->num_entries != 0)
    channel->next_fetch = (channel->next_fetch + 1) % channel->num_entries;
  else
    {
      /* For a synchronous receiver, we tell the sender that we picked
	 up the value by setting the next_store field back to 0.
	 Using the mutexes should implement a memory barrier.  */
      assert (channel->next_store == 1);
      channel->next_store = 0;

      channel->waiting_to_receive = 0;
    }

  channel->selected_for_receive = 0;

  /* This is a broadcast to make sure that a synchronous sender sees
     it.  */
  i = pthread_cond_broadcast (&channel->cond);
  assert (i == 0);

  __go_unlock_and_notify_selects (channel);
}
+
+/* Unlock a channel and notify any waiting selects that something
+   happened.  */
+
+void
+__go_unlock_and_notify_selects (struct __go_channel *channel)
+{
+  pthread_mutex_t* select_mutex;
+  pthread_cond_t* select_cond;
+  int i;
+
+  select_mutex = channel->select_mutex;
+  select_cond = channel->select_cond;
+
+  i = pthread_mutex_unlock (&channel->lock);
+  assert (i == 0);
+
+  if (select_mutex != NULL)
+    {
+      i = pthread_mutex_lock (select_mutex);
+      assert (i == 0);
+      i = pthread_cond_broadcast (select_cond);
+      assert (i == 0);
+      i = pthread_mutex_unlock (select_mutex);
+      assert (i == 0);
+    }
+}
+
+/* Receive something 64 bits or smaller on a channel.  */
+
+uint64_t
+__go_receive_small (struct __go_channel *channel, _Bool for_select)
+{
+  uint64_t ret;
+
+  assert (channel->element_size <= sizeof (uint64_t));
+
+  if (!__go_receive_acquire (channel, for_select))
+    return 0;
+
+  ret = channel->data[channel->next_fetch];
+
+  __go_receive_release (channel);
+
+  return ret;
+}
diff --git a/libgo/runtime/go-refcount-decrement.c b/libgo/runtime/go-refcount-decrement.c
new file mode 100644
index 0000000..85dd39e
--- /dev/null
+++ b/libgo/runtime/go-refcount-decrement.c
@@ -0,0 +1,34 @@
+/* go-refcount-decrement.c -- decrement reference count.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "go-refcount.h"
+
+/* Decrement the reference count for a pointer.  */
+
void
__go_decrement_refcount (void *value,
			 const struct __go_type_descriptor *descriptor)
{
  /* S provides stack storage for a one-entry reference count queue:
     Q is the queue header and E appears to supply room for the single
     element written to queue->__entries[0] below (relies on the
     entries array sitting at the end of struct __go_refcount --
     confirm against go-refcount.h).  */
  struct
  {
    struct __go_refcount q;
    struct __go_refcount_entry e;
  } s;
  struct __go_refcount *queue;

  queue = &s.q;
  __builtin_memset (queue, 0, sizeof (struct __go_refcount));
  /* Push the queue onto the per-thread chain so the flush code can
     scan caller frames.  */
  queue->__caller = __go_refcount_head;
  __go_refcount_head = queue;
  /* Record a single computed decrement for VALUE.  */
  queue->__decrement_computed_count = 1;
  queue->__entries[0].__descriptor = descriptor;
  queue->__entries[0].__value = value;
  __go_refcount_flush_queue (queue);
  /* Pop the queue off the per-thread chain before returning.  */
  __go_refcount_head = queue->__caller;
}
diff --git a/libgo/runtime/go-refcount-flush.c b/libgo/runtime/go-refcount-flush.c
new file mode 100644
index 0000000..642a292
--- /dev/null
+++ b/libgo/runtime/go-refcount-flush.c
@@ -0,0 +1,224 @@
+/* go-refcount-flush.c -- flush reference count queue.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "go-refcount.h"
+#include "runtime.h"
+#include "malloc.h"
+
/* The head of the list of reference count queues.  This variable is
   maintained by the code generated by the compiler.  It exists in
   order to initialize the __caller field of struct __go_refcount.
   It is thread-local: each thread chains its own stack-allocated
   queues through the __caller links.  */

__thread struct __go_refcount *__go_refcount_head;
+
/* Flush QUEUE, the most recent reference count queue on this thread's
   chain: gather pending increments from QUEUE and its caller frames,
   cancel increment/decrement pairs that refer to the same pointer,
   free or forward the remaining decrements, and send whatever is
   left to the GC thread in a single message.  */

void
__go_refcount_flush_queue (struct __go_refcount *queue)
{
  unsigned int allocated;
  struct __go_refcount_msg *msg;
  struct __go_refcount *prc;
  unsigned int increment_count;
  unsigned int count;
  unsigned int offset;
  unsigned int i;
  unsigned int decrement_count;

  /* Gather all the increments.  We gather up all the increments in
     this function and in all the callers.  */
  allocated = 0;
  msg = NULL;
  increment_count = 0;
  for (prc = queue; prc != NULL; prc = prc->__caller)
    {
      count = prc->__increment_copy_count;
      if (count > 0)
	{
	  /* The entry array holds the decrement entries first (new,
	     computed, old -- in that order, per the offset sums used
	     here and below) followed by the increment entries.  */
	  offset = (prc->__decrement_new_count
		    + prc->__decrement_computed_count
		    + prc->__decrement_old_count);
	  for (i = offset; i < offset + count; ++i)
	    {
	      struct __go_refcount_entry *pre;
	      void *value;
	      struct __go_refcount *qrc;
	      _Bool found;
	      unsigned int up_count;
	      const unsigned int up_count_limit = 20;

	      pre = &prc->__entries[i];
	      value = pre->__value;
	      if (value == NULL)
		continue;
	      pre->__value = NULL;

	      /* If we find an increment for something which has a
		 decrement queued up, we can discard both the
		 increment and the decrement.  Don't look up more than
		 20 stack frames, so that this doesn't take too
		 long.  */
	      found = 0;
	      for (qrc = queue, up_count = 0;
		   qrc != NULL && up_count < up_count_limit && !found;
		   qrc = qrc->__caller, ++up_count)
		{
		  unsigned int c;
		  unsigned int j;

		  c = (qrc->__decrement_new_count
		       + qrc->__decrement_computed_count
		       + qrc->__decrement_old_count);
		  for (j = 0; j < c; ++j)
		    {
		      struct __go_refcount_entry *qre;

		      qre = &qrc->__entries[j];
		      if (qre->__value == value)
			{
			  /* Matching decrement: cancel the pair.  */
			  qre->__value = NULL;
			  found = 1;
			  break;
			}
		    }
		}

	      if (!found)
		{
		  /* If the scan stopped at the depth limit, QRC is
		     the first frame we did not examine; record that
		     its decrements were not fully matched.  */
		  if (qrc != NULL)
		    qrc->__did_not_scan_decrements = 1;
		  /* Grow the outgoing message by doubling.  */
		  if (increment_count >= allocated)
		    {
		      unsigned int new_allocated;
		      struct __go_refcount_msg *new_msg;

		      new_allocated = allocated == 0 ? 32 : allocated * 2;
		      new_msg = __go_alloc (sizeof (struct __go_refcount_msg)
					    + new_allocated * sizeof (void *));
		      if (allocated > 0)
			{
			  __builtin_memcpy (new_msg, msg,
					    (sizeof (struct __go_refcount_msg)
					     + allocated * sizeof (void *)));
			  __go_free (msg);
			}
		      allocated = new_allocated;
		      msg = new_msg;
		    }
		  msg->__pointers[increment_count] = value;
		  ++increment_count;
		}
	    }
	}

      /* If we've already scanned the callers for increments, we can
	 stop now.  */
      if (prc->__callers_were_scanned)
	break;

      /* We are going to scan all the callers of PRC for increments;
	 mark it now to avoid future scanning.  */
      prc->__callers_were_scanned = 1;
    }

  /* Gather up the decrements.  We can only send the decrements from
     the current function.  That is because callers may have queued up
     decrements for temporary objects they created in order to call
     this function.  If we apply those decrements now, we will discard
     objects that we might still be using.  */
  decrement_count = 0;

  if (queue->__did_not_scan_decrements)
    {
      /* Some increments were matched against an incomplete scan, so
	 none of our decrements can be freed locally; send them all
	 and propagate the flag to the caller's frame.  */
      if (queue->__caller != NULL)
	queue->__caller->__did_not_scan_decrements = 1;
      count = (queue->__decrement_new_count
	       + queue->__decrement_computed_count
	       + queue->__decrement_old_count);
      offset = 0;
    }
  else
    {
      /* Any decrements in __decrement_new_count can be freed
	 immediately.  We did not see any increments for these
	 objects; if we did, the increment would have cleared the
	 decrement in the loop above.  */
      count = queue->__decrement_new_count;
      for (i = 0; i < count; ++i)
	{
	  struct __go_refcount_entry *pre;
	  void *value;
	  size_t size;

	  pre = &queue->__entries[i];
	  value = pre->__value;
	  if (value == NULL)
	    continue;
	  pre->__value = NULL;

	  /* FIXME: For debugging.  We can't just free a slice because
	     it has an embedded pointer.  */
	  if (pre->__descriptor->__code != GO_SLICE
	      && mlookup (value, NULL, NULL, NULL))
	    {
	      size = pre->__descriptor->__size;

#if 0
	      /* FIXME: This is wrong if the value has embedded pointers.  */
	      __builtin_memset (value, 0xa5, size);
	      __go_free (value);
#endif
	    }
	}

      /* The computed and old decrements must still go to the GC
	 thread.  */
      count = queue->__decrement_computed_count + queue->__decrement_old_count;
      offset = queue->__decrement_new_count;
    }

  /* Append the surviving decrements after the increments in MSG,
     growing it as needed.  */
  for (i = offset; i < offset + count; ++i)
    {
      struct __go_refcount_entry *pre;
      void *value;

      pre = &queue->__entries[i];
      value = pre->__value;
      if (value == NULL)
	continue;
      pre->__value = NULL;

      if (increment_count + decrement_count >= allocated)
	{
	  unsigned int new_allocated;
	  struct __go_refcount_msg *new_msg;

	  new_allocated = allocated == 0 ? 32 : allocated * 2;
	  new_msg = __go_alloc (sizeof (struct __go_refcount_msg)
				+ new_allocated * sizeof (void *));
	  if (allocated > 0)
	    {
	      __builtin_memcpy (new_msg, msg,
				(sizeof (struct __go_refcount_msg)
				 + allocated * sizeof (void *)));
	      __go_free (msg);
	    }
	  allocated = new_allocated;
	  msg = new_msg;
	}
      msg->__pointers[increment_count + decrement_count] = value;
      ++decrement_count;
    }

  if (increment_count == 0 && decrement_count == 0)
    assert (msg == NULL);
  else
    {
      msg->__increments = increment_count;
      msg->__decrements = decrement_count;
      __go_send_refcount_msg (msg);
    }
}
diff --git a/libgo/runtime/go-refcount-increment.c b/libgo/runtime/go-refcount-increment.c
new file mode 100644
index 0000000..b304efe
--- /dev/null
+++ b/libgo/runtime/go-refcount-increment.c
@@ -0,0 +1,34 @@
+/* go-refcount-increment.c -- increment reference count.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "go-refcount.h"
+
+/* Increment the reference count for a pointer.  */
+
+void
+__go_increment_refcount (void *value,
+			 const struct __go_type_descriptor *descriptor)
+{
+  /* Build a one-entry reference count queue on the stack.  The
+     trailing E member supplies the storage behind QUEUE->__entries[0],
+     since __entries is a flexible array member at the end of struct
+     __go_refcount.  NOTE(review): this assumes the compiler places E
+     immediately after Q with no padding -- true in practice for these
+     types, but worth confirming.  */
+  struct
+  {
+    struct __go_refcount q;
+    struct __go_refcount_entry e;
+  } s;
+  struct __go_refcount *queue;
+
+  /* Zero the queue and link it onto the per-thread list so the flush
+     machinery can see it.  */
+  queue = &s.q;
+  __builtin_memset (queue, 0, sizeof (struct __go_refcount));
+  queue->__caller = __go_refcount_head;
+  __go_refcount_head = queue;
+  queue->__increment_copy_count = 1;
+  queue->__entries[0].__descriptor = descriptor;
+  queue->__entries[0].__value = value;
+  __go_refcount_flush_queue (queue);
+  /* Unlink the stack-allocated queue before it goes out of scope.  */
+  __go_refcount_head = queue->__caller;
+}
diff --git a/libgo/runtime/go-refcount-msg.c b/libgo/runtime/go-refcount-msg.c
new file mode 100644
index 0000000..2fd9db5
--- /dev/null
+++ b/libgo/runtime/go-refcount-msg.c
@@ -0,0 +1,46 @@
+/* go-refcount-msg.c -- send reference counts to GC thread.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "go-alloc.h"
+#include "go-refcount.h"
+
+/* Send a reference count message to the GC thread.  */
+
+void
+__go_send_refcount_msg (struct __go_refcount_msg *msg)
+{
+  /* Cache the GO_DEBUG_REFCOUNT environment lookup on first use.
+     NOTE(review): the INIT/DEBUG pair is not synchronized; a race
+     merely re-reads the environment, which is harmless.  */
+  static _Bool init;
+  static _Bool debug;
+
+  if (!init)
+    {
+      debug = getenv ("GO_DEBUG_REFCOUNT") != NULL;
+      init = 1;
+    }
+
+  if (debug)
+    {
+      unsigned int count;
+      unsigned int i;
+      unsigned int offset;
+
+      /* The increment pointers come first in MSG->__pointers,
+	 followed by the decrement pointers.  */
+      count = msg->__increments;
+      for (i = 0; i < count; ++i)
+	fprintf (stderr, "Increment refcount %p\n", msg->__pointers[i]);
+      offset = count;
+      count = msg->__decrements;
+      for (i = 0; i < count; ++i)
+	fprintf (stderr, "Decrement refcount %p\n",
+		 msg->__pointers[i + offset]);
+    }
+
+  /* FIXME: Not implemented.  */
+
+  /* This function takes ownership of MSG and frees it.  */
+  __go_free (msg);
+}
diff --git a/libgo/runtime/go-refcount.h b/libgo/runtime/go-refcount.h
new file mode 100644
index 0000000..a376e40
--- /dev/null
+++ b/libgo/runtime/go-refcount.h
@@ -0,0 +1,92 @@
+/* go-refcount.h -- reference count structure.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_REFCOUNT_H
+#define LIBGO_GO_REFCOUNT_H
+
+#include "go-type.h"
+
+/* Each entry in the reference count table looks like this.  */
+
+struct __go_refcount_entry
+{
+  /* The type descriptor.  */
+  const struct __go_type_descriptor *__descriptor;
+  /* If the value is a pointer, or in general if the size of the value
+     is the size of a pointer, this holds the value itself.  Otherwise
+     it holds a pointer to the value.  This will be NULL when there is
+     no value whose reference count needs adjusting.  NOTE(review):
+     whether the value is stored directly is presumably decided from
+     __descriptor (cf. __go_is_pointer_type); confirm.  */
+  void *__value;
+};
+
+/* Each function which requires reference count adjustments has a
+   local variable whose type is this structure.  */
+
+struct __go_refcount
+{
+  /* A pointer to the reference count structure for the calling
+     function.  This will be set to NULL if there is no caller with
+     reference counts, or if all reference counts in the caller have
+     been processed.  */
+  struct __go_refcount *__caller;
+  /* This flag is initialized to false.  It is set to true when we
+     have scanned all the callers for increments.  We only need to
+     scan the callers once, since we zero out each increment as we
+     find it.  */
+  _Bool __callers_were_scanned;
+  /* This flag is initialized to false.  It is set to true if we have
+     stopped scanning for decrements because there were too many stack
+     frames.  When this flag is true we can not simply free newly
+     allocated values.  */
+  _Bool __did_not_scan_decrements;
+
+  /* The number of entries for newly allocated values.  */
+  unsigned short __decrement_new_count;
+  /* The number of entries for computed values.  */
+  unsigned short __decrement_computed_count;
+  /* The number of entries for old values which must be
+     decremented.  */
+  unsigned short __decrement_old_count;
+  /* The number of entries for copied values which must be
+     incremented.  */
+  unsigned short __increment_copy_count;
+
+  /* The actual entries.  The number of elements in this array is the
+     sum of the count variables.  NOTE(review): the flush code indexes
+     decrement entries starting at __decrement_new_count, which implies
+     the entries are grouped by category in a fixed order; confirm the
+     exact layout against the compiler before relying on it.  */
+  struct __go_refcount_entry __entries[];
+};
+
+/* The struct used to send reference count adjustments to the GC
+   thread.  */
+
+struct __go_refcount_msg
+{
+  /* The current epoch.  */
+  unsigned int __epoch;
+  /* The number of pointers whose reference counts need to be
+     incremented.  */
+  unsigned short __increments;
+  /* The number of pointers whose reference counts need to be
+     decremented.  */
+  unsigned short __decrements;
+  /* The pointers whose reference counts need adjusting.  The
+     increment pointers come first, followed by the decrement
+     pointers.  This is a flexible array member: the message is
+     allocated with enough trailing space for all the pointers, and
+     ownership passes to __go_send_refcount_msg, which frees it.  */
+  void *__pointers[];
+};
+
+/* The head of the list of reference count structures.  */
+
+extern __thread struct __go_refcount *__go_refcount_head;
+
+extern void __go_increment_refcount (void *,
+				     const struct __go_type_descriptor *);
+
+extern void __go_refcount_flush_queue (struct __go_refcount *);
+
+extern void __go_send_refcount_msg (struct __go_refcount_msg *);
+
+#endif /* !defined (LIBGO_GO_REFCOUNT_H) */
diff --git a/libgo/runtime/go-reflect-call.c b/libgo/runtime/go-reflect-call.c
new file mode 100644
index 0000000..a24b933
--- /dev/null
+++ b/libgo/runtime/go-reflect-call.c
@@ -0,0 +1,318 @@
+/* go-reflect-call.c -- call reflection support for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "ffi.h"
+
+#include "go-alloc.h"
+#include "go-type.h"
+
+/* Forward declaration.  */
+
+static ffi_type *go_type_to_ffi (const struct __go_type_descriptor *);
+
+/* Return an ffi_type for a Go array type.  The libffi library does
+   not have any builtin support for passing arrays as values.  We work
+   around this by pretending that the array is a struct.  */
+
+static ffi_type *
+go_array_to_ffi (const struct __go_array_type *descriptor)
+{
+  /* Represent the array as an FFI struct whose elements are COUNT
+     copies of the element type, plus the NULL terminator that libffi
+     requires.  The allocations are apparently kept for the life of
+     the program; nothing frees them.  */
+  uintptr_t count = descriptor->__len;
+  ffi_type *element_ffi = go_type_to_ffi (descriptor->__element_type);
+  ffi_type *result;
+  uintptr_t i;
+
+  result = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (result, 0, sizeof (ffi_type));
+  result->type = FFI_TYPE_STRUCT;
+  result->elements =
+    (ffi_type **) __go_alloc ((count + 1) * sizeof (ffi_type *));
+  for (i = 0; i < count; ++i)
+    result->elements[i] = element_ffi;
+  result->elements[count] = NULL;
+  return result;
+}
+
+/* Return an ffi_type for a Go slice type.  This describes the
+   __go_open_array type defines in array.h.  */
+
+static ffi_type *
+go_slice_to_ffi (
+    const struct __go_slice_type *descriptor __attribute__ ((unused)))
+{
+  /* A slice header always has the same shape -- a pointer followed by
+     two ints (see __go_open_array in array.h) -- so the descriptor is
+     not needed.  */
+  ffi_type *result = (ffi_type *) __go_alloc (sizeof (ffi_type));
+
+  __builtin_memset (result, 0, sizeof (ffi_type));
+  result->type = FFI_TYPE_STRUCT;
+  result->elements = (ffi_type **) __go_alloc (4 * sizeof (ffi_type *));
+  result->elements[0] = &ffi_type_pointer;	/* __values */
+  result->elements[1] = &ffi_type_sint;		/* __count */
+  result->elements[2] = &ffi_type_sint;		/* __capacity */
+  result->elements[3] = NULL;
+  return result;
+}
+
+/* Return an ffi_type for a Go struct type.  */
+
+static ffi_type *
+go_struct_to_ffi (const struct __go_struct_type *descriptor)
+{
+  /* Build an FFI struct with one element per Go struct field, plus
+     the NULL terminator that libffi requires.  */
+  const struct __go_struct_field *field_array =
+    (const struct __go_struct_field *) descriptor->__fields.__values;
+  int nfields = descriptor->__fields.__count;
+  ffi_type *result;
+  int i;
+
+  result = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (result, 0, sizeof (ffi_type));
+  result->type = FFI_TYPE_STRUCT;
+  result->elements =
+    (ffi_type **) __go_alloc ((nfields + 1) * sizeof (ffi_type *));
+  for (i = 0; i < nfields; ++i)
+    result->elements[i] = go_type_to_ffi (field_array[i].__type);
+  result->elements[nfields] = NULL;
+  return result;
+}
+
+/* Return an ffi_type for a type described by a
+   __go_type_descriptor.  */
+
+static ffi_type *
+go_type_to_ffi (const struct __go_type_descriptor *descriptor)
+{
+  switch (descriptor->__code)
+    {
+    case GO_BOOL:
+      /* The FFI encoding of _Bool depends on its size on this
+	 target.  */
+      if (sizeof (_Bool) == 1)
+	return &ffi_type_uint8;
+      else if (sizeof (_Bool) == sizeof (int))
+	return &ffi_type_uint;
+      abort ();
+    case GO_FLOAT32:
+      if (sizeof (float) == 4)
+	return &ffi_type_float;
+      abort ();
+    case GO_FLOAT64:
+      if (sizeof (double) == 8)
+	return &ffi_type_double;
+      abort ();
+    case GO_FLOAT:
+      /* NOTE(review): unlike GO_FLOAT32, this does not verify
+	 sizeof (float) == 4; confirm GO_FLOAT is meant to be plain
+	 "C float" regardless of size.  */
+      return &ffi_type_float;
+    case GO_INT16:
+      return &ffi_type_sint16;
+    case GO_INT32:
+      return &ffi_type_sint32;
+    case GO_INT64:
+      return &ffi_type_sint64;
+    case GO_INT8:
+      return &ffi_type_sint8;
+    case GO_INT:
+      return &ffi_type_sint;
+    case GO_UINT16:
+      return &ffi_type_uint16;
+    case GO_UINT32:
+      return &ffi_type_uint32;
+    case GO_UINT64:
+      return &ffi_type_uint64;
+    case GO_UINT8:
+      return &ffi_type_uint8;
+    case GO_UINT:
+      return &ffi_type_uint;
+    case GO_UINTPTR:
+      /* A uintptr is pointer-sized; pick the matching fixed-width
+	 unsigned type.  */
+      if (sizeof (void *) == 2)
+	return &ffi_type_uint16;
+      else if (sizeof (void *) == 4)
+	return &ffi_type_uint32;
+      else if (sizeof (void *) == 8)
+	return &ffi_type_uint64;
+      abort ();
+    case GO_ARRAY:
+      return go_array_to_ffi ((const struct __go_array_type *) descriptor);
+    case GO_SLICE:
+      return go_slice_to_ffi ((const struct __go_slice_type *) descriptor);
+    case GO_STRUCT:
+      return go_struct_to_ffi ((const struct __go_struct_type *) descriptor);
+    case GO_STRING:
+    case GO_DOTDOTDOT:
+    case GO_CHAN:
+    case GO_FUNC:
+    case GO_INTERFACE:
+    case GO_MAP:
+    case GO_PTR:
+    case GO_UNSAFE_POINTER:
+      /* These types are always pointers, and for FFI purposes nothing
+	 else matters.  */
+      return &ffi_type_pointer;
+    default:
+      abort ();
+    }
+}
+
+/* Return the return type for a function, given the number of out
+   parameters and their types.  */
+
+static ffi_type *
+go_func_return_ffi (const struct __go_func_type *func)
+{
+  /* No results: void.  One result: the result type itself.  Multiple
+     results: an FFI struct holding all of them in order.  */
+  const struct __go_type_descriptor **out_types;
+  int nresults;
+  ffi_type *result;
+  int i;
+
+  nresults = func->__out.__count;
+  if (nresults == 0)
+    return &ffi_type_void;
+
+  out_types = (const struct __go_type_descriptor **) func->__out.__values;
+  if (nresults == 1)
+    return go_type_to_ffi (out_types[0]);
+
+  result = (ffi_type *) __go_alloc (sizeof (ffi_type));
+  __builtin_memset (result, 0, sizeof (ffi_type));
+  result->type = FFI_TYPE_STRUCT;
+  result->elements =
+    (ffi_type **) __go_alloc ((nresults + 1) * sizeof (ffi_type *));
+  for (i = 0; i < nresults; ++i)
+    result->elements[i] = go_type_to_ffi (out_types[i]);
+  result->elements[nresults] = NULL;
+  return result;
+}
+
+/* Build an ffi_cif structure for a function described by a
+   __go_func_type structure.  */
+
+static void
+go_func_to_cif (const struct __go_func_type *func, _Bool is_interface,
+		ffi_cif *cif)
+{
+  int num_params;
+  const struct __go_type_descriptor **in_types;
+  size_t num_args;
+  ffi_type **args;
+  int off;
+  int i;
+  ffi_type *rettype;
+  ffi_status status;
+
+  num_params = func->__in.__count;
+  in_types = ((const struct __go_type_descriptor **)
+	      func->__in.__values);
+
+  /* For an interface method call, the receiver is passed as an extra
+     leading pointer argument that does not appear in the Go
+     signature.  */
+  num_args = num_params + (is_interface ? 1 : 0);
+  args = (ffi_type **) __go_alloc (num_args * sizeof (ffi_type *));
+  if (is_interface)
+    args[0] = &ffi_type_pointer;
+  off = is_interface ? 1 : 0;
+  for (i = 0; i < num_params; ++i)
+    args[i + off] = go_type_to_ffi (in_types[i]);
+
+  rettype = go_func_return_ffi (func);
+
+  status = ffi_prep_cif (cif, FFI_DEFAULT_ABI, num_args, rettype, args);
+  assert (status == FFI_OK);
+}
+
+/* Get the total size required for the result parameters of a
+   function.  */
+
+static size_t
+go_results_size (const struct __go_func_type *func)
+{
+  int count;
+  const struct __go_type_descriptor **types;
+  size_t off;
+  size_t maxalign;
+  int i;
+
+  count = func->__out.__count;
+  if (count == 0)
+    return 0;
+
+  types = (const struct __go_type_descriptor **) func->__out.__values;
+
+  /* Lay the results out one after another, aligning each as its type
+     requires -- exactly the layout go_set_results reads back.  */
+  off = 0;
+  maxalign = 0;
+  for (i = 0; i < count; ++i)
+    {
+      size_t align;
+
+      align = types[i]->__field_align;
+      if (align > maxalign)
+	maxalign = align;
+      off = (off + align - 1) & ~ (align - 1);
+      off += types[i]->__size;
+    }
+
+  /* Round the total up to the largest alignment seen.  NOTE(review):
+     this assumes every __field_align is at least 1; a zero alignment
+     would make these rounding expressions misbehave.  */
+  off = (off + maxalign - 1) & ~ (maxalign - 1);
+
+  return off;
+}
+
+/* Copy the results of calling a function via FFI from CALL_RESULT
+   into the addresses in RESULTS.  */
+
+static void
+go_set_results (const struct __go_func_type *func, unsigned char *call_result,
+		void **results)
+{
+  int count;
+  const struct __go_type_descriptor **types;
+  size_t off;
+  int i;
+
+  count = func->__out.__count;
+  if (count == 0)
+    return;
+
+  types = (const struct __go_type_descriptor **) func->__out.__values;
+
+  /* Walk the packed result buffer using the same alignment rules as
+     go_results_size, copying each result to its destination.  */
+  off = 0;
+  for (i = 0; i < count; ++i)
+    {
+      size_t align;
+      size_t size;
+
+      align = types[i]->__field_align;
+      size = types[i]->__size;
+      off = (off + align - 1) & ~ (align - 1);
+      __builtin_memcpy (results[i], call_result + off, size);
+      off += size;
+    }
+}
+
+/* Call a function.  The type of the function is FUNC_TYPE, and the
+   address is FUNC_ADDR.  PARAMS is an array of parameter addresses.
+   RESULTS is an array of result addresses.  */
+
+extern void call (const struct __go_func_type *, const void *, _Bool, void **,
+		  void **)
+  asm ("libgo_reflect.reflect.call");
+
+void
+call (const struct __go_func_type *func_type, const void *func_addr,
+      _Bool is_interface, void **params, void **results)
+{
+  ffi_cif cif;
+  size_t results_size;
+  unsigned char *call_result;
+
+  assert (func_type->__common.__code == GO_FUNC);
+  go_func_to_cif (func_type, is_interface, &cif);
+
+  /* The original code never checked this allocation; on failure
+     ffi_call would have written results through a NULL pointer.
+     malloc (0) may legitimately return NULL, so only treat NULL as
+     fatal when we actually needed space.  */
+  results_size = go_results_size (func_type);
+  call_result = (unsigned char *) malloc (results_size);
+  if (call_result == NULL && results_size > 0)
+    abort ();
+
+  ffi_call (&cif, func_addr, call_result, params);
+
+  /* Scatter the packed FFI result buffer into the caller's result
+     addresses.  */
+  go_set_results (func_type, call_result, results);
+
+  free (call_result);
+}
diff --git a/libgo/runtime/go-reflect-chan.c b/libgo/runtime/go-reflect-chan.c
new file mode 100644
index 0000000..412cfee
--- /dev/null
+++ b/libgo/runtime/go-reflect-chan.c
@@ -0,0 +1,148 @@
+/* go-reflect-chan.c -- channel reflection support for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "config.h"
+#include "go-type.h"
+#include "channel.h"
+
+/* This file implements support for reflection on channels.  These
+   functions are called from reflect/value.go.  */
+
+extern unsigned char *makechan (const struct __go_type_descriptor *, uint32_t)
+  asm ("libgo_reflect.reflect.makechan");
+
+unsigned char *
+makechan (const struct __go_type_descriptor *typ, uint32_t size)
+{
+  /* Create a channel carrying elements of TYP (TYP->__size bytes
+     each) with a buffer of SIZE elements, returned as an opaque byte
+     pointer for the reflect package.  */
+  return (unsigned char *) __go_new_channel (typ->__size, size);
+}
+
+extern void chansend (unsigned char *, unsigned char *, _Bool *)
+  asm ("libgo_reflect.reflect.chansend");
+
+void
+chansend (unsigned char *ch, unsigned char *val, _Bool *pres)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  /* Elements no larger than a uint64_t travel through the channel by
+     value; anything bigger goes through the "big" entry points, which
+     copy from VAL directly.  */
+  if (channel->element_size <= sizeof (uint64_t))
+    {
+      union
+      {
+	char b[sizeof (uint64_t)];
+	uint64_t v;
+      } u;
+
+      __builtin_memset (u.b, 0, sizeof (uint64_t));
+      /* Copy the element into U at an offset chosen so that both byte
+	 orders agree on which bytes of u.v carry it; chanrecv uses
+	 the mirror-image offset.  */
+#ifndef WORDS_BIGENDIAN
+      __builtin_memcpy (u.b, val, channel->element_size);
+#else
+      __builtin_memcpy (u.b + sizeof (uint64_t) - channel->element_size, val,
+			channel->element_size);
+#endif
+      /* A NULL PRES means a blocking send; otherwise try a
+	 non-blocking send and report success through *PRES.  */
+      if (pres == NULL)
+	__go_send_small (channel, u.v, 0);
+      else
+	*pres = __go_send_nonblocking_small (channel, u.v);
+    }
+  else
+    {
+      if (pres == NULL)
+	__go_send_big (channel, val, 0);
+      else
+	*pres = __go_send_nonblocking_big (channel, val);
+    }
+}
+
+extern void chanrecv (unsigned char *, unsigned char *, _Bool *)
+  asm ("libgo_reflect.reflect.chanrecv");
+
+void
+chanrecv (unsigned char *ch, unsigned char *val, _Bool *pres)
+{
+  struct __go_channel *channel = (struct __go_channel *) ch;
+
+  /* Elements no larger than a uint64_t travel through the channel by
+     value; bigger ones are copied into VAL by the "big" entry
+     points.  */
+  if (channel->element_size <= sizeof (uint64_t))
+    {
+      union
+      {
+	char b[sizeof (uint64_t)];
+	uint64_t v;
+      } u;
+
+      /* A NULL PRES means a blocking receive; otherwise try a
+	 non-blocking receive, report success through *PRES, and leave
+	 VAL untouched on failure.  */
+      if (pres == NULL)
+	u.v = __go_receive_small (channel, 0);
+      else
+	{
+	  struct __go_receive_nonblocking_small s;
+
+	  s = __go_receive_nonblocking_small (channel);
+	  *pres = s.__success;
+	  if (!s.__success)
+	    return;
+	  u.v = s.__val;
+	}
+
+      /* Pick the element bytes out of U; the offset mirrors the one
+	 chansend used on big-endian targets.  */
+#ifndef WORDS_BIGENDIAN
+      __builtin_memcpy (val, u.b, channel->element_size);
+#else
+      __builtin_memcpy (val, u.b + sizeof (uint64_t) - channel->element_size,
+			channel->element_size);
+#endif
+    }
+  else
+    {
+      if (pres == NULL)
+	__go_receive_big (channel, val, 0);
+      else
+	*pres = __go_receive_nonblocking_big (channel, val);
+    }
+}
+
+extern _Bool chanclosed (unsigned char *)
+  asm ("libgo_reflect.reflect.chanclosed");
+
+_Bool
+chanclosed (unsigned char *ch)
+{
+  /* Report whether the channel CH (an opaque pointer from the
+     reflect package) has been closed.  */
+  return __go_builtin_closed ((struct __go_channel *) ch);
+}
+
+extern void chanclose (unsigned char *)
+  asm ("libgo_reflect.reflect.chanclose");
+
+void
+chanclose (unsigned char *ch)
+{
+  /* Close the channel CH (an opaque pointer from the reflect
+     package).  */
+  __go_builtin_close ((struct __go_channel *) ch);
+}
+
+extern int32_t chanlen (unsigned char *) asm ("libgo_reflect.reflect.chanlen");
+
+int32_t
+chanlen (unsigned char *ch)
+{
+  /* Return the number of elements currently queued in the channel
+     CH.  */
+  return (int32_t) __go_chan_len ((struct __go_channel *) ch);
+}
+
+extern int32_t chancap (unsigned char *) asm ("libgo_reflect.reflect.chancap");
+
+int32_t
+chancap (unsigned char *ch)
+{
+  /* Return the buffer capacity of the channel CH.  */
+  return (int32_t) __go_chan_cap ((struct __go_channel *) ch);
+}
diff --git a/libgo/runtime/go-reflect-map.c b/libgo/runtime/go-reflect-map.c
new file mode 100644
index 0000000..67960de
--- /dev/null
+++ b/libgo/runtime/go-reflect-map.c
@@ -0,0 +1,139 @@
+/* go-reflect-map.c -- map reflection support for Go.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "map.h"
+
+/* This file implements support for reflection on maps.  These
+   functions are called from reflect/value.go.  */
+
+extern _Bool mapaccess (unsigned char *, unsigned char *, unsigned char *)
+  asm ("libgo_reflect.reflect.mapaccess");
+
+_Bool
+mapaccess (unsigned char *m, unsigned char *key, unsigned char *val)
+{
+  /* Look KEY up in the map M.  On success copy the stored value into
+     VAL and return 1; otherwise return 0 and leave VAL untouched.  */
+  struct __go_map *map = (struct __go_map *) m;
+  const struct __go_type_descriptor *val_descriptor;
+  void *entry;
+
+  entry = __go_map_index (map, key, 0);
+  if (entry == NULL)
+    return 0;
+
+  val_descriptor = map->__descriptor->__map_descriptor->__val_type;
+  __builtin_memcpy (val, entry, val_descriptor->__size);
+  return 1;
+}
+
+extern void mapassign (unsigned char *, unsigned char *, unsigned char *)
+  asm ("libgo_reflect.reflect.mapassign");
+
+void
+mapassign (unsigned char *m, unsigned char *key, unsigned char *val)
+{
+  /* Store VAL under KEY in the map M, creating the entry if needed.
+     A NULL VAL deletes KEY instead.  */
+  struct __go_map *map = (struct __go_map *) m;
+  const struct __go_type_descriptor *val_descriptor;
+  void *entry;
+
+  if (val == NULL)
+    {
+      __go_map_delete (map, key);
+      return;
+    }
+
+  entry = __go_map_index (map, key, 1);
+  val_descriptor = map->__descriptor->__map_descriptor->__val_type;
+  __builtin_memcpy (entry, val, val_descriptor->__size);
+}
+
+extern int32_t maplen (unsigned char *)
+  asm ("libgo_reflect.reflect.maplen");
+
+int32_t
+maplen (unsigned char *m)
+{
+  /* Return the number of entries in the map M.  The original
+     declaration marked M with __attribute__ ((unused)), but the
+     parameter is in fact used below; the bogus attribute has been
+     removed.  */
+  struct __go_map *map = (struct __go_map *) m;
+
+  /* Go defines len of a nil map to be 0.  */
+  if (map == NULL)
+    return 0;
+  return (int32_t) map->__element_count;
+}
+
+extern unsigned char *mapiterinit (unsigned char *)
+  asm ("libgo_reflect.reflect.mapiterinit");
+
+unsigned char *
+mapiterinit (unsigned char *m)
+{
+  /* Allocate a fresh hash iterator, position it at the start of the
+     map M, and return it as an opaque pointer.  */
+  struct __go_hash_iter *iter =
+    (struct __go_hash_iter *) __go_alloc (sizeof (struct __go_hash_iter));
+
+  __go_mapiterinit ((struct __go_map *) m, iter);
+  return (unsigned char *) iter;
+}
+
+extern void mapiternext (unsigned char *)
+  asm ("libgo_reflect.reflect.mapiternext");
+
+void
+mapiternext (unsigned char *it)
+{
+  /* Advance the opaque map iterator IT to the next entry.  */
+  struct __go_hash_iter *iter = (struct __go_hash_iter *) it;
+
+  __go_mapiternext (iter);
+}
+
+extern _Bool mapiterkey (unsigned char *, unsigned char *)
+  asm ("libgo_reflect.reflect.mapiterkey");
+
+_Bool
+mapiterkey (unsigned char *ita, unsigned char *key)
+{
+  /* If the iterator ITA is positioned on an entry, copy its key into
+     KEY and return 1; return 0 once the iterator is exhausted.  */
+  struct __go_hash_iter *iter = (struct __go_hash_iter *) ita;
+
+  if (iter->entry == NULL)
+    return 0;
+
+  __go_mapiter1 (iter, key);
+  return 1;
+}
+
+/* Make a new map.  We have to build our own map descriptor.  */
+
+extern unsigned char *makemap (const struct __go_map_type *)
+  asm ("libgo_reflect.reflect.makemap");
+
+unsigned char *
+makemap (const struct __go_map_type *t)
+{
+  struct __go_map_descriptor *md;
+  unsigned int o;
+  const struct __go_type_descriptor *kt;
+  const struct __go_type_descriptor *vt;
+
+  /* FIXME: Reference count.  */
+  md = (struct __go_map_descriptor *) __go_alloc (sizeof (*md));
+  md->__map_descriptor = t;
+  /* Each bucket entry begins with a pointer (presumably a chain/next
+     link -- see map.h), followed by the key and then the value, each
+     aligned as its type requires.  */
+  o = sizeof (void *);
+  kt = t->__key_type;
+  o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
+  md->__key_offset = o;
+  o += kt->__size;
+  vt = t->__val_type;
+  o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
+  md->__val_offset = o;
+  o += vt->__size;
+  /* Round the entry size up to every member's alignment so an array
+     of entries keeps each member properly aligned.  */
+  o = (o + sizeof (void *) - 1) & ~ (sizeof (void *) - 1);
+  o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
+  o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
+  md->__entry_size = o;
+
+  return (unsigned char *) __go_new_map (md, 0);
+}
diff --git a/libgo/runtime/go-reflect.c b/libgo/runtime/go-reflect.c
new file mode 100644
index 0000000..3166157
--- /dev/null
+++ b/libgo/runtime/go-reflect.c
@@ -0,0 +1,221 @@
+/* go-reflect.c -- implement unsafe.Reflect and unsafe.Typeof for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "interface.h"
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "go-string.h"
+#include "go-type.h"
+
+/* For field alignment.  */
+
+struct field_align
+{
+  char c;
+  struct __go_type_descriptor *p;
+};
+
+/* The type descriptors in the runtime package.  */
+
+extern const struct __go_type_descriptor ptr_bool_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.BoolType");
+extern const struct __go_type_descriptor ptr_float32_descriptor
+  asm ("__go_td_pN33_libgo_runtime.runtime.Float32Type");
+extern const struct __go_type_descriptor ptr_float64_descriptor
+  asm ("__go_td_pN33_libgo_runtime.runtime.Float64Type");
+extern const struct __go_type_descriptor ptr_float_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.FloatType");
+extern const struct __go_type_descriptor ptr_int16_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.Int16Type");
+extern const struct __go_type_descriptor ptr_int32_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.Int32Type");
+extern const struct __go_type_descriptor ptr_int64_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.Int64Type");
+extern const struct __go_type_descriptor ptr_int8_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.Int8Type");
+extern const struct __go_type_descriptor ptr_int_descriptor
+  asm ("__go_td_pN29_libgo_runtime.runtime.IntType");
+extern const struct __go_type_descriptor ptr_uint16_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.Uint16Type");
+extern const struct __go_type_descriptor ptr_uint32_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.Uint32Type");
+extern const struct __go_type_descriptor ptr_uint64_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.Uint64Type");
+extern const struct __go_type_descriptor ptr_uint8_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.Uint8Type");
+extern const struct __go_type_descriptor ptr_uint_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.UintType");
+extern const struct __go_type_descriptor ptr_string_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.StringType");
+extern const struct __go_type_descriptor ptr_uintptr_descriptor
+  asm ("__go_td_pN33_libgo_runtime.runtime.UintptrType");
+extern const struct __go_type_descriptor ptr_dotdotdot_descriptor
+  asm ("__go_td_pN35_libgo_runtime.runtime.DotDotDotType");
+extern const struct __go_type_descriptor ptr_unsafe_pointer_decriptor
+  asm ("__go_td_pN39_libgo_runtime.runtime.UnsafePointerType");
+extern const struct __go_type_descriptor ptr_array_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.ArrayType");
+extern const struct __go_type_descriptor ptr_slice_descriptor
+  asm ("__go_td_pN31_libgo_runtime.runtime.SliceType");
+extern const struct __go_type_descriptor ptr_chan_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.ChanType");
+extern const struct __go_type_descriptor ptr_func_descriptor
+  asm ("__go_td_pN30_libgo_runtime.runtime.FuncType");
+extern const struct __go_type_descriptor ptr_interface_descriptor
+  asm ("__go_td_pN35_libgo_runtime.runtime.InterfaceType");
+extern const struct __go_type_descriptor ptr_map_descriptor
+  asm ("__go_td_pN29_libgo_runtime.runtime.MapType");
+extern const struct __go_type_descriptor ptr_ptr_descriptor
+  asm ("__go_td_pN29_libgo_runtime.runtime.PtrType");
+extern const struct __go_type_descriptor ptr_struct_descriptor
+  asm ("__go_td_pN32_libgo_runtime.runtime.StructType");
+
+/* Map a Go type code (GO_*) to the runtime type descriptor for the
+   corresponding pointer-to-*Type declared above; aborts on an unknown
+   code.  NOTE(review): this function is not static and has no
+   prototype in a header; consider making it static if it is only used
+   in this file.  */
+
+const struct __go_type_descriptor *
+get_descriptor (int code)
+{
+  switch (code)
+    {
+    case GO_BOOL:
+      return &ptr_bool_descriptor;
+    case GO_FLOAT32:
+      return &ptr_float32_descriptor;
+    case GO_FLOAT64:
+      return &ptr_float64_descriptor;
+    case GO_FLOAT:
+      return &ptr_float_descriptor;
+    case GO_INT16:
+      return &ptr_int16_descriptor;
+    case GO_INT32:
+      return &ptr_int32_descriptor;
+    case GO_INT64:
+      return &ptr_int64_descriptor;
+    case GO_INT8:
+      return &ptr_int8_descriptor;
+    case GO_INT:
+      return &ptr_int_descriptor;
+    case GO_UINT16:
+      return &ptr_uint16_descriptor;
+    case GO_UINT32:
+      return &ptr_uint32_descriptor;
+    case GO_UINT64:
+      return &ptr_uint64_descriptor;
+    case GO_UINT8:
+      return &ptr_uint8_descriptor;
+    case GO_UINT:
+      return &ptr_uint_descriptor;
+    case GO_STRING:
+      return &ptr_string_descriptor;
+    case GO_UINTPTR:
+      return &ptr_uintptr_descriptor;
+    case GO_DOTDOTDOT:
+      return &ptr_dotdotdot_descriptor;
+    case GO_UNSAFE_POINTER:
+      return &ptr_unsafe_pointer_decriptor;
+    case GO_ARRAY:
+      return &ptr_array_descriptor;
+    case GO_SLICE:
+      return &ptr_slice_descriptor;
+    case GO_CHAN:
+      return &ptr_chan_descriptor;
+    case GO_FUNC:
+      return &ptr_func_descriptor;
+    case GO_INTERFACE:
+      return &ptr_interface_descriptor;
+    case GO_MAP:
+      return &ptr_map_descriptor;
+    case GO_PTR:
+      return &ptr_ptr_descriptor;
+    case GO_STRUCT:
+      return &ptr_struct_descriptor;
+    default:
+      abort ();
+    }
+}
+
+/* Implement unsafe.Reflect.  */
+
+struct reflect_ret
+{
+  struct __go_interface *rettype;
+  void *addr;
+};
+
+struct reflect_ret Reflect (const struct __go_interface *)
+  asm ("libgo_unsafe.unsafe.Reflect");
+
+struct reflect_ret
+Reflect (const struct __go_interface *p)
+{
+  struct reflect_ret ret;
+
+  if (p == NULL)
+    {
+      /* A nil interface reflects to a nil type and a nil address.  */
+      ret.rettype = NULL;
+      ret.addr = NULL;
+    }
+  else
+    {
+      size_t size;
+
+      /* Build an interface value whose descriptor is the runtime
+	 *Type matching P's type code, and whose object is P's own
+	 type descriptor.  */
+      ret.rettype = ((struct __go_interface *)
+		     __go_alloc (sizeof (struct __go_interface)));
+      ret.rettype->__type_descriptor =
+	get_descriptor (p->__type_descriptor->__code);
+      ret.rettype->__methods = NULL;
+
+      /* This memcpy is really just an assignment of a const pointer
+	 to a non-const pointer.  FIXME: We should canonicalize this
+	 pointer, so that for a given type we always return the same
+	 pointer.  */
+      __builtin_memcpy (&ret.rettype->__object, &p->__type_descriptor,
+			sizeof (void *));
+
+      /* Make a copy of the value.  Small values always get a full
+	 uint64_t of storage.  NOTE(review): presumably callers read
+	 small values uniformly through a uint64_t; confirm.  */
+      size = p->__type_descriptor->__size;
+      if (size <= sizeof (uint64_t))
+	ret.addr = __go_alloc (sizeof (uint64_t));
+      else
+	ret.addr = __go_alloc (size);
+      if (__go_is_pointer_type (p->__type_descriptor))
+	*(void **) ret.addr = p->__object;
+      else
+	__builtin_memcpy (ret.addr, p->__object, size);
+    }
+
+  return ret;
+}
+
+/* Implement unsafe.Typeof.  */
+
+struct __go_interface *Typeof (const struct __go_interface *)
+  asm ("libgo_unsafe.unsafe.Typeof");
+
+struct __go_interface *
+Typeof (const struct __go_interface *p)
+{
+  /* Return an interface value describing the dynamic type of P, or
+     NULL for a nil interface.  */
+  if (p == NULL)
+    return NULL;
+  else
+    {
+      struct __go_interface *ret;
+
+      /* The descriptor is the runtime *Type matching P's type code;
+	 the object is P's own type descriptor.  */
+      ret = ((struct __go_interface *)
+	     __go_alloc (sizeof (struct __go_interface)));
+      ret->__type_descriptor = get_descriptor (p->__type_descriptor->__code);
+      ret->__methods = NULL;
+
+      /* This memcpy is really just an assignment of a const pointer
+	 to a non-const pointer.  FIXME: We should canonicalize this
+	 pointer, so that for a given type we always return the same
+	 pointer.  */
+      __builtin_memcpy (&ret->__object, &p->__type_descriptor, sizeof (void *));
+
+      return ret;
+    }
+}
diff --git a/libgo/runtime/go-rune.c b/libgo/runtime/go-rune.c
new file mode 100644
index 0000000..7e31eb8
--- /dev/null
+++ b/libgo/runtime/go-rune.c
@@ -0,0 +1,77 @@
+/* go-rune.c -- rune functions for Go.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-string.h"
+
+/* Get a character from the UTF-8 string STR, of length LEN.  Store
+   the Unicode character, if any, in *RUNE.  Return the number of
+   characters used from STR.  */
+
+int
+__go_get_rune (const unsigned char *str, size_t len, int *rune)
+{
+  int c, c1, c2, c3;
+
+  /* Default to the "replacement character".  */
+  *rune = 0xfffd;
+
+  /* LEN is unsigned, so this only catches LEN == 0.  Even then one
+     byte is reported as consumed so callers always make progress.  */
+  if (len <= 0)
+    return 1;
+
+  /* One-byte (ASCII) sequence: 0xxxxxxx.  */
+  c = *str;
+  if (c <= 0x7f)
+    {
+      *rune = c;
+      return 1;
+    }
+
+  if (len <= 1)
+    return 1;
+
+  /* Two-byte sequence: 110xxxxx 10xxxxxx.  */
+  c1 = str[1];
+  if ((c & 0xe0) == 0xc0
+      && (c1 & 0xc0) == 0x80)
+    {
+      *rune = (((c & 0x1f) << 6)
+	       + (c1 & 0x3f));
+      return 2;
+    }
+
+  if (len <= 2)
+    return 1;
+
+  /* Three-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.  */
+  c2 = str[2];
+  if ((c & 0xf0) == 0xe0
+      && (c1 & 0xc0) == 0x80
+      && (c2 & 0xc0) == 0x80)
+    {
+      *rune = (((c & 0xf) << 12)
+	       + ((c1 & 0x3f) << 6)
+	       + (c2 & 0x3f));
+      return 3;
+    }
+
+  if (len <= 3)
+    return 1;
+
+  /* Four-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
+     NOTE(review): overlong encodings, surrogate values, and runes
+     above 0x10ffff are not rejected here; confirm callers do not
+     depend on strict UTF-8 validation.  */
+  c3 = str[3];
+  if ((c & 0xf8) == 0xf0
+      && (c1 & 0xc0) == 0x80
+      && (c2 & 0xc0) == 0x80
+      && (c3 & 0xc0) == 0x80)
+    {
+      *rune = (((c & 0x7) << 18)
+	       + ((c1 & 0x3f) << 12)
+	       + ((c2 & 0x3f) << 6)
+	       + (c3 & 0x3f));
+      return 4;
+    }
+
+  /* Invalid encoding.  Return 1 so that we advance.  */
+  return 1;
+}
diff --git a/libgo/runtime/go-sched.c b/libgo/runtime/go-sched.c
new file mode 100644
index 0000000..2e36d31
--- /dev/null
+++ b/libgo/runtime/go-sched.c
@@ -0,0 +1,15 @@
+/* go-sched.c -- the runtime.Gosched function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <sched.h>
+
/* Expose this function to Go code under the name runtime.Gosched.  */
void Gosched (void) asm ("libgo_runtime.runtime.Gosched");

/* Implement runtime.Gosched by yielding the processor, giving other
   threads (and therefore other goroutines) a chance to run.  */
void
Gosched (void)
{
  sched_yield ();
}
diff --git a/libgo/runtime/go-select.c b/libgo/runtime/go-select.c
new file mode 100644
index 0000000..936550c
--- /dev/null
+++ b/libgo/runtime/go-select.c
@@ -0,0 +1,754 @@
+/* go-select.c -- implement select.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "channel.h"
+
+/* __go_select builds an array of these structures.  */
+
struct select_channel
{
  /* The channel being selected.  */
  struct __go_channel* channel;
  /* If this channel is selected, the value to return from __go_select:
     the 1-based position of this case in the argument list.  */
  size_t retval;
  /* If this channel is a duplicate of one which appears earlier in
     the array, this is the array index of the earlier channel.  This
     is -1UL if this is not a dup.  */
  size_t dup_index;
  /* An entry to put on the send or receive queue of a synchronous
     channel while this select is waiting.  It lives in this select's
     channel array, so it must be unlinked (clear_select_waiting)
     before __go_select returns.  */
  struct __go_channel_select queue_entry;
  /* True if selected for send.  */
  _Bool is_send;
  /* True if channel is ready--it has data to receive or space to
     send.  */
  _Bool is_ready;
};
+
/* This mutex controls access to __go_select_cond.  This mutex may not
   be acquired if any channel locks are held.  mark_select_waiting
   publishes pointers to these two objects in each channel so that
   channel operations can wake waiting selects.  */

static pthread_mutex_t __go_select_mutex = PTHREAD_MUTEX_INITIALIZER;

/* When we have to wait for channels, we tell them to trigger this
   condition variable when they send or receive something.  */

static pthread_cond_t __go_select_cond = PTHREAD_COND_INITIALIZER;
+
+/* Sort the channels by address.  This avoids deadlock when multiple
+   selects are running on overlapping sets of channels.  */
+
+static int
+channel_sort (const void *p1, const void *p2)
+{
+  const struct select_channel *c1 = (const struct select_channel *) p1;
+  const struct select_channel *c2 = (const struct select_channel *) p2;
+
+  if ((uintptr_t) c1->channel < (uintptr_t) c2->channel)
+    return -1;
+  else if ((uintptr_t) c1->channel > (uintptr_t) c2->channel)
+    return 1;
+  else
+    return 0;
+}
+
+/* Return whether there is an entry on QUEUE which can be used for a
+   synchronous send or receive.  */
+
+static _Bool
+is_queue_ready (struct __go_channel_select *queue)
+{
+  int x;
+
+  if (queue == NULL)
+    return 0;
+
+  x = pthread_mutex_lock (&__go_select_data_mutex);
+  assert (x == 0);
+
+  while (queue != NULL)
+    {
+      if (*queue->selected == NULL)
+	break;
+      queue = queue->next;
+    }
+
+  x = pthread_mutex_unlock (&__go_select_data_mutex);
+  assert (x == 0);
+
+  return queue != NULL;
+}
+
+/* Return whether CHAN is ready.  If IS_SEND is true check whether it
+   has space to send, otherwise check whether it has a value to
+   receive.  */
+
+static _Bool
+is_channel_ready (struct __go_channel* channel, _Bool is_send)
+{
+  if (is_send)
+    {
+      if (channel->selected_for_send)
+	return 0;
+      if (channel->is_closed)
+	return 1;
+      if (channel->num_entries > 0)
+	{
+	  /* An asynchronous channel is ready for sending if there is
+	     room in the buffer.  */
+	  return ((channel->next_store + 1) % channel->num_entries
+		  != channel->next_fetch);
+	}
+      else
+	{
+	  if (channel->waiting_to_send)
+	    {
+	      /* Some other goroutine is waiting to send on this
+		 channel, so we can't.  */
+	      return 0;
+	    }
+	  if (channel->waiting_to_receive)
+	    {
+	      /* Some other goroutine is waiting to receive a value,
+		 so we can send one.  */
+	      return 1;
+	    }
+	  if (is_queue_ready (channel->select_receive_queue))
+	    {
+	      /* There is a select statement waiting to synchronize
+		 with this one.  */
+	      return 1;
+	    }
+	  return 0;
+	}
+    }
+  else
+    {
+      if (channel->selected_for_receive)
+	return 0;
+      if (channel->is_closed)
+	return 1;
+      if (channel->num_entries > 0)
+	{
+	  /* An asynchronous channel is ready for receiving if there
+	     is a value in the buffer.  */
+	  return channel->next_fetch != channel->next_store;
+	}
+      else
+	{
+	  if (channel->waiting_to_receive)
+	    {
+	      /* Some other goroutine is waiting to receive from this
+		 channel, so it is not ready for us to receive.  */
+	      return 0;
+	    }
+	  if (channel->next_store > 0)
+	    {
+	      /* There is data on the channel.  */
+	      return 1;
+	    }
+	  if (is_queue_ready (channel->select_send_queue))
+	    {
+	      /* There is a select statement waiting to synchronize
+		 with this one.  */
+	      return 1;
+	    }
+	  return 0;
+	}
+    }
+}
+
+/* Mark a channel as selected.  The channel is locked.  IS_SELECTED is
+   true if the channel was selected for us by another goroutine.  We
+   set *NEEDS_BROADCAST if we need to broadcast on the select
+   condition variable.  Return true if we got it.  */
+
static _Bool
mark_channel_selected (struct __go_channel *channel, _Bool is_send,
		       _Bool is_selected, _Bool *needs_broadcast)
{
  if (channel->num_entries == 0)
    {
      /* This is a synchronous channel.  If there is no goroutine
	 currently waiting, but there is another select waiting, then
	 we need to tell that select to use this channel.  That may
	 fail--there may be no other goroutines currently waiting--as
	 a third goroutine may already have claimed the select.  */
      if (!is_selected
	  && !channel->is_closed
	  && (is_send
	      ? !channel->waiting_to_receive
	      : channel->next_store == 0))
	{
	  int x;
	  struct __go_channel_select *queue;

	  x = pthread_mutex_lock (&__go_select_data_mutex);
	  assert (x == 0);

	  queue = (is_send
		   ? channel->select_receive_queue
		   : channel->select_send_queue);
	  assert (queue != NULL);

	  /* Claim the first entry on the queue that no other
	     goroutine has claimed yet (its *selected is still
	     NULL).  */
	  while (queue != NULL)
	    {
	      if (*queue->selected == NULL)
		{
		  *queue->selected = channel;
		  *queue->is_read = !is_send;
		  break;
		}
	      queue = queue->next;
	    }

	  x = pthread_mutex_unlock (&__go_select_data_mutex);
	  assert (x == 0);

	  /* Every waiting select was already claimed by a third
	     goroutine; we can not use this channel after all.  */
	  if (queue == NULL)
	    return 0;

	  /* Reserve the opposite direction on behalf of the select we
	     just claimed.  */
	  if (is_send)
	    channel->selected_for_receive = 1;
	  else
	    channel->selected_for_send = 1;

	  /* We are going to have to tell the other select that there
	     is something to do.  */
	  *needs_broadcast = 1;
	}
    }

  /* Reserve our own direction of the operation.  */
  if (is_send)
    channel->selected_for_send = 1;
  else
    channel->selected_for_receive = 1;

  return 1;
}
+
+/* Mark a channel to indicate that a select is waiting.  The channel
+   is locked.  */
+
+static void
+mark_select_waiting (struct select_channel *sc,
+		     struct __go_channel **selected_pointer,
+		     _Bool *selected_for_read_pointer)
+{
+  struct __go_channel *channel = sc->channel;
+  _Bool is_send = sc->is_send;
+
+  if (channel->num_entries == 0)
+    {
+      struct __go_channel_select **pp;
+
+      pp = (is_send
+	    ? &channel->select_send_queue
+	    : &channel->select_receive_queue);
+
+      /* Add an entry to the queue of selects on this channel.  */
+      sc->queue_entry.next = *pp;
+      sc->queue_entry.selected = selected_pointer;
+      sc->queue_entry.is_read = selected_for_read_pointer;
+
+      *pp = &sc->queue_entry;
+    }
+
+  channel->select_mutex = &__go_select_mutex;
+  channel->select_cond = &__go_select_cond;
+
+  /* We never actually clear the select_mutex and select_cond fields.
+     In order to clear them safely, we would need to have some way of
+     knowing when no select is waiting for the channel.  Thus we
+     introduce a bit of inefficiency for every channel that select
+     needs to wait for.  This is harmless other than the performance
+     cost.  */
+}
+
+/* Remove the entry for this select waiting on this channel.  The
+   channel is locked.  We check both queues, because the channel may
+   be selected for both reading and writing.  */
+
+static void
+clear_select_waiting (struct select_channel *sc,
+		      struct __go_channel **selected_pointer)
+{
+  struct __go_channel *channel = sc->channel;
+
+  if (channel->num_entries == 0)
+    {
+      _Bool found;
+      struct __go_channel_select **pp;
+
+      found = 0;
+
+      for (pp = &channel->select_send_queue; *pp != NULL; pp = &(*pp)->next)
+	{
+	  if ((*pp)->selected == selected_pointer)
+	    {
+	      *pp = (*pp)->next;
+	      found = 1;
+	      break;
+	    }
+	}
+
+      for (pp = &channel->select_receive_queue; *pp != NULL; pp = &(*pp)->next)
+	{
+	  if ((*pp)->selected == selected_pointer)
+	    {
+	      *pp = (*pp)->next;
+	      found = 1;
+	      break;
+	    }
+	}
+
+      assert (found);
+    }
+}
+
+/* Look through the list of channels to see which ones are ready.
+   Lock each channel, and set the is_ready flag.  Return the number
+   of ready channels.  */
+
+static size_t
+lock_channels_find_ready (struct select_channel *channels, size_t count)
+{
+  size_t ready_count;
+  size_t i;
+
+  ready_count = 0;
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+      _Bool is_send = channels[i].is_send;
+      size_t dup_index = channels[i].dup_index;
+      int x;
+
+      if (channel == NULL)
+	continue;
+
+      if (dup_index != (size_t) -1UL)
+	{
+	  if (channels[dup_index].is_ready)
+	    {
+	      channels[i].is_ready = 1;
+	      ++ready_count;
+	    }
+	  continue;
+	}
+
+      x = pthread_mutex_lock (&channel->lock);
+      assert (x == 0);
+
+      if (is_channel_ready (channel, is_send))
+	{
+	  channels[i].is_ready = 1;
+	  ++ready_count;
+	}
+    }
+
+  return ready_count;
+}
+
+/* The channel we are going to select has been forced by some other
+   goroutine.  SELECTED_CHANNEL is the channel we will use,
+   SELECTED_FOR_READ is whether the other goroutine wants to read from
+   the channel.  Note that the channel could be specified multiple
+   times in this select, so we must mark each appropriate entry for
+   this channel as ready.  Every other channel is marked as not ready.
+   All the channels are locked before this routine is called.  This
+   returns the number of ready channels.  */
+
+size_t
+force_selected_channel_ready (struct select_channel *channels, size_t count,
+			      struct __go_channel *selected_channel,
+			      _Bool selected_for_read)
+{
+  size_t ready_count;
+  size_t i;
+
+  ready_count = 0;
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+      _Bool is_send = channels[i].is_send;
+
+      if (channel == NULL)
+	continue;
+
+      if (channel != selected_channel
+	  || (is_send ? !selected_for_read : selected_for_read))
+	channels[i].is_ready = 0;
+      else
+	{
+	  channels[i].is_ready = 1;
+	  ++ready_count;
+	}
+    }
+  assert (ready_count > 0);
+  return ready_count;
+}
+
+/* Unlock all the channels.  */
+
+static void
+unlock_channels (struct select_channel *channels, size_t count)
+{
+  size_t i;
+  int x;
+
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel = channels[i].channel;
+
+      if (channel == NULL)
+	continue;
+
+      if (channels[i].dup_index != (size_t) -1UL)
+	continue;
+
+      x = pthread_mutex_unlock (&channel->lock);
+      assert (x == 0);
+    }
+}
+
+/* At least one channel is ready.  Randomly pick a channel to return.
+   Unlock all the channels.  IS_SELECTED is true if the channel was
+   picked for us by some other goroutine.  If SELECTED_POINTER is not
+   NULL, remove it from the queue for all the channels.  Return the
+   retval field of the selected channel.  This will return 0 if we
+   can't use the selected channel, because it relied on synchronizing
+   with some other select, and that select already synchronized with a
+   different channel.  */
+
static size_t
unlock_channels_and_select (struct select_channel *channels,
			    size_t count, size_t ready_count,
			    _Bool is_selected,
			    struct __go_channel **selected_pointer)
{
  size_t selected;
  size_t ret;
  _Bool needs_broadcast;
  size_t i;
  int x;

  /* Pick which channel we are going to return.  */
  selected = (size_t) random () % ready_count;

  ret = 0;
  needs_broadcast = 0;

  /* Look at the channels in reverse order so that we don't unlock a
     duplicated channel until we have seen all its dups.  */
  for (i = 0; i < count; ++i)
    {
      size_t j = count - i - 1;
      struct __go_channel *channel = channels[j].channel;
      _Bool is_send = channels[j].is_send;

      if (channel == NULL)
	continue;

      if (channels[j].is_ready)
	{
	  /* SELECTED counts down through the ready channels; it is
	     zero exactly when we reach the randomly chosen one.  */
	  if (selected == 0)
	    {
	      if (mark_channel_selected (channel, is_send, is_selected,
					 &needs_broadcast))
		ret = channels[j].retval;
	    }

	  /* After the chosen channel this wraps around to (size_t)-1,
	     so no later ready channel can match zero again.  */
	  --selected;
	}

      if (channels[j].dup_index == (size_t) -1UL)
	{
	  if (selected_pointer != NULL)
	    clear_select_waiting (&channels[j], selected_pointer);

	  x = pthread_mutex_unlock (&channel->lock);
	  assert (x == 0);
	}
    }

  /* The NEEDS_BROADCAST variable is set if we are synchronizing with
     some other select statement.  We can't do the actual broadcast
     until we have unlocked all the channels.  */

  if (needs_broadcast)
    {
      x = pthread_mutex_lock (&__go_select_mutex);
      assert (x == 0);

      x = pthread_cond_broadcast (&__go_select_cond);
      assert (x == 0);

      x = pthread_mutex_unlock (&__go_select_mutex);
      assert (x == 0);
    }

  return ret;
}
+
+/* Mark all channels to show that we are waiting for them.  This is
+   called with the select mutex held, but none of the channels are
+   locked.  This returns true if some channel was found to be
+   ready.  */
+
static _Bool
mark_all_channels_waiting (struct select_channel* channels, size_t count,
			   struct __go_channel **selected_pointer,
			   _Bool *selected_for_read_pointer)
{
  _Bool ret;
  int x;
  size_t i;

  ret = 0;
  for (i = 0; i < count; ++i)
    {
      struct __go_channel *channel = channels[i].channel;
      _Bool is_send = channels[i].is_send;

      if (channels[i].channel == NULL)
	continue;

      if (channels[i].dup_index != (size_t) -1UL)
	{
	  size_t j;

	  /* A channel may be selected for both read and write.  */
	  if (channels[channels[i].dup_index].is_send != is_send)
	    {
	      /* The first occurrence has the other direction.  Skip
		 this entry only if some intermediate dup already
		 covers our direction; otherwise this entry is the
		 representative for its direction and must be queued.
		 NOTE(review): an exact dup of the first occurrence
		 (same channel AND same direction) falls through and
		 is queued again--presumably intentional so each case
		 gets its own queue entry; verify against
		 clear_select_waiting, which removes at most one entry
		 per queue.  */
	      for (j = channels[i].dup_index + 1; j < i; ++j)
		{
		  if (channels[j].channel == channel
		      && channels[j].is_send == is_send)
		    break;
		}
	      if (j < i)
		continue;
	    }
	}

      x = pthread_mutex_lock (&channel->lock);
      assert (x == 0);

      /* To avoid a race condition, we have to check again whether the
	 channel is ready.  It may have become ready since we did the
	 first set of checks but before we acquired the select mutex.
	 If we don't check here, we could sleep forever on the select
	 condition variable.  */
      if (is_channel_ready (channel, is_send))
	ret = 1;

      /* If SELECTED_POINTER is NULL, then we have already marked the
	 channel as waiting.  */
      if (selected_pointer != NULL)
	mark_select_waiting (&channels[i], selected_pointer,
			     selected_for_read_pointer);

      x = pthread_mutex_unlock (&channel->lock);
      assert (x == 0);
    }

  return ret;
}
+
+/* Implement select.  This is called by the compiler-generated code
+   with pairs of arguments: a pointer to a channel, and an int which
+   is non-zero for send, zero for receive.  */
+
+size_t
+__go_select (size_t count, _Bool has_default,
+	     struct __go_channel **channel_args, _Bool *is_send_args)
+{
+  struct select_channel stack_buffer[16];
+  struct select_channel *allocated_buffer;
+  struct select_channel *channels;
+  size_t i;
+  int x;
+  struct __go_channel *selected_channel;
+  _Bool selected_for_read;
+  _Bool is_queued;
+
+  if (count < sizeof stack_buffer / sizeof stack_buffer[0])
+    {
+      channels = &stack_buffer[0];
+      allocated_buffer = NULL;
+    }
+  else
+    {
+      allocated_buffer = ((struct select_channel *)
+			  malloc (count * sizeof (struct select_channel)));
+      channels = allocated_buffer;
+    }
+
+  for (i = 0; i < count; ++i)
+    {
+      struct __go_channel *channel_arg = channel_args[i];
+      _Bool is_send = is_send_args[i];
+
+      channels[i].channel = (struct __go_channel*) channel_arg;
+      channels[i].retval = i + 1;
+      channels[i].dup_index = (size_t) -1UL;
+      channels[i].queue_entry.next = NULL;
+      channels[i].queue_entry.selected = NULL;
+      channels[i].is_send = is_send;
+      channels[i].is_ready = 0;
+    }
+
+  qsort (channels, count, sizeof (struct select_channel), channel_sort);
+
+  for (i = 0; i < count; ++i)
+    {
+      size_t j;
+
+      for (j = 0; j < i; ++j)
+	{
+	  if (channels[j].channel == channels[i].channel)
+	    {
+	      channels[i].dup_index = j;
+	      break;
+	    }
+	}
+    }
+
+  /* SELECT_CHANNEL is used to select synchronized channels.  If no
+     channels are ready, we store a pointer to this variable on the
+     select queue for each synchronized channel.  Because the variable
+     may be set by channel operations running in other goroutines,
+     SELECT_CHANNEL may only be accessed when all the channels are
+     locked and/or when the select_data_mutex is locked.  */
+  selected_channel = NULL;
+
+  /* SELECTED_FOR_READ is set to true if SELECTED_CHANNEL was set by a
+     goroutine which wants to read from the channel.  The access
+     restrictions for this are like those for SELECTED_CHANNEL.  */
+  selected_for_read = 0;
+
+  /* IS_QUEUED is true if we have queued up this select on the queues
+     for any associated synchronous channels.  We only do this if no
+     channels are ready the first time around the loop.  */
+  is_queued = 0;
+
+  while (1)
+    {
+      int ready_count;
+      _Bool is_selected;
+
+      /* Lock all channels, identify which ones are ready.  */
+      ready_count = lock_channels_find_ready (channels, count);
+
+      /* All the channels are locked, so we can look at
+	 SELECTED_CHANNEL.  If it is not NULL, then our choice has
+	 been forced by some other goroutine.  This can only happen
+	 after the first time through the loop.  */
+      is_selected = selected_channel != NULL;
+      if (is_selected)
+	ready_count = force_selected_channel_ready (channels, count,
+						    selected_channel,
+						    selected_for_read);
+
+      if (ready_count > 0)
+	{
+	  size_t ret;
+
+	  ret = unlock_channels_and_select (channels, count, ready_count,
+					    is_selected,
+					    (is_queued
+					     ? &selected_channel
+					     : NULL));
+
+	  /* If RET is zero, it means that the channel we picked
+	     turned out not to be ready, because some other select
+	     grabbed it during our traversal.  Loop around and try
+	     again.  */
+	  if (ret == 0)
+	    {
+	      is_queued = 0;
+	      /* We are no longer on any channel queues, so it is safe
+		 to touch SELECTED_CHANNEL here.  It must be NULL,
+		 because otherwise that would somebody has promised to
+		 synch up with us and then failed to do so.  */
+	      assert (selected_channel == NULL);
+	      continue;
+	    }
+
+	  if (allocated_buffer != NULL)
+	    free (allocated_buffer);
+
+	  return ret;
+	}
+
+      /* No channels were ready.  */
+
+      unlock_channels (channels, count);
+
+      if (has_default)
+	{
+	  /* Use the default clause.  */
+	  if (allocated_buffer != NULL)
+	    free (allocated_buffer);
+	  return 0;
+	}
+
+      /* This is a blocking select.  Grab the select lock, tell all
+	 the channels to notify us when something happens, and wait
+	 for something to happen.  */
+
+      x = pthread_mutex_lock (&__go_select_mutex);
+      assert (x == 0);
+
+      /* Check whether CHANNEL_SELECTED was set while the channels
+	 were unlocked.  If it was set, then we can simply loop around
+	 again.  We need to check this while the select mutex is held.
+	 It is possible that something will set CHANNEL_SELECTED while
+	 we mark the channels as waiting.  If this happens, that
+	 goroutine is required to signal the select condition
+	 variable, which means acquiring the select mutex.  Since we
+	 have the select mutex locked ourselves, we can not miss that
+	 signal.  */
+
+      x = pthread_mutex_lock (&__go_select_data_mutex);
+      assert (x == 0);
+
+      is_selected = selected_channel != NULL;
+
+      x = pthread_mutex_unlock (&__go_select_data_mutex);
+      assert (x == 0);
+
+      if (!is_selected)
+	{
+	  /* Mark the channels as waiting, and check whether they have
+	     become ready.  */
+	  if (!mark_all_channels_waiting (channels, count,
+					  (is_queued
+					   ? NULL
+					   : &selected_channel),
+					  (is_queued
+					   ? NULL
+					   : &selected_for_read)))
+	    {
+	      x = pthread_cond_wait (&__go_select_cond, &__go_select_mutex);
+	      assert (x == 0);
+	    }
+
+	  is_queued = 1;
+	}
+
+      x = pthread_mutex_unlock (&__go_select_mutex);
+      assert (x == 0);
+    }
+}
diff --git a/libgo/runtime/go-semacquire.c b/libgo/runtime/go-semacquire.c
new file mode 100644
index 0000000..47daf78
--- /dev/null
+++ b/libgo/runtime/go-semacquire.c
@@ -0,0 +1,121 @@
+/* go-semacquire.c -- implement runtime.Semacquire and runtime.Semrelease.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <pthread.h>
+
+/* We use a single global lock and condition variable.  This is
+   painful, since it will cause unnecessary contention, but is hard to
+   avoid in a portable manner.  On Linux we can use futexes, but they
+   are unfortunately not exposed by libc and are thus also hard to use
+   portably.  */
+
/* SEM_LOCK protects sleeping in Semacquire; SEM_COND is broadcast by
   Semrelease when a count goes from zero to positive.  */
static pthread_mutex_t sem_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sem_cond = PTHREAD_COND_INITIALIZER;
+
+/* If the value in *ADDR is positive, and we are able to atomically
+   decrement it, return true.  Otherwise do nothing and return
+   false.  */
+
/* If the value in *ADDR is positive, and we are able to atomically
   decrement it, return true.  Otherwise do nothing and return
   false.  */

static _Bool
acquire (int32_t *addr)
{
  for (;;)
    {
      int32_t old = *addr;

      if (old <= 0)
	return 0;

      /* Try to take one unit; on a lost race, reread and retry.  */
      if (__sync_bool_compare_and_swap (addr, old, old - 1))
	return 1;
    }
}
+
+/* Implement runtime.Semacquire.  ADDR points to a semaphore count.
+   We have acquired the semaphore when we have decremented the count
+   and it remains nonnegative.  */
+
+void Semacquire (int32_t *) asm ("libgo_runtime.runtime.Semacquire");
+
+void
+Semacquire (int32_t *addr)
+{
+  while (1)
+    {
+      int i;
+
+      /* If the current count is positive, and we are able to atomically
+	 decrement it, then we have acquired the semaphore.  */
+      if (acquire (addr))
+	return;
+
+      /* Lock the mutex.  */
+      i = pthread_mutex_lock (&sem_lock);
+      assert (i == 0);
+
+      /* Check the count again with the mutex locked.  */
+      if (acquire (addr))
+	{
+	  i = pthread_mutex_unlock (&sem_lock);
+	  assert (i == 0);
+	  return;
+	}
+
+      /* The count is zero.  Even if a call to runtime.Semrelease
+	 increments it to become positive, that call will try to
+	 acquire the mutex and block, so we are sure to see the signal
+	 of the condition variable.  */
+      i = pthread_cond_wait (&sem_cond, &sem_lock);
+      assert (i == 0);
+
+      /* Unlock the mutex and try again.  */
+      i = pthread_mutex_unlock (&sem_lock);
+      assert (i == 0);
+    }
+}
+
+/* Implement runtime.Semrelease.  ADDR points to a semaphore count.  We
+   must atomically increment the count.  If the count becomes
+   positive, we signal the condition variable to wake up another
+   process.  */
+
+void Semrelease (int32_t *) asm ("libgo_runtime.runtime.Semrelease");
+
+void
+Semrelease (int32_t *addr)
+{
+  int32_t val;
+
+  val = __sync_fetch_and_add (addr, 1);
+
+  /* VAL is the old value.  It should never be negative.  If it is
+     negative, that implies that Semacquire somehow decremented a zero
+     value, or that the count has overflowed.  */
+  assert (val >= 0);
+
+  /* If the old value was zero, then we have now released a count, and
+     we signal the condition variable.  If the old value was positive,
+     then nobody can be waiting.  We have to use
+     pthread_cond_broadcast, not pthread_cond_signal, because
+     otherwise there would be a race condition when the count is
+     incremented twice before any locker manages to decrement it.  */
+  if (val == 0)
+    {
+      int i;
+
+      i = pthread_mutex_lock (&sem_lock);
+      assert (i == 0);
+
+      i = pthread_cond_broadcast (&sem_cond);
+      assert (i == 0);
+
+      i = pthread_mutex_unlock (&sem_lock);
+      assert (i == 0);
+    }
+}
diff --git a/libgo/runtime/go-send-big.c b/libgo/runtime/go-send-big.c
new file mode 100644
index 0000000..a565251
--- /dev/null
+++ b/libgo/runtime/go-send-big.c
@@ -0,0 +1,28 @@
+/* go-send-big.c -- send something bigger than uint64_t on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "channel.h"
+
+void
+__go_send_big (struct __go_channel* channel, const void *val, _Bool for_select)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  if (!__go_send_acquire (channel, for_select))
+    return;
+
+  offset = channel->next_store * alloc_size;
+  __builtin_memcpy (&channel->data[offset], val, channel->element_size);
+
+  __go_send_release (channel);
+}
diff --git a/libgo/runtime/go-send-nb-big.c b/libgo/runtime/go-send-nb-big.c
new file mode 100644
index 0000000..aad1bd1
--- /dev/null
+++ b/libgo/runtime/go-send-nb-big.c
@@ -0,0 +1,33 @@
+/* go-send-nb-big.c -- nonblocking send of something big on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "channel.h"
+
+_Bool
+__go_send_nonblocking_big (struct __go_channel* channel, const void *val)
+{
+  size_t alloc_size;
+  size_t offset;
+
+  assert (channel->element_size > sizeof (uint64_t));
+
+  alloc_size = ((channel->element_size + sizeof (uint64_t) - 1)
+		/ sizeof (uint64_t));
+
+  int data = __go_send_nonblocking_acquire (channel);
+  if (data != SEND_NONBLOCKING_ACQUIRE_SPACE)
+    return data == SEND_NONBLOCKING_ACQUIRE_CLOSED;
+
+  offset = channel->next_store * alloc_size;
+  __builtin_memcpy (&channel->data[offset], val, channel->element_size);
+
+  __go_send_release (channel);
+
+  return 1;
+}
diff --git a/libgo/runtime/go-send-nb-small.c b/libgo/runtime/go-send-nb-small.c
new file mode 100644
index 0000000..e49223e
--- /dev/null
+++ b/libgo/runtime/go-send-nb-small.c
@@ -0,0 +1,108 @@
+/* go-send-nb-small.c -- nonblocking send of something small on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "go-panic.h"
+#include "channel.h"
+
+/* Prepare to send something on a nonblocking channel.  */
+
int
__go_send_nonblocking_acquire (struct __go_channel *channel)
{
  int i;
  _Bool has_space;

  i = pthread_mutex_lock (&channel->lock);
  assert (i == 0);

  /* A send already chosen by a select statement has priority; wait
     for it to complete before deciding whether we have space.  */
  while (channel->selected_for_send)
    {
      i = pthread_cond_wait (&channel->cond, &channel->lock);
      assert (i == 0);
    }

  if (channel->is_closed)
    {
      /* Operating on a closed channel panics after it has happened
	 MAX_CLOSED_OPERATIONS times.  */
      ++channel->closed_op_count;
      if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
	__go_panic_msg ("too many operations on closed channel");
      i = pthread_mutex_unlock (&channel->lock);
      assert (i == 0);
      return SEND_NONBLOCKING_ACQUIRE_CLOSED;
    }

  /* A buffered channel has space while the buffer is not full.  */
  if (channel->num_entries > 0)
      has_space = ((channel->next_store + 1) % channel->num_entries
		   != channel->next_fetch);
  else
    {
      /* This is a synchronous channel.  If somebody is currently
	 sending, then we can't send.  Otherwise, see if somebody is
	 waiting to receive, or see if we can synch with a select.  */
      if (channel->waiting_to_send)
	{
	  /* Some other goroutine is currently sending on this
	     channel, which means that we can't.  */
	  has_space = 0;
	}
      else if (channel->waiting_to_receive)
	{
	  /* Some other goroutine is waiting to receive a value, so we
	     can send directly to them.  */
	  has_space = 1;
	}
      else if (__go_synch_with_select (channel, 1))
	{
	  /* We found a select waiting to receive data, so we can send
	     to that.  */
	  __go_broadcast_to_select (channel);
	  has_space = 1;
	}
      else
	{
	  /* Otherwise, we can't send, because nobody is waiting to
	     receive.  */
	  has_space = 0;
	}

      if (has_space)
	{
	  channel->waiting_to_send = 1;
	  assert (channel->next_store == 0);
	}
    }

  if (!has_space)
    {
      i = pthread_mutex_unlock (&channel->lock);
      assert (i == 0);

      return SEND_NONBLOCKING_ACQUIRE_NOSPACE;
    }

  /* On success the channel lock is still held; the caller stores the
     value and then calls __go_send_release.  */
  return SEND_NONBLOCKING_ACQUIRE_SPACE;
}
+
+/* Send something 64 bits or smaller on a channel.  */
+
+_Bool
+__go_send_nonblocking_small (struct __go_channel *channel, uint64_t val)
+{
+  assert (channel->element_size <= sizeof (uint64_t));
+
+  int data = __go_send_nonblocking_acquire (channel);
+  if (data != SEND_NONBLOCKING_ACQUIRE_SPACE)
+    return data == SEND_NONBLOCKING_ACQUIRE_CLOSED;
+
+  channel->data[channel->next_store] = val;
+
+  __go_send_release (channel);
+
+  return 1;
+}
diff --git a/libgo/runtime/go-send-small.c b/libgo/runtime/go-send-small.c
new file mode 100644
index 0000000..b733935
--- /dev/null
+++ b/libgo/runtime/go-send-small.c
@@ -0,0 +1,158 @@
+/* go-send-small.c -- send something 64 bits or smaller on a channel.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stdint.h>
+#include <assert.h>
+
+#include "go-panic.h"
+#include "channel.h"
+
/* Prepare to send something on a channel.  Return true if the channel
   is acquired, false, if it is closed.  FOR_SELECT is true if this
   call is being made after a select statement returned with this
   channel selected.  On success the channel lock is still held; the
   caller stores the value and then calls __go_send_release, which
   drops the lock.  On failure (closed channel) the lock has already
   been released.  */

_Bool
__go_send_acquire (struct __go_channel *channel, _Bool for_select)
{
  int i;

  i = pthread_mutex_lock (&channel->lock);
  assert (i == 0);

  while (1)
    {
      /* Check whether the channel is closed.  */
      if (channel->is_closed)
	{
	  /* Repeated operations on a closed channel are treated as a
	     program error after a bounded number of attempts.  */
	  ++channel->closed_op_count;
	  if (channel->closed_op_count >= MAX_CLOSED_OPERATIONS)
	    __go_panic_msg ("too many operations on closed channel");
	  channel->selected_for_send = 0;
	  /* Drops the mutex and wakes any selects watching this
	     channel.  */
	  __go_unlock_and_notify_selects (channel);
	  return 0;
	}

      /* If somebody else has the channel locked for sending, we have
	 to wait.  If FOR_SELECT is true, then we are the one with the
	 lock.  */
      if (!channel->selected_for_send || for_select)
	{
	  if (channel->num_entries == 0)
	    {
	      /* This is a synchronous channel.  If nobody else is
		 waiting to send, we grab the channel and tell the
		 caller to send the data.  We will then wait for a
		 receiver.  */
	      if (!channel->waiting_to_send)
		{
		  /* For synchronous channels NEXT_STORE doubles as
		     the "value present" flag, so it must be clear
		     here.  */
		  assert (channel->next_store == 0);
		  return 1;
		}
	    }
	  else
	    {
	      /* If there is room on the channel, we are OK.  */
	      if ((channel->next_store + 1) % channel->num_entries
		  != channel->next_fetch)
		return 1;
	    }
	}

      /* Wait for something to change, then loop around and try
	 again.  */

      i = pthread_cond_wait (&channel->cond, &channel->lock);
      assert (i == 0);
    }
}
+
/* Finished sending something on a channel.  Called with the channel
   lock held (acquired by __go_send_acquire) and after the caller has
   stored the value; releases the lock before returning.  For a
   synchronous channel this blocks until a receiver has taken the
   value.  */

void
__go_send_release (struct __go_channel *channel)
{
  int i;

  if (channel->num_entries != 0)
    {
      /* This is a buffered channel.  Bump the store count and signal
	 the condition variable.  */
      channel->next_store = (channel->next_store + 1) % channel->num_entries;

      i = pthread_cond_signal (&channel->cond);
      assert (i == 0);
    }
  else
    {
      _Bool synched_with_select;

      /* This is a synchronous channel.  Indicate that we have a value
	 waiting.  NEXT_STORE is used as the "value present" flag; the
	 receiver clears it once the value has been taken.  */
      channel->next_store = 1;
      channel->waiting_to_send = 1;

      /* Tell everybody else to do something.  This has to be a
	 broadcast because we might have both senders and receivers
	 waiting on the condition, but senders won't send another
	 signal.  */
      i = pthread_cond_broadcast (&channel->cond);
      assert (i == 0);

      /* Wait until the value is received.  */
      synched_with_select = 0;
      while (1)
	{
	  if (channel->next_store == 0)
	    break;

	  /* If nobody is currently waiting to receive, try to synch
	     up with a select.  Only attempt this once per send so we
	     don't wake the selects repeatedly.  */
	  if (!channel->waiting_to_receive && !synched_with_select)
	    {
	      if (__go_synch_with_select (channel, 1))
		{
		  synched_with_select = 1;
		  __go_broadcast_to_select (channel);
		  continue;
		}
	    }

	  i = pthread_cond_wait (&channel->cond, &channel->lock);
	  assert (i == 0);
	}

      channel->waiting_to_send = 0;

      /* Using the mutexes should implement a memory barrier.  */

      /* We have to signal again since we cleared the waiting_to_send
	 field.  This has to be a broadcast because both senders and
	 receivers might be waiting, but only senders will be able to
	 act.  */
      i = pthread_cond_broadcast (&channel->cond);
      assert (i == 0);
    }

  channel->selected_for_send = 0;

  /* Drops the channel lock and wakes any selects watching this
     channel.  */
  __go_unlock_and_notify_selects (channel);
}
+
+/* Send something 64 bits or smaller on a channel.  */
+
+void
+__go_send_small (struct __go_channel *channel, uint64_t val, _Bool for_select)
+{
+  assert (channel->element_size <= sizeof (uint64_t));
+
+  if (!__go_send_acquire (channel, for_select))
+    return;
+
+  channel->data[channel->next_store] = val;
+
+  __go_send_release (channel);
+}
diff --git a/libgo/runtime/go-signal.c b/libgo/runtime/go-signal.c
new file mode 100644
index 0000000..b064b88
--- /dev/null
+++ b/libgo/runtime/go-signal.c
@@ -0,0 +1,137 @@
+/* go-signal.c -- signal handling for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <signal.h>
+#include <stdlib.h>
+
+#include "go-signal.h"
+
+#include "runtime.h"
+
+#undef int 
+
+#ifndef SA_ONSTACK
+#define SA_ONSTACK 0
+#endif
+
/* What to do for a signal.  One entry per signal the Go runtime
   installs a handler for; see the SIGNALS table below.  */

struct sigtab
{
  /* Signal number.  */
  int sig;
  /* Nonzero if the signal should be ignored (the handler returns
     without terminating the process) when the Go program has not
     registered interest in it.  */
  _Bool ignore;
};
+
+/* What to do for signals.  */
+
+static struct sigtab signals[] =
+{
+  { SIGHUP, 0 },
+  { SIGINT, 0 },
+  { SIGALRM, 1 },
+  { SIGTERM, 0 },
+#ifdef SIGUSR1
+  { SIGUSR1, 1 },
+#endif
+#ifdef SIGUSR2
+  { SIGUSR2, 1 },
+#endif
+#ifdef SIGTSTP
+  { SIGTSTP, 1 },
+#endif
+#ifdef SIGTTIN
+  { SIGTTIN, 1 },
+#endif
+#ifdef SIGTTOU
+  { SIGTTOU, 1 },
+#endif
+#ifdef SIGURG
+  { SIGURG, 1 },
+#endif
+#ifdef SIGXCPU
+  { SIGXCPU, 1 },
+#endif
+#ifdef SIGXFSZ
+  { SIGXFSZ, 1 },
+#endif
+#ifdef SIGVTARLM
+  { SIGVTALRM, 1 },
+#endif
+#ifdef SIGPROF
+  { SIGPROF, 1 },
+#endif
+#ifdef SIGWINCH
+  { SIGWINCH, 1 },
+#endif
+#ifdef SIGIO
+  { SIGIO, 1 },
+#endif
+#ifdef SIGPWR
+  { SIGPWR, 1 },
+#endif
+  { -1, 0 }
+};
+
+/* The Go signal handler.  */
+
+static void
+sighandler (int sig)
+{
+  int i;
+
+  if (sigsend (sig))
+    return;
+  for (i = 0; signals[i].sig != -1; ++i)
+    {
+      if (signals[i].sig == sig)
+	{
+	  struct sigaction sa;
+
+	  if (signals[i].ignore)
+	    return;
+
+	  memset (&sa, 0, sizeof sa);
+
+	  sa.sa_handler = SIG_DFL;
+
+	  i = sigemptyset (&sa.sa_mask);
+	  assert (i == 0);
+
+	  if (sigaction (sig, &sa, NULL) != 0)
+	    abort ();
+
+	  raise (sig);
+	  exit (2);
+	}
+    }
+  abort ();
+}
+
+/* Initialize signal handling for Go.  This is called when the program
+   starts.  */
+
+void
+__initsig ()
+{
+  struct sigaction sa;
+  int i;
+
+  siginit ();
+
+  memset (&sa, 0, sizeof sa);
+
+  sa.sa_handler = sighandler;
+
+  i = sigfillset (&sa.sa_mask);
+  assert (i == 0);
+
+  for (i = 0; signals[i].sig != -1; ++i)
+    if (sigaction (signals[i].sig, &sa, NULL) != 0)
+      assert (0);
+}
diff --git a/libgo/runtime/go-signal.h b/libgo/runtime/go-signal.h
new file mode 100644
index 0000000..a30173a
--- /dev/null
+++ b/libgo/runtime/go-signal.h
@@ -0,0 +1,7 @@
+/* go-signal.h -- signal handling for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+extern void __initsig (void);
diff --git a/libgo/runtime/go-strcmp.c b/libgo/runtime/go-strcmp.c
new file mode 100644
index 0000000..d33b4d8
--- /dev/null
+++ b/libgo/runtime/go-strcmp.c
@@ -0,0 +1,40 @@
+/* go-strcmp.c -- the go string comparison function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+
+int
+__go_strcmp(const struct __go_string* s1, const struct __go_string* s2)
+{
+  int i;
+
+  if (s1 == NULL)
+    {
+      if (s2 == NULL || s2->__length == 0)
+	return 0;
+      return -1;
+    }
+  else if (s2 == NULL)
+    {
+      if (s1->__length == 0)
+	return 0;
+      return 1;
+    }
+
+  i = __builtin_memcmp(s1->__data, s2->__data,
+		       (s1->__length < s2->__length
+			? s1->__length
+			: s2->__length));
+  if (i != 0)
+    return i;
+
+  if (s1->__length < s2->__length)
+    return -1;
+  else if (s1->__length > s2->__length)
+    return 1;
+  else
+    return 0;
+}
diff --git a/libgo/runtime/go-string.h b/libgo/runtime/go-string.h
new file mode 100644
index 0000000..c1a1f7f
--- /dev/null
+++ b/libgo/runtime/go-string.h
@@ -0,0 +1,47 @@
+/* go-string.h -- the string type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_STRING_H
+#define LIBGO_GO_STRING_H
+
+#include <stddef.h>
+
/* A string is represented as a pointer to this structure.  The bytes
   are stored inline immediately after the length, as a flexible array
   member, so one allocation holds the whole string.  A NULL pointer
   is treated as the empty string throughout the runtime.  */

struct __go_string
{
  /* The number of bytes in the string.  */
  size_t __length;
  /* The actual bytes, not NUL-terminated.  */
  unsigned char __data[];
};
+
+static inline _Bool
+__go_strings_equal (const struct __go_string *s1, const struct __go_string *s2)
+{
+  if (s1 == NULL)
+    return s2 == NULL || s2->__length == 0;
+  if (s2 == NULL)
+    return s1->__length == 0;
+  if (s1->__length != s2->__length)
+    return 0;
+  return __builtin_memcmp (s1->__data, s2->__data, s1->__length) == 0;
+}
+
/* Compare two strings given as pointers to string pointers, the form
   used in type descriptors.  A NULL outer pointer only equals another
   NULL outer pointer.  */
static inline _Bool
__go_ptr_strings_equal (const struct __go_string * const *ps1,
			const struct __go_string * const *ps2)
{
  if (ps1 == NULL || ps2 == NULL)
    return ps1 == ps2;
  return __go_strings_equal (*ps1, *ps2);
}
+
+extern int __go_get_rune (const unsigned char *, size_t, int *);
+
+#endif /* !defined(LIBGO_GO_STRING_H) */
diff --git a/libgo/runtime/go-strplus.c b/libgo/runtime/go-strplus.c
new file mode 100644
index 0000000..151ede5
--- /dev/null
+++ b/libgo/runtime/go-strplus.c
@@ -0,0 +1,27 @@
+/* go-strplus.c -- the go string append function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-string.h"
+
+const struct __go_string *
+__go_string_plus (const struct __go_string *s1, const struct __go_string *s2)
+{
+  size_t len;
+  struct __go_string* ret;
+
+  if (s1 == NULL)
+    return s2;
+  else if (s2 == NULL)
+    return s1;
+
+  len = s1->__length + s2->__length;
+  ret = (struct __go_string *) __go_alloc (sizeof (struct __go_string) + len);
+  ret->__length = len;
+  __builtin_memcpy (ret->__data, s1->__data, s1->__length);
+  __builtin_memcpy (ret->__data + s1->__length, s2->__data, s2->__length);
+  return ret;
+}
diff --git a/libgo/runtime/go-strslice.c b/libgo/runtime/go-strslice.c
new file mode 100644
index 0000000..5883428
--- /dev/null
+++ b/libgo/runtime/go-strslice.c
@@ -0,0 +1,29 @@
+/* go-strslice.c -- the go string slice function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-string.h"
+#include "go-panic.h"
+
+const struct __go_string*
+__go_string_slice (const struct __go_string *s, size_t start, size_t end)
+{
+  size_t len;
+  struct __go_string *ret;
+
+  len = s == NULL ? 0 : s->__length;
+  if (end == (size_t) -1)
+    end = len;
+  if (start > len || end < start || end > len)
+    __go_panic_msg ("string index out of bounds");
+  if (s == NULL)
+    return NULL;
+  len = end - start;
+  ret = (struct __go_string *) __go_alloc (sizeof (struct __go_string) + len);
+  ret->__length = len;
+  __builtin_memcpy (ret->__data, s->__data + start, len);
+  return ret;
+}
diff --git a/libgo/runtime/go-trampoline.c b/libgo/runtime/go-trampoline.c
new file mode 100644
index 0000000..7005b67
--- /dev/null
+++ b/libgo/runtime/go-trampoline.c
@@ -0,0 +1,34 @@
+/* go-trampoline.c -- allocate a trampoline for a nested function.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include "go-alloc.h"
+
/* In order to build a trampoline we need space which is both writable
   and executable.  We currently just allocate a whole page.  This
   needs to be more system dependent.  */

void *
__go_allocate_trampoline (size_t size)
{
  unsigned int page_size;
  void *ret;
  int i;

  page_size = getpagesize ();
  /* The trampoline must fit in the single page we hand back.  */
  assert (page_size >= size);
  /* Over-allocate so that rounding up to the next page boundary still
     leaves at least one whole page inside the allocation.  NOTE(review):
     the memory is never freed and mprotect is applied to heap memory,
     which is not guaranteed to be page-backed on all systems -- as the
     comment above says, this needs to be made system dependent.  */
  ret = __go_alloc (2 * page_size - 1);
  ret = (void *) (((uintptr_t) ret + page_size - 1)
		  & ~ ((uintptr_t) page_size - 1));
  i = mprotect (ret, size, PROT_READ | PROT_WRITE | PROT_EXEC);
  assert (i == 0);
  return ret;
}
diff --git a/libgo/runtime/go-type-error.c b/libgo/runtime/go-type-error.c
new file mode 100644
index 0000000..336421d
--- /dev/null
+++ b/libgo/runtime/go-type-error.c
@@ -0,0 +1,28 @@
+/* go-type-error.c -- invalid hash and equality functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-type.h"
+#include "go-panic.h"
+
/* A hash function used for a type which does not support hash
   functions.  Always panics; installed in the descriptors of
   unhashable types.  */

size_t
__go_type_hash_error (const void *val __attribute__ ((unused)),
		      size_t key_size __attribute__ ((unused)))
{
  __go_panic_msg ("hash of type which does not support hash computations");
  /* __go_panic_msg is expected not to return; trap so this non-void
     function never falls off the end (which would be undefined
     behavior) if it somehow does.  */
  __builtin_trap ();
}
+
/* An equality function for a type which may not be compared.  Always
   panics; installed in the descriptors of uncomparable types.  */

_Bool
__go_type_equal_error (const void *v1 __attribute__ ((unused)),
		       const void *v2 __attribute__ ((unused)),
		       size_t key_size __attribute__ ((unused)))
{
  __go_panic_msg ("comparison of type which may not be compared");
  /* __go_panic_msg is expected not to return; trap so this non-void
     function never falls off the end (which would be undefined
     behavior) if it somehow does.  */
  __builtin_trap ();
}
diff --git a/libgo/runtime/go-type-identity.c b/libgo/runtime/go-type-identity.c
new file mode 100644
index 0000000..f1de3c2
--- /dev/null
+++ b/libgo/runtime/go-type-identity.c
@@ -0,0 +1,50 @@
+/* go-type-identity.c -- hash and equality identity functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-type.h"
+
/* Typedefs for accesses of different sizes.  */

typedef int QItype __attribute__ ((mode (QI)));
typedef int HItype __attribute__ ((mode (HI)));
typedef int SItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));

/* An identity hash function for a type.  This is used for types where
   we can simply use the type value itself as a hash code.  This is
   true of, e.g., integers and pointers.  */

size_t
__go_type_hash_identity (const void *key, size_t key_size)
{
  switch (key_size)
    {
    case 1:
      return *(const QItype *) key;
    case 2:
      return *(const HItype *) key;
    case 4:
      return *(const SItype *) key;
    case 8:
      return *(const DItype *) key;
    default:
      {
	/* Accumulate the bytes one at a time.  The previous code read
	   a full 4-byte word even for a 3-byte key, running one byte
	   past the end of the object, and required aligned keys.  Any
	   deterministic mixing of all the bytes is a valid hash.  */
	const unsigned char *p;
	size_t ret;
	size_t i;

	ret = 5381;
	p = (const unsigned char *) key;
	for (i = 0; i < key_size; ++i)
	  ret = ret * 33 + p[i];
	return ret;
      }
    }
}
+
/* An identity equality function for a type.  This is used for types
   where we can check for equality by checking that the values have
   the same bits.  */

_Bool
__go_type_equal_identity (const void *k1, const void *k2, size_t key_size)
{
  /* Two values are equal exactly when their bit patterns match.  */
  return !__builtin_memcmp (k1, k2, key_size);
}
diff --git a/libgo/runtime/go-type-interface.c b/libgo/runtime/go-type-interface.c
new file mode 100644
index 0000000..5488e72
--- /dev/null
+++ b/libgo/runtime/go-type-interface.c
@@ -0,0 +1,53 @@
+/* go-type-interface.c -- hash and equality interface functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include "interface.h"
+#include "go-type.h"
+
/* A hash function for an interface.  VVAL points to an interface
   pointer; the hash is delegated to the hash function of the value's
   dynamic type.  A NULL interface hashes to 0.  */

size_t
__go_type_hash_interface (const void *vval,
			  size_t key_size __attribute__ ((unused)))
{
  const struct __go_interface *val;
  size_t size;

  val = *(const struct __go_interface * const *) vval;
  if (val == NULL)
    return 0;
  size = val->__type_descriptor->__size;
  /* For pointer types the object field itself holds the value, so
     hash the field's bits; otherwise __object points at the value.  */
  if (__go_is_pointer_type (val->__type_descriptor))
    return val->__type_descriptor->__hash (&val->__object, size);
  else
    return val->__type_descriptor->__hash (val->__object, size);
}
+
/* An equality function for an interface.  VV1 and VV2 point to
   interface pointers.  Two interfaces are equal when they have the
   same dynamic type and equal values.  */

_Bool
__go_type_equal_interface (const void *vv1, const void *vv2,
			   size_t key_size __attribute__ ((unused)))
{
  const struct __go_interface *v1;
  const struct __go_interface *v2;

  v1 = *(const struct __go_interface * const *) vv1;
  v2 = *(const struct __go_interface * const *) vv2;
  /* A NULL interface only equals another NULL interface.  */
  if (v1 == NULL || v2 == NULL)
    return v1 == v2;
  if (!__go_type_descriptors_equal (v1->__type_descriptor,
				    v2->__type_descriptor))
    return 0;
  /* For pointer types the object fields hold the values directly, so
     compare them; otherwise delegate to the type's equality
     function.  */
  if (__go_is_pointer_type (v1->__type_descriptor))
    return v1->__object == v2->__object;
  else
    return v1->__type_descriptor->__equal (v1->__object, v2->__object,
					   v1->__type_descriptor->__size);
}
diff --git a/libgo/runtime/go-type-string.c b/libgo/runtime/go-type-string.c
new file mode 100644
index 0000000..c74eb4f
--- /dev/null
+++ b/libgo/runtime/go-type-string.c
@@ -0,0 +1,53 @@
+/* go-type-string.c -- hash and equality string functions.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-string.h"
+#include "go-type.h"
+
+/* A string hash function for a map.  */
+
+size_t
+__go_type_hash_string (const void *vkey,
+		       size_t key_size __attribute__ ((unused)))
+{
+  size_t ret;
+  const struct __go_string *key;
+  size_t len;
+  size_t i;
+  const unsigned char *p;
+
+  ret = 5381;
+  key = *(const struct __go_string * const *) vkey;
+  if (key == NULL)
+    return ret;
+  len = key->__length;
+  for (i = 0, p = key->__data; i < len; i++, p++)
+    ret = ret * 33 + *p;
+  return ret;
+}
+
+/* A string equality function for a map.  */
+
+_Bool
+__go_type_equal_string (const void *vk1, const void *vk2,
+			size_t key_size __attribute__ ((unused)))
+{
+  const struct __go_string *k1;
+  const struct __go_string *k2;
+
+  k1 = *(const struct __go_string * const *) vk1;
+  k2 = *(const struct __go_string * const *) vk2;
+  if (k1 == NULL)
+    return k2 == NULL || k2->__length == 0;
+  else if (k2 == NULL)
+    return k1->__length == 0;
+  else if (k1->__length != k2->__length)
+    return 0;
+  else
+    return __builtin_memcmp (k1->__data, k2->__data, k1->__length) == 0;
+}
diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h
new file mode 100644
index 0000000..64a42e8
--- /dev/null
+++ b/libgo/runtime/go-type.h
@@ -0,0 +1,322 @@
+/* go-type.h -- basic information for a Go type.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#ifndef LIBGO_GO_TYPE_H
+#define LIBGO_GO_TYPE_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "go-string.h"
+#include "array.h"
+
+/* Many of the types in this file must match the data structures
+   generated by the compiler, and must also match the Go types which
+   appear in go/runtime/type.go and go/reflect/type.go.  */
+
+/* Type codes.  These are used to get the type descriptor to use for
+   the type itself, when using unsafe.Typeof or unsafe.Reflect.  The
+   codes here must match the codes generated by the compiler.  These
+   are macros rather than an enum to make it easy to change values in
+   the future and hard to get confused about it.
+
+   The gc compiler works differently: it stores a pointer to the
+   type's type descriptor itself.  We don't do that because it would
+   mean that when the compiler writes out a type descriptor, it could
+   cause a reference to a variable defined in a shared library, which
+   might require a COPY reloc.  */
+
+#define GO_BOOL 0
+#define GO_FLOAT32 1
+#define GO_FLOAT64 2
+#define GO_FLOAT 3
+#define GO_INT16 4
+#define GO_INT32 5
+#define GO_INT64 6
+#define GO_INT8 7
+#define GO_INT 8
+#define GO_UINT16 9
+#define GO_UINT32 10
+#define GO_UINT64 11
+#define GO_UINT8 12
+#define GO_UINT 13
+#define GO_STRING 14
+#define GO_UINTPTR 15
+#define GO_DOTDOTDOT 16
+#define GO_UNSAFE_POINTER 17
+#define GO_ARRAY 18
+#define GO_SLICE 19
+#define GO_CHAN 20
+#define GO_FUNC 21
+#define GO_INTERFACE 22
+#define GO_MAP 23
+#define GO_PTR 24
+#define GO_STRUCT 25
+
/* For each Go type the compiler constructs one of these structures.
   This is used for type reflection, interfaces, maps, and reference
   counting.  */

struct __go_type_descriptor
{
  /* The type code for this type, one of the GO_* codes defined above.
     This is used by unsafe.Reflect and unsafe.Typeof to determine the
     type descriptor to return for this type itself.  It is also used
     by reflect.toType when mapping to a reflect Type structure.  */
  unsigned char __code;

  /* The alignment in bytes of a variable with this type.  */
  unsigned char __align;

  /* The alignment in bytes of a struct field with this type.  */
  unsigned char __field_align;

  /* The size in bytes of a value of this type.  Note that all types
     in Go have a fixed size.  */
  uintptr_t __size;

  /* This function takes a pointer to a value of this type, and the
     size of this type, and returns a hash code.  We pass the size
     explicitly because it means that we can share a single instance
     of this function for various different types.  */
  size_t (*__hash) (const void *, size_t);

  /* This function takes two pointers to values of this type, and the
     size of this type, and returns whether the values are equal.  */
  _Bool (*__equal) (const void *, const void *, size_t);

  /* A string describing this type.  This is only used for
     debugging.  Note: a pointer to a string pointer.  */
  const struct __go_string * const *__reflection;

  /* A pointer to fields which are only used for some types.  NULL
     when the type has no uncommon data.  */
  const struct __go_uncommon_type *__uncommon;
};
+
/* The information we store for each method of a type.  */

struct __go_method
{
  /* A hash code for the type of the method.  This is computed by the
     compiler.  It is used to ensure that dynamic interface
     conversions don't get confused by two different types with
     different methods with the same name.  */
  uint32_t __hash;

  /* The name of the method.  Note: a pointer to a string pointer, as
     throughout these descriptors.  */
  const struct __go_string **__name;

  /* This is NULL for an exported method, or the name of the package
     where it lives.  */
  const struct __go_string **__pkg_path;

  /* The type of the method.  This will be a function type.  */
  const struct __go_type_descriptor *__type;

  /* A pointer to the code which implements the method.  This is
     really a function pointer.  */
  const void *__function;
};
+
/* Additional information that we keep for named types and for types
   with methods.  Reached via the __uncommon field of a type
   descriptor.  */

struct __go_uncommon_type
{
  /* The name of the type.  NULL for unnamed types.  */
  const struct __go_string **__name;

  /* The type's package.  This is NULL for builtin types.  */
  const struct __go_string **__pkg_path;

  /* The type's methods.  This is an array of struct __go_method.  */
  struct __go_open_array __methods;
};
+
/* The type descriptor for a fixed array type.  */

struct __go_array_type
{
  /* Starts like all type descriptors.  */
  struct __go_type_descriptor __common;

  /* The element type.  NOTE(review): unlike most other descriptors
     this field is not const-qualified -- confirm whether that is
     intentional.  */
  struct __go_type_descriptor *__element_type;

  /* The length of the array.  */
  uintptr_t __len;
};
+
/* The type descriptor for a slice.  */

struct __go_slice_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* The element type.  NOTE(review): not const-qualified, unlike most
     other descriptors -- confirm whether that is intentional.  */
  struct __go_type_descriptor *__element_type;
};
+
+/* The direction of a channel.  */
+#define CHANNEL_RECV_DIR 1
+#define CHANNEL_SEND_DIR 2
+#define CHANNEL_BOTH_DIR (CHANNEL_RECV_DIR | CHANNEL_SEND_DIR)
+
/* The type descriptor for a channel.  */

struct __go_channel_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* The element type.  */
  const struct __go_type_descriptor *__element_type;

  /* The direction: one of the CHANNEL_*_DIR values defined above.  */
  uintptr_t __dir;
};
+
/* The type descriptor for a function.  */

struct __go_func_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* The input parameter types.  This is an array of pointers to
     struct __go_type_descriptor.  */
  struct __go_open_array __in;

  /* The output parameter types.  This is an array of pointers to
     struct __go_type_descriptor.  */
  struct __go_open_array __out;
};
+
/* A method on an interface type.  */

struct __go_interface_method
{
  /* Hash code for the type of the method.  This is computed by the
     compiler, and matches the hash codes used in struct
     __go_method.  */
  uint32_t __hash;

  /* The name of the method.  Note: a pointer to a string pointer.  */
  const struct __go_string **__name;

  /* This is NULL for an exported method, or the name of the package
     where it lives.  */
  const struct __go_string **__pkg_path;

  /* The real type of the method.  */
  struct __go_type_descriptor *__type;
};
+
/* An interface type.  */

struct __go_interface_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* Array of __go_interface_method.  The methods are sorted in the
     same order that they appear in the definition of the
     interface.  */
  struct __go_open_array __methods;
};
+
/* A map type.  */

struct __go_map_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* The map key type.  */
  const struct __go_type_descriptor *__key_type;

  /* The map value type.  */
  const struct __go_type_descriptor *__val_type;
};
+
/* A pointer type.  */

struct __go_ptr_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* The type to which this points.  */
  const struct __go_type_descriptor *__element_type;
};
+
/* A field in a structure.  */

struct __go_struct_field
{
  /* The name of the field--NULL for an anonymous field.  */
  const struct __go_string **__name;

  /* This is NULL for an exported field, or the name of the package
     where it lives.  */
  const struct __go_string **__pkg_path;

  /* The type of the field.  */
  const struct __go_type_descriptor *__type;

  /* The field tag, or NULL.  */
  const struct __go_string **__tag;

  /* The offset of the field in the struct, in bytes.  */
  uintptr_t __offset;
};
+
/* A struct type.  */

struct __go_struct_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* An array of struct __go_struct_field, one per field in
     declaration order.  */
  struct __go_open_array __fields;
};
+
/* The type of a varargs parameter to a function.  */

struct __go_dotdotdot_type
{
  /* Starts like all other type descriptors.  */
  struct __go_type_descriptor __common;

  /* The type of varargs parameters; this will be NULL if no type is
     specified.  */
  const struct __go_type_descriptor *__type;
};
+
+/* Whether a type descriptor is a pointer.  */
+
+static inline _Bool
+__go_is_pointer_type (const struct __go_type_descriptor *td)
+{
+  return td->__code == GO_PTR || td->__code == GO_UNSAFE_POINTER;
+}
+
+extern _Bool
+__go_type_descriptors_equal(const struct __go_type_descriptor*,
+			    const struct __go_type_descriptor*);
+
+extern size_t __go_type_hash_identity (const void *, size_t);
+extern _Bool __go_type_equal_identity (const void *, const void *, size_t);
+extern size_t __go_type_hash_string (const void *, size_t);
+extern _Bool __go_type_equal_string (const void *, const void *, size_t);
+extern size_t __go_type_hash_interface (const void *, size_t);
+extern _Bool __go_type_equal_interface (const void *, const void *, size_t);
+extern size_t __go_type_hash_error (const void *, size_t);
+extern _Bool __go_type_equal_error (const void *, const void *, size_t);
+
+#endif /* !defined(LIBGO_GO_TYPE_H) */
diff --git a/libgo/runtime/go-typedesc-equal.c b/libgo/runtime/go-typedesc-equal.c
new file mode 100644
index 0000000..e0f492b
--- /dev/null
+++ b/libgo/runtime/go-typedesc-equal.c
@@ -0,0 +1,38 @@
+/* go-typedesc-equal.c -- return whether two type descriptors are equal.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-string.h"
+#include "go-type.h"
+
/* Compare type descriptors for equality.  This is necessary because
   types may have different descriptors in different shared libraries.
   Also, unnamed types may have multiple type descriptors even in a
   single shared library.  */

_Bool
__go_type_descriptors_equal (const struct __go_type_descriptor *td1,
			     const struct __go_type_descriptor *td2)
{
  /* Identical descriptors are trivially equal.  */
  if (td1 == td2)
    return 1;
  /* In a type switch we can get a NULL descriptor.  */
  if (td1 == NULL || td2 == NULL)
    return 0;
  if (td1->__code != td2->__code)
    return 0;
  /* Named types are equal when both name and package path match.  */
  if (td1->__uncommon != NULL && td1->__uncommon->__name != NULL)
    {
      if (td2->__uncommon == NULL || td2->__uncommon->__name == NULL)
	return 0;
      return (__go_ptr_strings_equal (td1->__uncommon->__name,
				      td2->__uncommon->__name)
	      && __go_ptr_strings_equal (td1->__uncommon->__pkg_path,
					 td2->__uncommon->__pkg_path));
    }
  /* A named type never equals an unnamed one.  */
  if (td2->__uncommon != NULL && td2->__uncommon->__name != NULL)
    return 0;
  /* For unnamed types, fall back on comparing the reflection
     strings, which fully describe the structure of the type.  */
  return __go_ptr_strings_equal (td1->__reflection, td2->__reflection);
}
diff --git a/libgo/runtime/go-unreflect.c b/libgo/runtime/go-unreflect.c
new file mode 100644
index 0000000..9adedfd
--- /dev/null
+++ b/libgo/runtime/go-unreflect.c
@@ -0,0 +1,31 @@
+/* go-unreflect.c -- implement unsafe.Unreflect for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "interface.h"
+
/* Implement unsafe.Unreflect.  TYPE is an interface whose __object
   field is assumed to hold a pointer to a type descriptor (the value
   produced by unsafe.Reflect) -- TODO confirm against the compiler's
   calling convention.  Builds a new interface value of that type
   around OBJECT.  */

struct __go_interface *Unreflect (const struct __go_interface *type,
				  void *object)
  asm ("libgo_unsafe.unsafe.Unreflect");

struct __go_interface *
Unreflect (const struct __go_interface *type, void *object)
{
  struct __go_interface *ret;

  ret = (struct __go_interface *) __go_alloc (sizeof (struct __go_interface));
  ret->__type_descriptor = type->__object;
  ret->__methods = NULL;
  // FIXME: Handle reference counts.
  /* For pointer types the interface stores the pointer value itself,
     so load it through OBJECT; otherwise store the pointer to the
     value.  */
  if (__go_is_pointer_type (ret->__type_descriptor))
    ret->__object = *(void **) object;
  else
    ret->__object = object;
  return ret;
}
diff --git a/libgo/runtime/go-unsafe-new.c b/libgo/runtime/go-unsafe-new.c
new file mode 100644
index 0000000..7156ce4
--- /dev/null
+++ b/libgo/runtime/go-unsafe-new.c
@@ -0,0 +1,27 @@
+/* go-unsafe-new.c -- unsafe.New function for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "interface.h"
+
+/* Implement unsafe.New.  */
+
+void *New (const struct __go_interface *type) asm ("libgo_unsafe.unsafe.New");
+
+/* The dynamic type of the argument will be a pointer to a type
+   descriptor.  */
+
+void *
+New (const struct __go_interface *type)
+{
+  const void *object;
+  const struct __go_type_descriptor *descriptor;
+
+  object = type->__object;
+  descriptor = (const struct __go_type_descriptor *) object;
+  return __go_alloc (descriptor->__size);
+}
diff --git a/libgo/runtime/go-unsafe-newarray.c b/libgo/runtime/go-unsafe-newarray.c
new file mode 100644
index 0000000..5f1f93b
--- /dev/null
+++ b/libgo/runtime/go-unsafe-newarray.c
@@ -0,0 +1,28 @@
+/* go-unsafe-newarray.c -- unsafe.NewArray function for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "go-alloc.h"
+#include "go-type.h"
+#include "interface.h"
+
+/* Implement unsafe.NewArray.  Allocates space for N values of the
+   type described by TYPE's dynamic value.  */
+
+void *NewArray (const struct __go_interface *type, int n)
+  asm ("libgo_unsafe.unsafe.NewArray");
+
+/* The dynamic type of the argument will be a pointer to a type
+   descriptor.  */
+
+void *
+NewArray (const struct __go_interface *type, int n)
+{
+  const void *object;
+  const struct __go_type_descriptor *descriptor;
+
+  object = type->__object;
+  descriptor = (const struct __go_type_descriptor *) object;
+  /* NOTE(review): __size * n can overflow for a large N; callers are
+     trusted to pass a sane element count -- confirm.  */
+  return __go_alloc (descriptor->__size * n);
+}
diff --git a/libgo/runtime/go-unsafe-pointer.c b/libgo/runtime/go-unsafe-pointer.c
new file mode 100644
index 0000000..d1c0fab
--- /dev/null
+++ b/libgo/runtime/go-unsafe-pointer.c
@@ -0,0 +1,109 @@
+/* go-unsafe-pointer.c -- unsafe.Pointer type descriptor for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-string.h"
+#include "go-type.h"
+
+/* This file provides the type descriptor for the unsafe.Pointer type.
+   The unsafe package is defined by the compiler itself, which means
+   that there is no package to compile to define the type
+   descriptor.  */
+
+extern const struct __go_type_descriptor unsafe_Pointer
+  asm ("__go_tdn_libgo_unsafe.unsafe.Pointer");
+
+/* Used to determine the field alignment.  The offset of P within
+   this struct is the alignment a pointer field requires.  */
+struct field_align
+{
+  char c;
+  void *p;
+};
+
+/* The reflection string.  REFLECTION_LEN must equal the length of
+   REFLECTION; the layout of this struct must match
+   struct __go_string.  */
+#define REFLECTION "unsafe.Pointer"
+#define REFLECTION_LEN 14
+static const struct
+{
+  size_t length;
+  char data[REFLECTION_LEN];
+} reflection_string =
+{
+  sizeof REFLECTION - 1,
+  { REFLECTION }
+};
+
+static const struct __go_string * const ptr_reflection_string =
+  (const struct __go_string *) &reflection_string;
+
+/* The type descriptor itself; pointer-sized, hashed and compared by
+   identity.  */
+const struct __go_type_descriptor unsafe_Pointer =
+{
+  /* __code */
+  GO_UNSAFE_POINTER,
+  /* __align */
+  __alignof (void *),
+  /* __field_align */
+  offsetof (struct field_align, p) - 1,
+  /* __size */
+  sizeof (void *),
+  /* __hash */
+  __go_type_hash_identity,
+  /* __equal */
+  __go_type_equal_identity,
+  /* __reflection */
+  &ptr_reflection_string,
+  /* __uncommon */
+  NULL
+};
+
+/* We also need the type descriptor for the pointer to unsafe.Pointer,
+   since any package which refers to that type descriptor will expect
+   it to be defined elsewhere.  */
+
+extern const struct __go_ptr_type pointer_unsafe_Pointer
+  asm ("__go_td_pN27_libgo_unsafe.unsafe.Pointer");
+
+/* The reflection string.  PREFLECTION_LEN must equal the length of
+   PREFLECTION; the layout of this struct must match
+   struct __go_string.  */
+#define PREFLECTION "*unsafe.Pointer"
+#define PREFLECTION_LEN 15
+static const struct
+{
+  size_t length;
+  char data[PREFLECTION_LEN];
+} preflection_string =
+{
+  sizeof PREFLECTION - 1,
+  { PREFLECTION }
+};
+
+static const struct __go_string * const ptr_preflection_string =
+  (const struct __go_string *) &preflection_string;
+
+const struct __go_ptr_type pointer_unsafe_Pointer =
+{
+  /* __common */
+  {
+    /* __code */
+    GO_PTR,
+    /* __align */
+    __alignof (void *),
+    /* __field_align */
+    offsetof (struct field_align, p) - 1,
+    /* __size */
+    sizeof (void *),
+    /* __hash */
+    __go_type_hash_identity,
+    /* __equal */
+    __go_type_equal_identity,
+    /* __reflection */
+    &ptr_preflection_string,
+    /* __uncommon */
+    NULL
+  },
+  /* __element_type: the type this pointer type points to.  */
+  &unsafe_Pointer
+};
diff --git a/libgo/runtime/iface.cgo b/libgo/runtime/iface.cgo
new file mode 100644
index 0000000..6084511
--- /dev/null
+++ b/libgo/runtime/iface.cgo
@@ -0,0 +1,58 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "go-type.h"
+#include "interface.h"
+#define nil NULL
+
+typedef _Bool bool;
+typedef struct __go_type_descriptor descriptor;
+typedef const struct __go_type_descriptor const_descriptor;
+typedef struct __go_interface interface;
+
+// Compare two type descriptors for equality.
+func ifacetypeeq(a *descriptor, b *descriptor) (eq bool) {
+	eq = __go_type_descriptors_equal(a, b);
+}
+
+// Return the descriptor for an interface value, or nil for a nil
+// interface.
+func ifacetype(i *interface) (d *const_descriptor) {
+	if (i == nil) {
+		return nil;
+	}
+	d = i->__type_descriptor;
+}
+
+// Convert an interface to a different interface type.
+// ok reports whether the conversion succeeded.
+func ifaceI2I2(inter *descriptor, i *interface) (ret *interface, ok bool) {
+	ret = __go_convert_interface(inter, i, &ok);
+}
+
+// Return whether we can convert an interface to a type.
+func ifaceI2Tp(to *descriptor, from *descriptor) (ok bool) {
+	ok = __go_can_convert_to_interface(to, from);
+}
+
+// Convert an interface to a pointer type.  Succeeds only when the
+// dynamic type matches INTER exactly; returns (nil, false) otherwise.
+func ifaceI2T2P(inter *descriptor, i *interface) (ret *void, ok bool) {
+	if (i != nil && __go_type_descriptors_equal(inter, i->__type_descriptor)) {
+		ret = i->__object;
+		ok = 1;
+	} else {
+		ret = nil;
+		ok = 0;
+	}
+}
+
+// Convert an interface to a non-pointer type, copying the value into
+// *RET on success; *RET is zeroed when the conversion fails.
+func ifaceI2T2(inter *descriptor, i *interface, ret *void) (ok bool) {
+	if (i != nil && __go_type_descriptors_equal(inter, i->__type_descriptor)) {
+		__builtin_memcpy(ret, i->__object, inter->__size);
+		ok = 1;
+	} else {
+		__builtin_memset(ret, 0, inter->__size);
+		ok = 0;
+	}
+}
diff --git a/libgo/runtime/interface.h b/libgo/runtime/interface.h
new file mode 100644
index 0000000..7f07652
--- /dev/null
+++ b/libgo/runtime/interface.h
@@ -0,0 +1,51 @@
+/* interface.h -- the interface type for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "go-type.h"
+
+/* A variable with an interface type is represented as a pointer to
+   this struct.  */
+
+struct __go_interface
+{
+  /* A pointer to the type descriptor for the dynamic type of the
+     object.  */
+  const struct __go_type_descriptor *__type_descriptor;
+
+  /* A pointer to the methods for the interface.  This is effectively
+     the vtable for this interface.  This is simply a list of pointers
+     to functions.  They are in the same order as the list in the
+     internal representation of the interface, which sorts them by
+     name.  May be NULL when there are no methods (see
+     go-unreflect.c).  */
+  const void* __methods;
+
+  /* The object.  If the object is a pointer--if the type descriptor
+     code is GO_PTR or GO_UNSAFE_POINTER--then this field is the value
+     of the object itself.  Otherwise this is a pointer to memory
+     which holds the value.  */
+  void *__object;
+};
+
+extern struct __go_interface *
+__go_new_interface_pointer (const struct __go_type_descriptor *descriptor,
+			    void *methods, void *object);
+
+extern struct __go_interface *
+__go_new_interface_object (const struct __go_type_descriptor *descriptor,
+			   void *methods, size_t object_size,
+			   const void* object);
+
+extern struct __go_interface *
+__go_convert_interface (const struct __go_type_descriptor *,
+			const void *rhs,
+			_Bool *success);
+
+extern _Bool
+__go_can_convert_to_interface(const struct __go_type_descriptor *,
+			      const struct __go_type_descriptor *);
diff --git a/libgo/runtime/malloc.c b/libgo/runtime/malloc.c
new file mode 100644
index 0000000..b72a05a
--- /dev/null
+++ b/libgo/runtime/malloc.c
@@ -0,0 +1,350 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See malloc.h for overview.
+//
+// TODO(rsc): double-check stats.
+// TODO(rsc): solve "stack overflow during malloc" problem.
+
+#include <stddef.h>
+#include <errno.h>
+#include <stdlib.h>
+#include "go-alloc.h"
+#include "runtime.h"
+#include "malloc.h"
+
+MHeap mheap;	// the global page heap
+MStats mstats;	// global allocation statistics
+
+// Allocate an object of at least size bytes.
+// Small objects are allocated from the per-thread cache's free lists.
+// Large objects (> 32 kB) are allocated straight from the heap.
+void*
+__go_alloc(uintptr size)
+{
+	int32 sizeclass;
+	MCache *c;
+	uintptr npages;
+	MSpan *s;
+	void *v;
+	uint32 *ref;
+
+	// Guard against re-entering the allocator.
+	if(m->mallocing)
+		throw("malloc/free - deadlock");
+	m->mallocing = 1;
+
+	// Even a zero-byte allocation must return a distinct pointer.
+	if(size == 0)
+		size = 1;
+
+	mstats.nmalloc++;
+	if(size <= MaxSmallSize) {
+		// Allocate from mcache free lists, rounding the size up
+		// to its size class.
+		sizeclass = SizeToClass(size);
+		size = class_to_size[sizeclass];
+		c = m->mcache;
+		v = MCache_Alloc(c, sizeclass, size);
+		if(v == nil)
+			throw("out of memory");
+		mstats.alloc += size;
+	} else {
+		// TODO(rsc): Report tracebacks for very large allocations.
+
+		// Allocate directly from heap, rounding up to whole pages.
+		npages = size >> PageShift;
+		if((size & PageMask) != 0)
+			npages++;
+		s = MHeap_Alloc(&mheap, npages, 0);
+		if(s == nil)
+			throw("out of memory");
+		mstats.alloc += npages<<PageShift;
+		v = (void*)(s->start << PageShift);
+	}
+
+	// setup for mark sweep: the new object has no references yet.
+	if(!mlookup(v, nil, nil, &ref)) {
+		printf("malloc %zd; mlookup failed\n", (size_t) size);
+		throw("malloc mlookup");
+	}
+	*ref = RefNone;
+
+	m->mallocing = 0;
+	return v;
+}
+
+// Allocate size bytes, then trigger a garbage collection if the
+// heap has grown past the next-GC threshold.
+void*
+mallocgc(uintptr size)
+{
+	void *v;
+
+	v = __go_alloc(size);
+	if(mstats.inuse_pages > mstats.next_gc)
+		gc(0);
+	return v;
+}
+
+// Free the object whose base pointer is v.
+void
+__go_free(void *v)
+{
+	int32 sizeclass, size;
+	uintptr page, tmp;
+	MSpan *s;
+	MCache *c;
+	uint32 *ref;
+
+	// Freeing nil is a no-op.
+	if(v == nil)
+		return;
+
+	// Guard against re-entering the allocator.
+	if(m->mallocing)
+		throw("malloc/free - deadlock");
+	m->mallocing = 1;
+
+	if(!mlookup(v, nil, nil, &ref))
+		throw("free mlookup");
+	*ref = RefFree;
+
+	// Find size class for v.
+	page = (uintptr)v >> PageShift;
+	sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);
+	if(sizeclass == 0) {
+		// Missed in cache.
+		s = MHeap_Lookup(&mheap, page);
+		if(s == nil)
+			throw("free - invalid pointer");
+		sizeclass = s->sizeclass;
+		if(sizeclass == 0) {
+			// Large object: zero it and return its pages
+			// directly to the heap.
+			mstats.alloc -= s->npages<<PageShift;
+			sys_memclr(v, s->npages<<PageShift);
+			MHeap_Free(&mheap, s);
+			goto out;
+		}
+		MHeapMapCache_SET(&mheap.mapcache, page, sizeclass);
+	}
+
+	// Small object: zero it and put it back on the thread cache's
+	// free list.
+	c = m->mcache;
+	size = class_to_size[sizeclass];
+	sys_memclr(v, size);
+	mstats.alloc -= size;
+	MCache_Free(c, v, sizeclass, size);
+
+out:
+	m->mallocing = 0;
+}
+
+// Look up the heap object containing v.  Returns 1 and fills in any
+// of *base (object start), *size (object size), and *ref (pointer to
+// the object's refcount word) that are non-nil; returns 0 if v is
+// not a pointer into the heap.
+int32
+mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
+{
+	uintptr n, nobj, i;
+	byte *p;
+	MSpan *s;
+
+	mstats.nlookup++;
+	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
+	if(s == nil) {
+		// Not a heap pointer.
+		if(base)
+			*base = nil;
+		if(size)
+			*size = 0;
+		if(ref)
+			*ref = 0;
+		return 0;
+	}
+
+	p = (byte*)((uintptr)s->start<<PageShift);
+	if(s->sizeclass == 0) {
+		// Large object: one object per span, with a single
+		// refcount word in the span itself.
+		if(base)
+			*base = p;
+		if(size)
+			*size = s->npages<<PageShift;
+		if(ref)
+			*ref = &s->gcref0;
+		return 1;
+	}
+
+	if((byte*)v >= (byte*)s->gcref) {
+		// pointers into the gc ref counts
+		// do not count as pointers.
+		return 0;
+	}
+
+	// Small object: compute its index within the span.
+	n = class_to_size[s->sizeclass];
+	i = ((byte*)v - p)/n;
+	if(base)
+		*base = p + i*n;
+	if(size)
+		*size = n;
+	// Sanity-check that the ref count array lies inside the span.
+	nobj = (s->npages << PageShift) / (n + RefcountOverhead);
+	if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
+		printf("odd span state=%d span=%p base=%p sizeclass=%d n=%zd size=%zd npages=%zd\n",
+			s->state, s, p, s->sizeclass, (size_t)nobj, (size_t)n, (size_t)s->npages);
+		printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%zd nobj=%zd size=%zd end=%p end=%p\n",
+			s->sizeclass, v, p, s->gcref, (size_t)s->npages<<PageShift,
+			(size_t)nobj, (size_t)n, s->gcref + nobj, p+(s->npages<<PageShift));
+		throw("bad gcref");
+	}
+	if(ref)
+		*ref = &s->gcref[i];
+
+	return 1;
+}
+
+// Allocate a per-thread MCache from the heap's fixed-size allocator.
+// The caller is responsible for locking (see FixAlloc in malloc.h).
+MCache*
+allocmcache(void)
+{
+	return FixAlloc_Alloc(&mheap.cachealloc);
+}
+
+// One-time allocator initialization: compute the size-class tables,
+// set up the heap, and give the current M its cache.
+void
+mallocinit(void)
+{
+	InitSizes();
+	MHeap_Init(&mheap, SysAlloc);
+	m->mcache = allocmcache();
+
+	// See if it works.
+	__go_free(__go_alloc(1));
+}
+
+// TODO(rsc): Move elsewhere.
+enum
+{
+	NHUNK		= 20<<20,
+
+	// Fallback definitions of the mmap protection and flag bits.
+	// NOTE(review): guarded on __GNUC__, presumably because the
+	// GCC build gets these from system headers -- confirm.
+#ifndef __GNUC__
+	PROT_NONE	= 0x00,
+	PROT_READ	= 0x01,
+	PROT_WRITE	= 0x02,
+	PROT_EXEC	= 0x04,
+
+	MAP_FILE	= 0x0000,
+	MAP_SHARED	= 0x0001,
+	MAP_PRIVATE	= 0x0002,
+	MAP_FIXED	= 0x0010,
+	MAP_ANON	= 0x1000,	// not on Linux - TODO(rsc)
+#endif
+};
+
+// Obtain a chunk of n bytes of zeroed memory from the operating
+// system via mmap.  Exits the process on failure.
+void*
+SysAlloc(uintptr n)
+{
+	void *p;
+
+	mstats.sys += n;
+	p = sys_mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, 0, 0);
+	// NOTE(review): this treats any return value below 4096 as an
+	// error -- confirm that sys_mmap reports failure that way.
+	if(p < (void*)4096) {
+		if(errno == EACCES) {
+			printf("mmap: access denied\n");
+			printf("If you're running SELinux, enable execmem for this process.\n");
+		} else {
+			printf("mmap: errno=%d\n", errno);
+		}
+		exit(2);
+	}
+	return p;
+}
+
+// Hint to the OS that [v, v+n) is not currently needed.
+// Currently a no-op.
+void
+SysUnused(void *v, uintptr n)
+{
+	USED(v);
+	USED(n);
+	// TODO(rsc): call madvise MADV_DONTNEED
+}
+
+// Return [v, v+n) to the OS.  Currently a no-op, which is allowed
+// (see the comment on SysFree in malloc.h).
+void
+SysFree(void *v, uintptr n)
+{
+	USED(v);
+	USED(n);
+	// TODO(rsc): call munmap
+}
+
+// Runtime stubs.
+
+extern void *oldmal(uint32);
+
+// Allocate n bytes for the runtime, possibly triggering a garbage
+// collection (via mallocgc).
+void*
+mal(uint32 n)
+{
+//return oldmal(n);
+	void *v;
+
+	v = mallocgc(n);
+
+	// Debugging aid (disabled): verify that fresh memory is zeroed.
+	if(0) {
+		byte *p;
+		uint32 i;
+		p = v;
+		for(i=0; i<n; i++) {
+			if(p[i] != 0) {
+				printf("mal %d => %p: byte %d is non-zero\n", n, v, i);
+				throw("mal");
+			}
+		}
+	}
+
+//printf("mal %d %p\n", n, v);  // |checkmal to check for overlapping returns.
+	return v;
+}
+
+// Stack allocator uses malloc/free most of the time,
+// but if we're in the middle of malloc and need stack,
+// we have to do something else to avoid deadlock.
+// In that case, we fall back on a fixed-size free-list
+// allocator, assuming that inside malloc all the stack
+// frames are small, so that all the stack allocations
+// will be a single size, the minimum (right now, 5k).
+struct {
+	Lock;		// unnamed member (extension): guards the FixAlloc
+	FixAlloc;	// unnamed member: fallback fixed-size allocator
+} stacks;
+
+// Allocate an n-byte stack segment.
+void*
+stackalloc(uint32 n)
+{
+	void *v;
+	uint32 *ref;
+
+//return oldmal(n);
+	if(m->mallocing) {
+		// In the middle of malloc: use the fixed-size fallback
+		// allocator to avoid deadlock.  All such requests are
+		// expected to be the same size.
+		lock(&stacks);
+		if(stacks.size == 0)
+			FixAlloc_Init(&stacks, n, SysAlloc, nil, nil);
+		if(stacks.size != n) {
+			printf("stackalloc: in malloc, size=%zu want %d", (size_t)stacks.size, n);
+			throw("stackalloc");
+		}
+		v = FixAlloc_Alloc(&stacks);
+		unlock(&stacks);
+		return v;
+	}
+	v = __go_alloc(n);
+	// Mark the segment so it is neither freed nor scanned by GC.
+	mlookup(v, nil, nil, &ref);
+	*ref = RefStack;
+	return v;
+}
+
+// Free a stack segment obtained from stackalloc.
+void
+stackfree(void *v)
+{
+//return;
+
+	if(m->mallocing) {
+		// In the middle of malloc: return it to the fixed-size
+		// fallback allocator rather than calling free.
+		lock(&stacks);
+		FixAlloc_Free(&stacks, v);
+		unlock(&stacks);
+		return;
+	}
+	__go_free(v);
+}
+
+// Temporary.  We don't do GC.
+void
+gc(int32 force)
+{
+	// force is accepted for API compatibility but ignored.
+	USED(force);
+}
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
new file mode 100644
index 0000000..ea46384
--- /dev/null
+++ b/libgo/runtime/malloc.h
@@ -0,0 +1,410 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Memory allocator, based on tcmalloc.
+// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
+
+// The main allocator works in runs of pages.
+// Small allocation sizes (up to and including 32 kB) are
+// rounded to one of about 100 size classes, each of which
+// has its own free list of objects of exactly that size.
+// Any free page of memory can be split into a set of objects
+// of one size class, which are then managed using free list
+// allocators.
+//
+// The allocator's data structures are:
+//
+//	FixAlloc: a free-list allocator for fixed-size objects,
+//		used to manage storage used by the allocator.
+//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
+//	MSpan: a run of pages managed by the MHeap.
+//	MHeapMap: a mapping from page IDs to MSpans.
+//	MHeapMapCache: a small cache of MHeapMap mapping page IDs
+//		to size classes for pages used for small objects.
+//	MCentral: a shared free list for a given size class.
+//	MCache: a per-thread (in Go, per-M) cache for small objects.
+//	MStats: allocation statistics.
+//
+// Allocating a small object proceeds up a hierarchy of caches:
+//
+//	1. Round the size up to one of the small size classes
+//	   and look in the corresponding MCache free list.
+//	   If the list is not empty, allocate an object from it.
+//	   This can all be done without acquiring a lock.
+//
+//	2. If the MCache free list is empty, replenish it by
+//	   taking a bunch of objects from the MCentral free list.
+//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
+//
+//	3. If the MCentral free list is empty, replenish it by
+//	   allocating a run of pages from the MHeap and then
+//	   chopping that memory into objects of the given size.
+//	   Allocating many objects amortizes the cost of locking
+//	   the heap.
+//
+//	4. If the MHeap is empty or has no page runs large enough,
+//	   allocate a new group of pages (at least 1MB) from the
+//	   operating system.  Allocating a large run of pages
+//	   amortizes the cost of talking to the operating system.
+//
+// Freeing a small object proceeds up the same hierarchy:
+//
+//	1. Look up the size class for the object and add it to
+//	   the MCache free list.
+//
+//	2. If the MCache free list is too long or the MCache has
+//	   too much memory, return some to the MCentral free lists.
+//
+//	3. If all the objects in a given span have returned to
+//	   the MCentral list, return that span to the page heap.
+//
+//	4. If the heap has too much memory, return some to the
+//	   operating system.
+//
+//	TODO(rsc): Step 4 is not implemented.
+//
+// Allocating and freeing a large object uses the page heap
+// directly, bypassing the MCache and MCentral free lists.
+//
+// This C code was written with an eye toward translating to Go
+// in the future.  Methods have the form Type_Method(Type *t, ...).
+
+
+// Forward declarations for the allocator's data structures; see the
+// overview comment above for what each one does.
+typedef struct FixAlloc	FixAlloc;
+typedef struct MCentral	MCentral;
+typedef struct MHeap	MHeap;
+typedef struct MHeapMap	MHeapMap;
+typedef struct MHeapMapCache	MHeapMapCache;
+typedef struct MSpan	MSpan;
+typedef struct MStats	MStats;
+typedef struct MLink	MLink;
+
+// The heap is managed in 4096-byte pages.
+enum
+{
+	PageShift	= 12,
+	PageSize	= 1<<PageShift,
+	PageMask	= PageSize - 1,
+};
+typedef	uintptr	PageID;		// address >> PageShift
+
+enum
+{
+	// Tunable constants.
+	NumSizeClasses = 150,		// Number of size classes (must match msize.c)
+	MaxSmallSize = 32<<10,		// Largest "small" object (32 kB)
+
+	FixAllocChunk = 128<<10,	// Chunk size for FixAlloc
+	MaxMCacheListLen = 256,		// Maximum objects on MCacheList
+	MaxMCacheSize = 2<<20,		// Maximum bytes in one MCache
+	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
+	HeapAllocChunk = 1<<20,		// Chunk size for heap growth
+};
+
+
+// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
+struct MLink
+{
+	MLink *next;
+};
+
+// SysAlloc obtains a large chunk of memory from the operating system,
+// typically on the order of a hundred kilobytes or a megabyte.
+//
+// SysUnused notifies the operating system that the contents
+// of the memory region are no longer needed and can be reused
+// for other purposes.  The program reserves the right to start
+// accessing those pages in the future.
+//
+// SysFree returns it unconditionally; this is only used if
+// an out-of-memory error has been detected midway through
+// an allocation.  It is okay if SysFree is a no-op.
+
+void*	SysAlloc(uintptr nbytes);
+void	SysFree(void *v, uintptr nbytes);
+void	SysUnused(void *v, uintptr nbytes);
+
+
+// FixAlloc is a simple free-list allocator for fixed size objects.
+// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
+// MCache and MSpan objects.
+//
+// Memory returned by FixAlloc_Alloc is not zeroed.
+// The caller is responsible for locking around FixAlloc calls.
+// Callers can keep state in the object but the first word is
+// smashed by freeing and reallocating.
+struct FixAlloc
+{
+	uintptr size;		// size of each object
+	void *(*alloc)(uintptr);	// backing allocator (e.g. SysAlloc)
+	void (*first)(void *arg, byte *p);	// called first time p is returned
+	void *arg;		// argument passed to first
+	MLink *list;		// free list of previously returned objects
+	byte *chunk;		// current chunk being carved up -- see fixalloc.c
+	uint32 nchunk;		// bytes remaining in chunk -- see fixalloc.c
+};
+
+void	FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
+void*	FixAlloc_Alloc(FixAlloc *f);
+void	FixAlloc_Free(FixAlloc *f, void *p);
+
+
+// Statistics.
+// Shared with Go: if you edit this structure, also edit ../lib/malloc.go.
+struct MStats
+{
+	uint64	alloc;		// bytes currently allocated
+	uint64	sys;		// bytes obtained from the OS (SysAlloc)
+	uint64	stacks;
+	uint64	inuse_pages;	// protected by mheap.Lock
+	uint64	next_gc;	// protected by mheap.Lock
+	uint64	nlookup;	// unprotected (approximate)
+	uint64	nmalloc;	// unprotected (approximate)
+	bool	enablegc;
+};
+extern MStats mstats;
+
+
+// Size classes.  Computed and initialized by InitSizes.
+//
+// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
+//	1 <= sizeclass < NumSizeClasses, for n.
+//	Size class 0 is reserved to mean "not small".
+//
+// class_to_size[i] = largest size in class i
+// class_to_allocnpages[i] = number of pages to allocate when
+// 	making new objects in class i
+// class_to_transfercount[i] = number of objects to move when
+//	taking a bunch of objects out of the central lists
+//	and putting them in the thread free list.
+
+int32	SizeToClass(int32);
+extern	int32	class_to_size[NumSizeClasses];
+extern	int32	class_to_allocnpages[NumSizeClasses];
+extern	int32	class_to_transfercount[NumSizeClasses];
+extern	void	InitSizes(void);
+
+
+// Per-thread (in Go, per-M) cache for small objects.
+// No locking needed because it is per-thread (per-M).
+typedef struct MCacheList MCacheList;
+struct MCacheList
+{
+	MLink *list;
+	uint32 nlist;
+	uint32 nlistmin;
+};
+
+struct MCache
+{
+	MCacheList list[NumSizeClasses];
+	uint64 size;
+};
+
+void*	MCache_Alloc(MCache *c, int32 sizeclass, uintptr size);
+void	MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+
+
+// An MSpan is a run of pages.
+// Possible values of the MSpan.state field.
+enum
+{
+	MSpanInUse = 0,
+	MSpanFree,
+	MSpanListHead,
+	MSpanDead,
+};
+struct MSpan
+{
+	MSpan	*next;		// in a span linked list
+	MSpan	*prev;		// in a span linked list
+	MSpan	*allnext;		// in the list of all spans
+	PageID	start;		// starting page number
+	uintptr	npages;		// number of pages in span
+	MLink	*freelist;	// list of free objects
+	uint32	ref;		// number of allocated objects in this span
+	uint32	sizeclass;	// size class
+	uint32	state;		// MSpanInUse etc
+	union {
+		uint32	*gcref;	// sizeclass > 0: per-object ref words
+		uint32	gcref0;	// sizeclass == 0: ref word for the single large object
+	};
+};
+
+void	MSpan_Init(MSpan *span, PageID start, uintptr npages);
+
+// Every MSpan is in one doubly-linked list,
+// either one of the MHeap's free lists or one of the
+// MCentral's span lists.  We use empty MSpan structures as list heads.
+void	MSpanList_Init(MSpan *list);
+bool	MSpanList_IsEmpty(MSpan *list);
+void	MSpanList_Insert(MSpan *list, MSpan *span);
+void	MSpanList_Remove(MSpan *span);	// from whatever list it is in
+
+
+// Central list of free objects of a given size.
+struct MCentral
+{
+	Lock;		// unnamed member: protects the fields below
+	int32 sizeclass;
+	MSpan nonempty;	// list head: spans with free objects -- TODO confirm in mcentral.c
+	MSpan empty;	// list head: fully-allocated spans -- TODO confirm
+	int32 nfree;
+};
+
+void	MCentral_Init(MCentral *c, int32 sizeclass);
+int32	MCentral_AllocList(MCentral *c, int32 n, MLink **first);
+void	MCentral_FreeList(MCentral *c, int32 n, MLink *first);
+
+
+// Free(v) must be able to determine the MSpan containing v.
+// The MHeapMap is a 3-level radix tree mapping page numbers to MSpans.
+//
+// NOTE(rsc): On a 32-bit platform (= 20-bit page numbers),
+// we can swap in a 2-level radix tree.
+//
+// NOTE(rsc): We use a 3-level tree because tcmalloc does, but
+// having only three levels requires approximately 1 MB per node
+// in the tree, making the minimum map footprint 3 MB.
+// Using a 4-level tree would cut the minimum footprint to 256 kB.
+// On the other hand, it's just virtual address space: most of
+// the memory is never going to be touched, thus never paged in.
+
+typedef struct MHeapMapNode2 MHeapMapNode2;
+typedef struct MHeapMapNode3 MHeapMapNode3;
+
+// Bits of the page number consumed at each level of the radix tree.
+enum
+{
+#if __SIZEOF_POINTER__ == 4
+	// 32 bit address - 12 bit page size == 20 bits to map
+	MHeapMap_Level1Bits = 8,
+	MHeapMap_Level2Bits = 8,
+	MHeapMap_Level3Bits = 4,
+#else
+	// 64 bit address - 12 bit page size = 52 bits to map
+	MHeapMap_Level1Bits = 18,
+	MHeapMap_Level2Bits = 18,
+	MHeapMap_Level3Bits = 16,
+#endif
+
+	MHeapMap_TotalBits =
+		MHeapMap_Level1Bits +
+		MHeapMap_Level2Bits +
+		MHeapMap_Level3Bits,
+
+	MHeapMap_Level1Mask = (1<<MHeapMap_Level1Bits) - 1,
+	MHeapMap_Level2Mask = (1<<MHeapMap_Level2Bits) - 1,
+	MHeapMap_Level3Mask = (1<<MHeapMap_Level3Bits) - 1,
+};
+
+// Root of the radix tree, indexed by the top Level1Bits of the page
+// number; interior nodes are allocated on demand via allocator.
+struct MHeapMap
+{
+	void *(*allocator)(uintptr);
+	MHeapMapNode2 *p[1<<MHeapMap_Level1Bits];
+};
+
+struct MHeapMapNode2
+{
+	MHeapMapNode3 *p[1<<MHeapMap_Level2Bits];
+};
+
+// Leaf node: maps the low Level3Bits of the page number to a span.
+struct MHeapMapNode3
+{
+	MSpan *s[1<<MHeapMap_Level3Bits];
+};
+
+void	MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
+bool	MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
+MSpan*	MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan*	MHeapMap_GetMaybe(MHeapMap *m, PageID k);
+void	MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
+
+
+// Much of the time, free(v) needs to know only the size class for v,
+// not which span it came from.  The MHeapMap finds the size class
+// by looking up the span.
+//
+// An MHeapMapCache is a simple direct-mapped cache translating
+// page numbers to size classes.  It avoids the expensive MHeapMap
+// lookup for hot pages.
+//
+// The cache entries are 64 bits, with the page number in the low part
+// and the value at the top.
+//
+// NOTE(rsc): On a machine with 32-bit addresses (= 20-bit page numbers),
+// we can use a 16-bit cache entry by not storing the redundant 12 bits
+// of the key that are used as the entry index.  Here in 64-bit land,
+// that trick won't work unless the hash table has 2^28 entries.
+enum
+{
+	MHeapMapCache_HashBits = 12
+};
+
+// Direct-mapped cache of page number -> size class.
+struct MHeapMapCache
+{
+	uintptr array[1<<MHeapMapCache_HashBits];
+};
+
+// All macros for speed (sorry).
+// NOTE(review): KEY is evaluated more than once and TMP is assigned
+// without surrounding parentheses -- callers must pass simple lvalues
+// and side-effect-free keys.
+#define HMASK	((1<<MHeapMapCache_HashBits)-1)
+#define KBITS	MHeapMap_TotalBits
+#define KMASK	((1LL<<KBITS)-1)
+
+// Store VALUE for page KEY: the key in the low bits, value above.
+#define MHeapMapCache_SET(cache, key, value) \
+	((cache)->array[(key) & HMASK] = (key) | ((uintptr)(value) << KBITS))
+
+// Fetch the value cached for page KEY, or 0 on a cache miss.
+#define MHeapMapCache_GET(cache, key, tmp) \
+	(tmp = (cache)->array[(key) & HMASK], \
+	 (tmp & KMASK) == (key) ? (tmp >> KBITS) : 0)
+
+
+// Main malloc heap.
+// The heap itself is the "free[]" and "large" arrays,
+// but all the other global data is here too.
+struct MHeap
+{
+	Lock;		// unnamed member: protects the heap
+	MSpan free[MaxMHeapList];	// free lists of given length
+	MSpan large;			// free lists length >= MaxMHeapList
+	MSpan *allspans;
+
+	// span lookup
+	MHeapMap map;
+	MHeapMapCache mapcache;
+
+	// range of addresses we might see in the heap
+	byte *min;
+	byte *max;
+
+	// central free lists for small size classes.
+	// the union makes sure that the MCentrals are
+	// spaced 64 bytes apart, so that each MCentral.Lock
+	// gets its own cache line.
+	union {
+		MCentral;
+		byte pad[64];
+	} central[NumSizeClasses];
+
+	FixAlloc spanalloc;	// allocator for Span*
+	FixAlloc cachealloc;	// allocator for MCache*
+};
+extern MHeap mheap;
+
+// Span allocation and lookup.
+void	MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
+MSpan*	MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass);
+void	MHeap_Free(MHeap *h, MSpan *s);
+MSpan*	MHeap_Lookup(MHeap *h, PageID p);
+MSpan*	MHeap_LookupMaybe(MHeap *h, PageID p);
+
+int32	mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
+void	gc(int32 force);
+
+// Values stored in an object's per-object reference word (the *ref
+// returned by mlookup).
+enum
+{
+	RefcountOverhead = 4,	// one uint32 per object
+
+	RefFree = 0,	// must be zero
+	RefManual,	// manual allocation - don't free
+	RefStack,		// stack segment - don't free and don't scan for pointers
+	RefNone,		// no references
+	RefSome,		// some references
+};
diff --git a/libgo/runtime/malloc_go.cgo b/libgo/runtime/malloc_go.cgo
new file mode 100644
index 0000000..32c0141
--- /dev/null
+++ b/libgo/runtime/malloc_go.cgo
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package malloc
+#include "go-alloc.h"
+#include "runtime.h"
+#include "malloc.h"
+
+// Alloc allocates n bytes using the runtime allocator.
+func Alloc(n uintptr) (p *byte) {
+	p = __go_alloc(n);
+}
+
+// Free releases memory previously returned by Alloc.
+func Free(p *byte) {
+	__go_free(p);
+}
+
+// Lookup returns the base and size of the allocation containing p.
+func Lookup(p *byte) (base *byte, size uintptr) {
+	mlookup(p, &base, &size, nil);
+}
+
+// GetStats returns a pointer to the global allocation statistics.
+func GetStats() (s *MStats) {
+	s = &mstats;
+}
+
+// GC forces a garbage collection.
+func GC() {
+	gc(1);
+}
+
diff --git a/libgo/runtime/map.cgo b/libgo/runtime/map.cgo
new file mode 100644
index 0000000..2495993
--- /dev/null
+++ b/libgo/runtime/map.cgo
@@ -0,0 +1,68 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "map.h"
+#define nil NULL
+
+typedef unsigned char byte;
+typedef _Bool bool;
+
+typedef struct __go_map hmap;
+typedef struct __go_hash_iter hiter;
+
+/* Access a value in a map, returning a value and a presence
+   indicator.  *VAL is left unmodified when the key is absent.  */
+
+func mapaccess2(h *hmap, key *byte, val *byte) (present bool) {
+	byte *mapval;
+	size_t valsize;
+
+	mapval = __go_map_index(h, key, 0);
+	if (mapval == nil) {
+		present = 0;
+	} else {
+		valsize = h->__descriptor->__map_descriptor->__val_type->__size;
+		__builtin_memcpy(val, mapval, valsize);
+		present = 1;
+	}
+}
+
+/* Optionally assign a value to a map (m[k] = v, p).  When P is false
+   the key is deleted instead.  */
+
+func mapassign2(h *hmap, key *byte, val *byte, p bool) {
+	if (!p) {
+		__go_map_delete(h, key);
+	} else {
+		byte *mapval;
+		size_t valsize;
+
+		/* Index with insertion: creates the entry if needed.  */
+		mapval = __go_map_index(h, key, 1);
+		valsize = h->__descriptor->__map_descriptor->__val_type->__size;
+		__builtin_memcpy(mapval, val, valsize);
+	}
+}
+
+/* Initialize a range over a map.  */
+
+func mapiterinit(h *hmap, it *hiter) {
+	__go_mapiterinit(h, it);
+}
+
+/* Move to the next iteration, updating *HITER.  */
+
+func mapiternext(it *hiter) {
+	__go_mapiternext(it);
+}
+
+/* Get the key of the current iteration.  */
+
+func mapiter1(it *hiter, key *byte) {
+	__go_mapiter1(it, key);
+}
+
+/* Get the key and value of the current iteration.  */
+
+func mapiter2(it *hiter, key *byte, val *byte) {
+	__go_mapiter2(it, key, val);
+}
diff --git a/libgo/runtime/map.h b/libgo/runtime/map.h
new file mode 100644
index 0000000..a0c834a
--- /dev/null
+++ b/libgo/runtime/map.h
@@ -0,0 +1,86 @@
+/* map.h -- the map type for Go.
+
+   Copyright 2009, 2010 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include <stddef.h>
+
+#include "go-type.h"
+
+/* A map descriptor is what we need to manipulate the map.  This is
+   constant for a given map type.  */
+
+struct __go_map_descriptor
+{
+  /* A pointer to the type descriptor for the type of the map itself.  */
+  const struct __go_map_type *__map_descriptor;
+
+  /* A map entry is a struct with three fields:
+       map_entry_type *next_entry;
+       key_type key;
+       value_type value;
+     This is the size of that struct.  */
+  size_t __entry_size;
+
+  /* The offset of the key field in a map entry struct.  */
+  size_t __key_offset;
+
+  /* The offset of the value field in a map entry struct (the value
+     field immediately follows the key field, but there may be some
+     bytes inserted for alignment).  */
+  size_t __val_offset;
+};
+
+struct __go_map
+{
+  /* The constant descriptor for this map.  */
+  const struct __go_map_descriptor *__descriptor;
+
+  /* The number of elements in the hash table.  */
+  size_t __element_count;
+
+  /* The number of entries in the __buckets array.  */
+  size_t __bucket_count;
+
+  /* Each bucket is a pointer to a linked list of map entries.  */
+  void **__buckets;
+};
+
+/* For a map iteration the compiled code will use a pointer to an
+   iteration structure.  The iteration structure will be allocated on
+   the stack.  The Go code must allocate at least enough space.  */
+
+struct __go_hash_iter
+{
+  /* A pointer to the current entry.  This will be set to NULL when
+     the range has completed.  The compiled Go code will test this
+     field, so it must be the first one in the structure.  */
+  const void *entry;
+  /* The map we are iterating over.  */
+  const struct __go_map *map;
+  /* A pointer to the next entry in the current bucket.  This permits
+     deleting the current entry.  This will be NULL when we have seen
+     all the entries in the current bucket.  */
+  const void *next_entry;
+  /* The bucket index of the current and next entry.  */
+  size_t bucket;
+};
+
+extern struct __go_map *__go_new_map (const struct __go_map_descriptor *,
+				      size_t);
+
+extern unsigned long __go_map_next_prime (unsigned long);
+
+extern void *__go_map_index (struct __go_map *, const void *, _Bool);
+
+extern void __go_map_delete (struct __go_map *, const void *);
+
+extern void __go_mapiterinit (const struct __go_map *, struct __go_hash_iter *);
+
+extern void __go_mapiternext (struct __go_hash_iter *);
+
+extern void __go_mapiter1 (struct __go_hash_iter *it, unsigned char *key);
+
+extern void __go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
+			   unsigned char *val);
diff --git a/libgo/runtime/mcache.c b/libgo/runtime/mcache.c
new file mode 100644
index 0000000..a7fafc1
--- /dev/null
+++ b/libgo/runtime/mcache.c
@@ -0,0 +1,104 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Per-thread (in Go, per-M) malloc cache for small objects.
+//
+// See malloc.h for an overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+// Allocate one object of the given size class from the per-thread
+// cache c, refilling the cache from the matching central free list
+// when the class's local list is empty.
+void*
+MCache_Alloc(MCache *c, int32 sizeclass, uintptr size)
+{
+	MCacheList *l;
+	MLink *first, *v;
+	int32 n;
+
+	// Allocate from list.
+	l = &c->list[sizeclass];
+	if(l->list == nil) {
+		// Replenish using central lists.
+		// NOTE(review): if MCentral_AllocList fails to grow and
+		// returns 0, first is nil and the dereference of v below
+		// would fault -- presumably the caller guarantees enough
+		// memory or aborts earlier; confirm.
+		n = MCentral_AllocList(&mheap.central[sizeclass],
+			class_to_transfercount[sizeclass], &first);
+		l->list = first;
+		l->nlist = n;
+		c->size += n*size;
+	}
+	// Pop the head of the list.
+	v = l->list;
+	l->list = v->next;
+	l->nlist--;
+	// Track the low-water mark; the scavenger in MCache_Free uses it.
+	if(l->nlist < l->nlistmin)
+		l->nlistmin = l->nlist;
+	c->size -= size;
+
+	// v is zeroed except for the link pointer
+	// that we used above; zero that.
+	v->next = nil;
+	return v;
+}
+
+// Take n elements off l and return them to the central free list.
+static void
+ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass)
+{
+	MLink *head, **tail;
+	int32 k;
+
+	// Detach the first n elements from l's list by walking a
+	// pointer-to-pointer down the chain and cutting there.
+	head = l->list;
+	tail = &l->list;
+	for(k = 0; k < n; k++)
+		tail = &(*tail)->next;
+	l->list = *tail;
+	*tail = nil;
+
+	// Update the cache accounting, maintaining the low-water mark.
+	l->nlist -= n;
+	if(l->nlistmin > l->nlist)
+		l->nlistmin = l->nlist;
+	c->size -= n*class_to_size[sizeclass];
+
+	// Hand the detached chain back to the central free list.
+	MCentral_FreeList(&mheap.central[sizeclass], n, head);
+}
+
+// Return object v of the given size class to the per-thread cache c.
+// When a class's list grows too long, or the whole cache grows too
+// large, excess objects are released back to the central free lists.
+void
+MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+{
+	int32 i, n;
+	MCacheList *l;
+	MLink *p;
+
+	// Put back on list.
+	l = &c->list[sizeclass];
+	p = v;
+	p->next = l->list;
+	l->list = p;
+	l->nlist++;
+	c->size += size;
+
+	if(l->nlist >= MaxMCacheListLen) {
+		// Release a chunk back.
+		ReleaseN(c, l, class_to_transfercount[sizeclass], sizeclass);
+	}
+
+	if(c->size >= MaxMCacheSize) {
+		// Scavenge.
+		for(i=0; i<NumSizeClasses; i++) {
+			l = &c->list[i];
+			n = l->nlistmin;
+
+			// n is the minimum number of elements we've seen on
+			// the list since the last scavenge.  If n > 0, it means that
+			// we could have gotten by with n fewer elements
+			// without needing to consult the central free list.
+			// Move toward that situation by releasing n/2 of them.
+			if(n > 0) {
+				if(n > 1)
+					n /= 2;
+				ReleaseN(c, l, n, i);
+			}
+			// Reset the low-water mark for the next scavenge round.
+			l->nlistmin = l->nlist;
+		}
+	}
+}
diff --git a/libgo/runtime/mcentral.c b/libgo/runtime/mcentral.c
new file mode 100644
index 0000000..829f821
--- /dev/null
+++ b/libgo/runtime/mcentral.c
@@ -0,0 +1,194 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Central free lists.
+//
+// See malloc.h for an overview.
+//
+// The MCentral doesn't actually contain the list of free objects; the MSpan does.
+// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
+// and those that are completely allocated (c->empty).
+//
+// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
+// into sections of class_to_transfercount[sizeclass] objects
+// so that it is faster to move those lists between MCaches and MCentrals.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static bool MCentral_Grow(MCentral *c);
+static void* MCentral_Alloc(MCentral *c);
+static void MCentral_Free(MCentral *c, void *v);
+
+// Initialize a single central free list for the given size class.
+void
+MCentral_Init(MCentral *c, int32 sizeclass)
+{
+	// The MCentral is locked via pthread; the struct pointer is
+	// passed directly, so presumably the Lock is its first member.
+	if(pthread_mutex_init(c, NULL) != 0)
+		throw("pthread_mutex_init failed");
+	c->sizeclass = sizeclass;
+	// Both span lists start out empty.
+	MSpanList_Init(&c->empty);
+	MSpanList_Init(&c->nonempty);
+}
+
+// Allocate up to n objects from the central free list.
+// Return the number of objects allocated.
+// The objects are linked together by their first words.
+// On return, *pfirst points at the first object (nil on failure).
+int32
+MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
+{
+	MLink *first, *last, *v;
+	int32 i;
+
+
+	lock(c);
+	// Replenish central list if empty.
+	if(MSpanList_IsEmpty(&c->nonempty)) {
+		if(!MCentral_Grow(c)) {
+			unlock(c);
+			*pfirst = nil;
+			return 0;
+		}
+	}
+
+	// Copy from list, up to n.
+	// First one is guaranteed to work, because we just grew the list.
+	first = MCentral_Alloc(c);
+	last = first;
+	for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
+		last->next = v;
+		last = v;
+	}
+	last->next = nil;
+	c->nfree -= i;
+
+	unlock(c);
+	*pfirst = first;
+	return i;
+}
+
+// Helper: allocate one object from the central free list.
+// Caller must hold c's lock.
+static void*
+MCentral_Alloc(MCentral *c)
+{
+	MSpan *s;
+	MLink *v;
+
+	if(MSpanList_IsEmpty(&c->nonempty))
+		return nil;
+	s = c->nonempty.next;
+	s->ref++;
+	// Pop the head of the span's free list.
+	v = s->freelist;
+	s->freelist = v->next;
+	// A fully-allocated span moves to the empty list.
+	if(s->freelist == nil) {
+		MSpanList_Remove(s);
+		MSpanList_Insert(&c->empty, s);
+	}
+	return v;
+}
+
+// Free a list of objects back into the central free list.
+// The objects are linked together by their first words;
+// the list is terminated by a nil next pointer.
+void
+MCentral_FreeList(MCentral *c, int32 n, MLink *start)
+{
+	MLink *v, *next;
+
+	// Assume next == nil marks end of list.
+	// n and end would be useful if we implemented
+	// the transfer cache optimization in the TODO above.
+	USED(n);
+
+	lock(c);
+	for(v=start; v; v=next) {
+		// Save the link before MCentral_Free reuses it.
+		next = v->next;
+		MCentral_Free(c, v);
+	}
+	unlock(c);
+}
+
+// Helper: free one object back into the central free list.
+// Caller must hold c's lock; the lock is dropped and re-acquired
+// around the call into the heap below.
+static void
+MCentral_Free(MCentral *c, void *v)
+{
+	MSpan *s;
+	PageID page;
+	MLink *p, *next;
+
+	// Find span for v.
+	page = (uintptr)v >> PageShift;
+	s = MHeap_Lookup(&mheap, page);
+	if(s == nil || s->ref == 0)
+		throw("invalid free");
+
+	// Move to nonempty if necessary.
+	if(s->freelist == nil) {
+		MSpanList_Remove(s);
+		MSpanList_Insert(&c->nonempty, s);
+	}
+
+	// Add v back to s's free list.
+	p = v;
+	p->next = s->freelist;
+	s->freelist = p;
+	c->nfree++;
+
+	// If s is completely freed, return it to the heap.
+	if(--s->ref == 0) {
+		MSpanList_Remove(s);
+		// Freed blocks are zeroed except for the link pointer.
+		// Zero the link pointers so that the page is all zero.
+		for(p=s->freelist; p; p=next) {
+			next = p->next;
+			p->next = nil;
+		}
+		s->freelist = nil;
+		c->nfree -= (s->npages << PageShift) / class_to_size[c->sizeclass];
+		// Drop c's lock while calling into the heap:
+		// MHeap_Free takes the heap lock itself.
+		unlock(c);
+		MHeap_Free(&mheap, s);
+		lock(c);
+	}
+}
+
+// Fetch a new span from the heap and
+// carve into objects for the free list.
+// Caller holds c's lock; it is dropped around the heap call and
+// re-acquired before the span is published.
+static bool
+MCentral_Grow(MCentral *c)
+{
+	int32 i, n, npages, size;
+	MLink **tailp, *v;
+	byte *p;
+	MSpan *s;
+
+	// MHeap_Alloc takes the heap lock; do not hold c's lock across it.
+	unlock(c);
+	npages = class_to_allocnpages[c->sizeclass];
+	s = MHeap_Alloc(&mheap, npages, c->sizeclass);
+	if(s == nil) {
+		// TODO(rsc): Log out of memory
+		lock(c);
+		return false;
+	}
+
+	// Carve span into sequence of blocks.
+	// n objects of (size + RefcountOverhead) bytes fit; the
+	// refcount words live after the n objects, at p + size*n.
+	tailp = &s->freelist;
+	p = (byte*)(s->start << PageShift);
+	size = class_to_size[c->sizeclass];
+	n = (npages << PageShift) / (size + RefcountOverhead);
+	s->gcref = (uint32*)(p + size*n);
+	for(i=0; i<n; i++) {
+		v = (MLink*)p;
+		*tailp = v;
+		tailp = &v->next;
+		p += size;
+	}
+	*tailp = nil;
+
+	lock(c);
+	c->nfree += n;
+	MSpanList_Insert(&c->nonempty, s);
+	return true;
+}
diff --git a/libgo/runtime/mfixalloc.c b/libgo/runtime/mfixalloc.c
new file mode 100644
index 0000000..88872c2
--- /dev/null
+++ b/libgo/runtime/mfixalloc.c
@@ -0,0 +1,55 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fixed-size object allocator.  Returned memory is not zeroed.
+//
+// See malloc.h for overview.
+
+#include "runtime.h"
+#include "malloc.h"
+
+// Initialize f to allocate objects of the given size,
+// using the allocator to obtain chunks of memory.
+// The optional first(arg, obj) hook runs once per fresh object.
+void
+FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg)
+{
+	// Record the configuration.
+	f->alloc = alloc;
+	f->arg = arg;
+	f->first = first;
+	f->size = size;
+	// Start with no free list and no current chunk.
+	f->chunk = nil;
+	f->list = nil;
+	f->nchunk = 0;
+}
+
+// Allocate one fixed-size object, preferring the free list and
+// otherwise carving from the current chunk (fetching a new chunk
+// from the underlying allocator as needed).  Memory is not zeroed.
+void*
+FixAlloc_Alloc(FixAlloc *f)
+{
+	void *v;
+
+	if(f->list) {
+		// Reuse a freed object; its first word is the list link.
+		v = f->list;
+		f->list = *(void**)f->list;
+		return v;
+	}
+	if(f->nchunk < f->size) {
+		// Not enough room left in the current chunk; fetch a new
+		// one.  Any tail of the old chunk (< size bytes) is
+		// abandoned, not recycled.
+		f->chunk = f->alloc(FixAllocChunk);
+		if(f->chunk == nil)
+			throw("out of memory (FixAlloc)");
+		f->nchunk = FixAllocChunk;
+	}
+	v = f->chunk;
+	if(f->first)
+		f->first(f->arg, v);
+	f->chunk += f->size;
+	f->nchunk -= f->size;
+	return v;
+}
+
+// Return object p to f's free list.  The object's first word is
+// reused as the list link.
+void
+FixAlloc_Free(FixAlloc *f, void *p)
+{
+	void **link;
+
+	link = p;
+	*link = f->list;
+	f->list = p;
+}
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
new file mode 100644
index 0000000..9dc3c35
--- /dev/null
+++ b/libgo/runtime/mheap.c
@@ -0,0 +1,446 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page heap.
+//
+// See malloc.h for overview.
+//
+// When a MSpan is in the heap free list, state == MSpanFree
+// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
+//
+// When a MSpan is allocated, state == MSpanInUse
+// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
+
+#include "runtime.h"
+#include "malloc.h"
+
+static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
+static bool MHeap_Grow(MHeap*, uintptr);
+static void MHeap_FreeLocked(MHeap*, MSpan*);
+static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
+static MSpan *BestFit(MSpan*, uintptr, MSpan*);
+
+// FixAlloc "first" hook for the span allocator: thread every newly
+// created MSpan onto the heap's list of all spans.
+static void
+RecordSpan(void *vh, byte *p)
+{
+	MHeap *heap;
+	MSpan *span;
+
+	heap = vh;
+	span = (MSpan*)p;
+	span->allnext = heap->allspans;
+	heap->allspans = span;
+}
+
+// Initialize the heap; fetch memory using alloc.
+void
+MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
+{
+	uint32 i;
+
+	// The heap pointer is passed directly to pthread_mutex_init;
+	// presumably the Lock is the first member of MHeap.
+	if (pthread_mutex_init(h, NULL) != 0)
+		throw("pthread_mutex_init failed");
+	// spanalloc notifies RecordSpan of each new MSpan.
+	FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
+	FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
+	MHeapMap_Init(&h->map, alloc);
+	// h->mapcache needs no init
+	for(i=0; i<nelem(h->free); i++)
+		MSpanList_Init(&h->free[i]);
+	MSpanList_Init(&h->large);
+	for(i=0; i<nelem(h->central); i++)
+		MCentral_Init(&h->central[i], i);
+}
+
+// Allocate a new span of npage pages from the heap
+// and record its size class in the HeapMap and HeapMapCache.
+// Returns nil if the heap cannot satisfy the request.
+MSpan*
+MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass)
+{
+	MSpan *span;
+
+	lock(h);
+	span = MHeap_AllocLocked(h, npage, sizeclass);
+	if(span != nil) {
+		// Account for the pages only on success.
+		mstats.inuse_pages += npage;
+	}
+	unlock(h);
+	return span;
+}
+
+// Allocate a span of npage pages; caller holds the heap lock.
+static MSpan*
+MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
+{
+	uintptr n;
+	MSpan *s, *t;
+
+	// Try in fixed-size lists up to max.
+	for(n=npage; n < nelem(h->free); n++) {
+		if(!MSpanList_IsEmpty(&h->free[n])) {
+			s = h->free[n].next;
+			goto HaveSpan;
+		}
+	}
+
+	// Best fit in list of large spans.
+	if((s = MHeap_AllocLarge(h, npage)) == nil) {
+		if(!MHeap_Grow(h, npage))
+			return nil;
+		if((s = MHeap_AllocLarge(h, npage)) == nil)
+			return nil;
+	}
+
+HaveSpan:
+	// Mark span in use.
+	if(s->state != MSpanFree)
+		throw("MHeap_AllocLocked - MSpan not free");
+	if(s->npages < npage)
+		throw("MHeap_AllocLocked - bad npages");
+	MSpanList_Remove(s);
+	s->state = MSpanInUse;
+
+	if(s->npages > npage) {
+		// Trim extra and put it back in the heap.
+		t = FixAlloc_Alloc(&h->spanalloc);
+		MSpan_Init(t, s->start + npage, s->npages - npage);
+		s->npages = npage;
+		// Record s's new last page and t's boundaries in the map.
+		MHeapMap_Set(&h->map, t->start - 1, s);
+		MHeapMap_Set(&h->map, t->start, t);
+		MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
+		// MHeap_FreeLocked requires state == MSpanInUse.
+		t->state = MSpanInUse;
+		MHeap_FreeLocked(h, t);
+	}
+
+	// If span is being used for small objects, cache size class.
+	// No matter what, cache span info, because gc needs to be
+	// able to map interior pointer to containing span.
+	s->sizeclass = sizeclass;
+	for(n=0; n<npage; n++)
+		MHeapMap_Set(&h->map, s->start+n, s);
+	if(sizeclass == 0) {
+		uintptr tmp;
+
+		// If there are entries for this span, invalidate them,
+		// but don't blow out cache entries about other spans.
+		for(n=0; n<npage; n++)
+			if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
+				MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
+	} else {
+		// Save cache entries for this span.
+		// If there's a size class, there aren't that many pages.
+		for(n=0; n<npage; n++)
+			MHeapMapCache_SET(&h->mapcache, s->start+n, sizeclass);
+	}
+
+	return s;
+}
+
+// Allocate a span of at least npage pages from the list of large
+// spans, choosing the best (smallest suitable) fit; nil if none.
+static MSpan*
+MHeap_AllocLarge(MHeap *h, uintptr npage)
+{
+	return BestFit(&h->large, npage, nil);
+}
+
+// Search list for smallest span with >= npage pages.
+// If there are multiple smallest spans, take the one
+// with the earliest starting address.
+// The incoming best (may be nil) is the candidate to beat.
+static MSpan*
+BestFit(MSpan *list, uintptr npage, MSpan *best)
+{
+	MSpan *s;
+	bool better;
+
+	for(s = list->next; s != list; s = s->next) {
+		// Too small to satisfy the request at all.
+		if(s->npages < npage)
+			continue;
+		if(best == nil)
+			better = true;
+		else if(s->npages != best->npages)
+			better = s->npages < best->npages;
+		else
+			better = s->start < best->start;
+		if(better)
+			best = s;
+	}
+	return best;
+}
+
+// Try to add at least npage pages of memory to the heap,
+// returning whether it worked.  Caller holds the heap lock.
+static bool
+MHeap_Grow(MHeap *h, uintptr npage)
+{
+	uintptr ask;
+	void *v;
+	MSpan *s;
+
+	// Ask for a big chunk, to reduce the number of mappings
+	// the operating system needs to track; also amortizes
+	// the overhead of an operating system mapping.
+	// For Native Client, allocate a multiple of 64kB (16 pages).
+	npage = (npage+15)&~15;
+	ask = npage<<PageShift;
+	if(ask < HeapAllocChunk)
+		ask = HeapAllocChunk;
+
+	v = SysAlloc(ask);
+	if(v == nil) {
+		// The big request failed; retry with the minimum size.
+		if(ask > (npage<<PageShift)) {
+			ask = npage<<PageShift;
+			v = SysAlloc(ask);
+		}
+		if(v == nil)
+			return false;
+	}
+
+	// Track the overall address range the heap has ever covered.
+	if((byte*)v < h->min || h->min == nil)
+		h->min = v;
+	if((byte*)v+ask > h->max)
+		h->max = (byte*)v+ask;
+
+	// NOTE(rsc): In tcmalloc, if we've accumulated enough
+	// system allocations, the heap map gets entirely allocated
+	// in 32-bit mode.  (In 64-bit mode that's not practical.)
+	// Preallocate one page on each side so that coalescing in
+	// MHeap_FreeLocked can probe the neighbors safely.
+	if(!MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
+		SysFree(v, ask);
+		return false;
+	}
+
+	// Create a fake "in use" span and free it, so that the
+	// right coalescing happens.
+	s = FixAlloc_Alloc(&h->spanalloc);
+	MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
+	MHeapMap_Set(&h->map, s->start, s);
+	MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+	s->state = MSpanInUse;
+	MHeap_FreeLocked(h, s);
+	return true;
+}
+
+// Look up the span at the given page number.
+// Page number is guaranteed to be in map
+// and is guaranteed to be start or end of span.
+MSpan*
+MHeap_Lookup(MHeap *h, PageID p)
+{
+	return MHeapMap_Get(&h->map, p);
+}
+
+// Look up the span at the given page number.
+// Page number is *not* guaranteed to be in map
+// and may be anywhere in the span.
+// Map entries for the middle of a span are only
+// valid for allocated spans.  Free spans may have
+// other garbage in their middles, so we have to
+// check for that.
+MSpan*
+MHeap_LookupMaybe(MHeap *h, PageID p)
+{
+	MSpan *span;
+
+	span = MHeapMap_GetMaybe(&h->map, p);
+	if(span == nil)
+		return nil;
+	// Reject stale entries that do not actually cover page p.
+	if(p < span->start)
+		return nil;
+	if(p - span->start >= span->npages)
+		return nil;
+	return span;
+}
+
+// Free the span back into the heap, taking the heap lock and
+// updating the in-use page accounting.
+void
+MHeap_Free(MHeap *h, MSpan *s)
+{
+	lock(h);
+	mstats.inuse_pages -= s->npages;
+	MHeap_FreeLocked(h, s);
+	unlock(h);
+}
+
+// Free span s; caller holds the heap lock.  s must be MSpanInUse
+// with no outstanding object references.  Adjacent free spans are
+// coalesced with s before it goes back on a free list.
+static void
+MHeap_FreeLocked(MHeap *h, MSpan *s)
+{
+	MSpan *t;
+
+	if(s->state != MSpanInUse || s->ref != 0) {
+		printf("MHeap_FreeLocked - span %p ptr %zu state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
+		throw("MHeap_FreeLocked - invalid free");
+	}
+	s->state = MSpanFree;
+	MSpanList_Remove(s);
+
+	// Coalesce with earlier, later spans.
+	// The map entry for the page just before s is the end page of
+	// any earlier span (see MHeap_AllocLocked / MHeap_Grow).
+	if((t = MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
+		s->start = t->start;
+		s->npages += t->npages;
+		MHeapMap_Set(&h->map, s->start, s);
+		MSpanList_Remove(t);
+		t->state = MSpanDead;
+		FixAlloc_Free(&h->spanalloc, t);
+	}
+	// The page just past s is the start page of any later span.
+	if((t = MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
+		s->npages += t->npages;
+		MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
+		MSpanList_Remove(t);
+		t->state = MSpanDead;
+		FixAlloc_Free(&h->spanalloc, t);
+	}
+
+	// Insert s into appropriate list.
+	if(s->npages < nelem(h->free))
+		MSpanList_Insert(&h->free[s->npages], s);
+	else
+		MSpanList_Insert(&h->large, s);
+
+	// TODO(rsc): IncrementalScavenge() to return memory to OS.
+}
+
+// 3-level radix tree mapping page ids to Span*.
+// Initialize the map; interior nodes are allocated lazily by
+// MHeapMap_Preallocate using the given allocator.
+void
+MHeapMap_Init(MHeapMap *m, void *(*allocator)(size_t))
+{
+	m->allocator = allocator;
+}
+
+// Return the span recorded for page k.  The caller guarantees the
+// radix-tree nodes along the path exist (see MHeapMap_Preallocate).
+MSpan*
+MHeapMap_Get(MHeapMap *m, PageID k)
+{
+	int32 i1, i2, i3;
+
+	// Reject page numbers outside the tree's addressable range.
+	if((k >> MHeapMap_TotalBits) != 0)
+		throw("MHeapMap_Get");
+
+	// Split the page number into the three per-level indices.
+	i3 = k & MHeapMap_Level3Mask;
+	i2 = (k >> MHeapMap_Level3Bits) & MHeapMap_Level2Mask;
+	i1 = (k >> (MHeapMap_Level3Bits + MHeapMap_Level2Bits)) & MHeapMap_Level1Mask;
+
+	return m->p[i1]->p[i2]->s[i3];
+}
+
+// Like MHeapMap_Get, but tolerate missing interior nodes:
+// return nil instead of faulting when the path to page k has not
+// been allocated.
+MSpan*
+MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+{
+	int32 i1, i2, i3;
+	MHeapMapNode2 *p2;
+	MHeapMapNode3 *p3;
+
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		throw("MHeapMap_GetMaybe");	// was misreported as MHeapMap_Get
+
+	p2 = m->p[i1];
+	if(p2 == nil)
+		return nil;
+	p3 = p2->p[i2];
+	if(p3 == nil)
+		return nil;
+	return p3->s[i3];
+}
+
+// Record span s for page k.  The caller guarantees the radix-tree
+// nodes along the path exist (see MHeapMap_Preallocate).
+void
+MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
+{
+	int32 i1, i2, i3;
+
+	// Peel off the three per-level indices, low bits first.
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		throw("MHeapMap_Set");
+
+	m->p[i1]->p[i2]->s[i3] = s;
+}
+
+// Allocate the storage required for entries [k, k+1, ..., k+len-1]
+// so that Get and Set calls need not check for nil pointers.
+// Returns false if a key is out of range or an allocation fails.
+bool
+MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len)
+{
+	uintptr end;
+	int32 i1, i2;
+	MHeapMapNode2 *p2;
+	MHeapMapNode3 *p3;
+
+	end = k+len;
+	while(k < end) {
+		if((k >> MHeapMap_TotalBits) != 0)
+			return false;
+		i2 = (k >> MHeapMap_Level3Bits) & MHeapMap_Level2Mask;
+		i1 = (k >> (MHeapMap_Level3Bits + MHeapMap_Level2Bits)) & MHeapMap_Level1Mask;
+
+		// first-level pointer
+		if((p2 = m->p[i1]) == nil) {
+			p2 = m->allocator(sizeof *p2);
+			if(p2 == nil)
+				return false;
+			sys_memclr((byte*)p2, sizeof *p2);
+			m->p[i1] = p2;
+		}
+
+		// second-level pointer
+		if(p2->p[i2] == nil) {
+			p3 = m->allocator(sizeof *p3);
+			if(p3 == nil)
+				return false;
+			sys_memclr((byte*)p3, sizeof *p3);
+			p2->p[i2] = p3;
+		}
+
+		// advance key past this leaf node
+		// (each leaf covers 2^MHeapMap_Level3Bits pages)
+		k = ((k >> MHeapMap_Level3Bits) + 1) << MHeapMap_Level3Bits;
+	}
+	return true;
+}
+
+// Initialize a new span with the given start and npages.
+void
+MSpan_Init(MSpan *span, PageID start, uintptr npages)
+{
+	// Not yet on any list.
+	span->next = nil;
+	span->prev = nil;
+	span->start = start;
+	span->npages = npages;
+	// No objects carved out or handed out yet.
+	span->freelist = nil;
+	span->ref = 0;
+	span->sizeclass = 0;
+	span->state = 0;
+}
+
+// Initialize an empty doubly-linked list.
+// An empty list is a header node linked to itself.
+void
+MSpanList_Init(MSpan *list)
+{
+	list->next = list;
+	list->prev = list;
+	list->state = MSpanListHead;
+}
+
+// Unlink span from whatever list it is on; a no-op if it is
+// already detached (both links nil).
+void
+MSpanList_Remove(MSpan *span)
+{
+	MSpan *before, *after;
+
+	if(span->next == nil && span->prev == nil)
+		return;
+	before = span->prev;
+	after = span->next;
+	before->next = after;
+	after->prev = before;
+	span->next = nil;
+	span->prev = nil;
+}
+
+// A list is empty when the header node points back at itself.
+bool
+MSpanList_IsEmpty(MSpan *list)
+{
+	return list->next == list;
+}
+
+// Insert span at the front of list (right after the header node).
+// The span must not currently be on any list.
+void
+MSpanList_Insert(MSpan *list, MSpan *span)
+{
+	if(span->prev != nil || span->next != nil)
+		throw("MSpanList_Insert");
+	span->prev = list;
+	span->next = list->next;
+	span->next->prev = span;
+	list->next = span;
+}
diff --git a/libgo/runtime/msize.c b/libgo/runtime/msize.c
new file mode 100644
index 0000000..bcdc300
--- /dev/null
+++ b/libgo/runtime/msize.c
@@ -0,0 +1,165 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Malloc small size classes.
+//
+// See malloc.h for overview.
+//
+// The size classes are chosen so that rounding an allocation
+// request up to the next size class wastes at most 12.5% (1.125x).
+//
+// Each size class has its own page count that gets allocated
+// and chopped up when new objects of the size class are needed.
+// That page count is chosen so that chopping up the run of
+// pages into objects of the given size wastes at most 12.5% (1.125x)
+// of the memory.  It is not necessary that the cutoff here be
+// the same as above.
+//
+// The two sources of waste multiply, so the worst possible case
+// for the above constraints would be that allocations of some
+// size might have a 26.6% (1.266x) overhead.
+// In practice, only one of the wastes comes into play for a
+// given size (sizes < 512 waste mainly on the round-up,
+// sizes > 512 waste mainly on the page chopping).
+//
+// TODO(rsc): Compute max waste for any given size.
+
+#include "runtime.h"
+#include "malloc.h"
+
+/* Per-size-class tables, filled in by InitSizes.  */
+int32 class_to_size[NumSizeClasses];		// object size for each class
+int32 class_to_allocnpages[NumSizeClasses];	// span size (pages) per class
+int32 class_to_transfercount[NumSizeClasses];	// batch size for cache<->central moves
+
+// The SizeToClass lookup is implemented using two arrays,
+// one mapping sizes <= 1024 to their class and one mapping
+// sizes >= 1024 and <= MaxSmallSize to their class.
+// All objects are 8-aligned, so the first array is indexed by
+// the size divided by 8 (rounded up).  Objects >= 1024 bytes
+// are 128-aligned, so the second array is indexed by the
+// size divided by 128 (rounded up).  The arrays are filled in
+// by InitSizes.
+
+static int32 size_to_class8[1024/8 + 1];
+static int32 size_to_class128[(MaxSmallSize-1024)/128 + 1];
+
+// Map an allocation size to its size class (see the tables above).
+int32
+SizeToClass(int32 size)
+{
+	if(size > MaxSmallSize)
+		throw("SizeToClass - invalid size");
+	// Small sizes use the 8-byte-granularity table; sizes above
+	// 1024-8 use the 128-byte-granularity table.
+	if(size <= 1024-8)
+		return size_to_class8[(size+7)>>3];
+	return size_to_class128[(size-1024+127) >> 7];
+}
+
+// Build the size-class tables: choose the class sizes, the pages
+// allocated per class, the size-to-class lookup tables, and the
+// cache<->central transfer batch sizes.
+void
+InitSizes(void)
+{
+	int32 align, sizeclass, size, osize, nextsize, n;
+	uint32 i;
+	uintptr allocsize, npages;
+
+	// Initialize the class_to_size table (and choose class sizes in the process).
+	class_to_size[0] = 0;
+	sizeclass = 1;	// 0 means no class
+	align = 8;
+	for(size = align; size <= MaxSmallSize; size += align) {
+		if((size&(size-1)) == 0) {	// bump alignment once in a while
+			if(size >= 2048)
+				align = 256;
+			else if(size >= 128)
+				align = size / 8;
+			else if(size >= 16)
+				align = 16;	// required for x86 SSE instructions, if we want to use them
+		}
+		if((align&(align-1)) != 0)
+			throw("InitSizes - bug");
+
+		// Make the allocnpages big enough that
+		// the leftover is less than 1/8 of the total,
+		// so wasted space is at most 12.5%.
+		allocsize = PageSize;
+		// osize is the true per-object footprint, including the
+		// refcount word accounted by RefcountOverhead.
+		osize = size + RefcountOverhead;
+		while(allocsize%osize > (PageSize/8))
+			allocsize += PageSize;
+		npages = allocsize >> PageShift;
+
+		// If the previous sizeclass chose the same
+		// allocation size and fit the same number of
+		// objects into the page, we might as well
+		// use just this size instead of having two
+		// different sizes.
+		if(sizeclass > 1
+		&& (int32)npages == class_to_allocnpages[sizeclass-1]
+		&& allocsize/osize == allocsize/(class_to_size[sizeclass-1]+RefcountOverhead)) {
+			class_to_size[sizeclass-1] = size;
+			continue;
+		}
+
+		class_to_allocnpages[sizeclass] = npages;
+		class_to_size[sizeclass] = size;
+		sizeclass++;
+	}
+	if(sizeclass != NumSizeClasses) {
+		printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses);
+		throw("InitSizes - bad NumSizeClasses");
+	}
+
+	// Initialize the size_to_class tables.
+	nextsize = 0;
+	for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+		for(; nextsize < 1024 && nextsize <= class_to_size[sizeclass]; nextsize+=8)
+			size_to_class8[nextsize/8] = sizeclass;
+		if(nextsize >= 1024)
+			for(; nextsize <= class_to_size[sizeclass]; nextsize += 128)
+				size_to_class128[(nextsize-1024)/128] = sizeclass;
+	}
+
+	// Double-check SizeToClass.
+	// (Disabled; flip the if(0) to run the self-test.)
+	if(0) {
+		for(n=0; n < MaxSmallSize; n++) {
+			sizeclass = SizeToClass(n);
+			if(sizeclass < 1 || sizeclass >= NumSizeClasses || class_to_size[sizeclass] < n) {
+				printf("size=%d sizeclass=%d class_to_size=%d\n", n, sizeclass, class_to_size[sizeclass]);
+				printf("incorrect SizeToClass");
+				goto dump;
+			}
+			if(sizeclass > 1 && class_to_size[sizeclass-1] >= n) {
+				printf("size=%d sizeclass=%d class_to_size=%d\n", n, sizeclass, class_to_size[sizeclass]);
+				printf("SizeToClass too big");
+				goto dump;
+			}
+		}
+	}
+
+	// Initialize the class_to_transfercount table.
+	// Aim for about 64kB per transfer, clamped to [2, 32] objects.
+	for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) {
+		n = 64*1024 / class_to_size[sizeclass];
+		if(n < 2)
+			n = 2;
+		if(n > 32)
+			n = 32;
+		class_to_transfercount[sizeclass] = n;
+	}
+	return;
+
+dump:
+	if(1){
+		printf("NumSizeClasses=%d\n", NumSizeClasses);
+		printf("class_to_size:");
+		for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++)
+			printf(" %d", class_to_size[sizeclass]);
+		printf("\n\n");
+		printf("size_to_class8:");
+		for(i=0; i<nelem(size_to_class8); i++)
+			printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], class_to_size[size_to_class8[i]]);
+		printf("\n");
+		printf("size_to_class128:");
+		for(i=0; i<nelem(size_to_class128); i++)
+			printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], class_to_size[size_to_class128[i]]);
+		printf("\n");
+	}
+	throw("InitSizes failed");
+}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
new file mode 100644
index 0000000..1930ea1
--- /dev/null
+++ b/libgo/runtime/proc.c
@@ -0,0 +1,12 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "runtime.h"
+#include "malloc.h"	/* so that acid generated from proc.c includes malloc data structures */
+
+typedef struct Sched Sched;
+
+/* The initial M (per-thread state); the thread-local pointer below
+   is statically initialized to it.  */
+M	m0;
+
+/* Per-thread pointer to the current thread's M.  */
+__thread M *m = &m0;
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
new file mode 100644
index 0000000..6937d18
--- /dev/null
+++ b/libgo/runtime/runtime.h
@@ -0,0 +1,114 @@
+/* runtime.h -- runtime support for Go.
+
+   Copyright 2009 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#define _GNU_SOURCE
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/mman.h>
+
+#include "go-alloc.h"
+#include "go-panic.h"
+#include "go-string.h"
+
+typedef struct __go_string *String;
+
+/* This file supports C files copied from the 6g runtime library.
+   This is a version of the 6g runtime.h rewritten for gccgo's version
+   of the code.  */
+
+typedef signed int   int8    __attribute__ ((mode (QI)));
+typedef unsigned int uint8   __attribute__ ((mode (QI)));
+typedef signed int   int16   __attribute__ ((mode (HI)));
+typedef unsigned int uint16  __attribute__ ((mode (HI)));
+typedef signed int   int32   __attribute__ ((mode (SI)));
+typedef unsigned int uint32  __attribute__ ((mode (SI)));
+typedef signed int   int64   __attribute__ ((mode (DI)));
+typedef unsigned int uint64  __attribute__ ((mode (DI)));
+typedef float        float32 __attribute__ ((mode (SF)));
+typedef double       float64 __attribute__ ((mode (DF)));
+typedef unsigned int uintptr __attribute__ ((mode (pointer)));
+
+/* Defined types.  */
+
+typedef	uint8			bool;
+typedef	uint8			byte;
+typedef	struct	M		M;
+typedef	struct	MCache		MCache;
+
+/* We use mutexes for locks.  6g uses futexes directly, and perhaps
+   someday we will do that too.  */
+
+typedef pthread_mutex_t		Lock;
+
+/* A Note: a one-time sleep/wakeup event (see the noteclear /
+   notesleep / notewakeup contract described below).  */
+
+typedef	struct	Note		Note;
+
+struct Note {
+	/* NOTE(review): presumably set nonzero by notewakeup and
+	   tested by notesleep -- confirm against the definitions,
+	   which are not in this file.  */
+	int32 woken;
+};
+
+/* Per CPU declarations.  */
+
+extern __thread		M* 	m;
+
+/* Constants.  */
+
+enum
+{
+	true	= 1,
+	false	= 0,
+};
+
+/* Structures.  */
+
+struct	M
+{
+	int32	mallocing;
+	MCache	*mcache;
+};
+
+/* Macros.  */
+#define	nelem(x)	(sizeof(x)/sizeof((x)[0]))
+#define	nil		((void*)0)
+#define USED(v)		((void) v)
+
+/* We map throw to panic.  */
+#define throw(s) __go_panic_msg (s)
+
+/* Mutual exclusion locks.  */
+#define lock(p) \
+  (pthread_mutex_lock(p) == 0 || (__go_panic_msg ("lock failed"), 0))
+#define unlock(p) \
+  (pthread_mutex_unlock(p) == 0 || (__go_panic_msg ("unlock failed"), 0))
+
+void	mallocinit(void);
+void	siginit(void);
+bool	sigsend(int32 sig);
+
+/*
+ * sleep and wakeup on one-time events.
+ * before any calls to notesleep or notewakeup,
+ * must call noteclear to initialize the Note.
+ * then, any number of threads can call notesleep
+ * and exactly one thread can call notewakeup (once).
+ * once notewakeup has been called, all the notesleeps
+ * will return.  future notesleeps will return immediately.
+ */
+void	noteclear(Note*);
+void	notesleep(Note*);
+void	notewakeup(Note*);
+
+/* Functions.  */
+#define sys_memclr(buf, size) __builtin_memset(buf, 0, size)
+#define sys_mmap mmap
+MCache*	allocmcache(void);
+void*	mallocgc(uintptr size);
+void	free(void *v);
+
+#define cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
diff --git a/libgo/runtime/sigqueue.cgo b/libgo/runtime/sigqueue.cgo
new file mode 100644
index 0000000..79feab6
--- /dev/null
+++ b/libgo/runtime/sigqueue.cgo
@@ -0,0 +1,107 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements runtime support for signal handling.
+//
+// Most synchronization primitives are not available from
+// the signal handler (it cannot block and cannot use locks)
+// so the handler communicates with a processing goroutine
+// via struct sig, below.
+//
+// Ownership for sig.Note passes back and forth between
+// the signal handler and the signal goroutine in rounds.
+// The initial state is that sig.note is cleared (setup by siginit).
+// At the beginning of each round, mask == 0.
+// The round goes through three stages:
+//
+// (In parallel)
+// 1a) One or more signals arrive and are handled
+// by sigsend using cas to set bits in sig.mask.
+// The handler that changes sig.mask from zero to non-zero
+// calls notewakeup(&sig).
+// 1b) Sigrecv calls notesleep(&sig) to wait for the wakeup.
+//
+// 2) Having received the wakeup, sigrecv knows that sigsend
+// will not send another wakeup, so it can noteclear(&sig)
+// to prepare for the next round. (Sigsend may still be adding
+// signals to sig.mask at this point, which is fine.)
+//
+// 3) Sigrecv uses cas to grab the current sig.mask and zero it,
+// triggering the next round.
+//
+// The signal handler takes ownership of the note by atomically
+// changing mask from a zero to non-zero value. It gives up
+// ownership by calling notewakeup. The signal goroutine takes
+// ownership by returning from notesleep (caused by the notewakeup)
+// and gives up ownership by clearing mask.
+
+package runtime
+#include "runtime.h"
+#include "defs.h"
+
+static struct {
+	Note;
+	uint32 mask;
+	bool inuse;
+} sig;
+
+// Prepare the shared sig state for the first round: clear the
+// embedded Note (the anonymous first field of sig).
+void
+siginit(void)
+{
+	noteclear(&sig);
+}
+
+// Called from sighandler to send a signal back out of the signal handling thread.
+// Must be async-signal-safe: uses only a cas loop, no locks.
+// Returns false if signal reception has not been enabled (Siginit).
+bool
+sigsend(int32 s)
+{
+	uint32 bit, mask;
+
+	if(!sig.inuse)
+		return false;
+	bit = 1 << s;
+	for(;;) {
+		mask = sig.mask;
+		if(mask & bit)
+			break;		// signal already in queue
+		if(cas(&sig.mask, mask, mask|bit)) {
+			// Added to queue.
+			// Only send a wakeup for the first signal in each round.
+			if(mask == 0)
+				notewakeup(&sig);
+			break;
+		}
+	}
+	return true;
+}
+
+// Called to receive a bitmask of queued signals.
+// Blocks until sigsend wakes the note, then atomically grabs and
+// clears sig.mask, starting the next round (see file comment).
+func Sigrecv() (m uint32) {
+	// runtime·entersyscall();
+	notesleep(&sig);
+	// runtime·exitsyscall();
+	noteclear(&sig);
+	for(;;) {
+		m = sig.mask;
+		if(cas(&sig.mask, m, 0))
+			break;
+	}
+}
+
+// Return a Go string naming signal number sig, falling back to
+// "signal N" when strsignal has no name for it.
+// Note: the parameter shadows the file-static struct sig above.
+func Signame(sig int32) (name String) {
+	const char* s = strsignal(sig);
+	char buf[100];
+	if (s == NULL) {
+		snprintf(buf, sizeof buf, "signal %d", sig);
+		s = buf;
+	}
+	int32 len = strlen(s);
+	// Allocate header plus len bytes of character data and copy
+	// the (possibly stack-local) C string into it.
+	name = __go_alloc(sizeof(struct __go_string) + len);
+	name->__length = len;
+	memcpy(name->__data, s, len);
+}
+
+// Enable delivery into the queue; sigsend refuses until this runs.
+func Siginit() {
+	sig.inuse = true;	// enable reception of signals; cannot disable
+}
diff --git a/libgo/runtime/string.cgo b/libgo/runtime/string.cgo
new file mode 100644
index 0000000..01c5eb2
--- /dev/null
+++ b/libgo/runtime/string.cgo
@@ -0,0 +1,60 @@
+// Copyright 2009, 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+#include "go-string.h"
+#define charntorune(pv, str, len) __go_get_rune(str, len, pv)
+
+typedef struct __go_string __go_string;
+typedef int int32;
+
+enum
+{
+	Runeself	= 0x80,
+};
+
+// Advance a string range loop: given byte index k, return the index
+// of the next character, or 0 when the iteration is complete.
+func stringiter(s *__go_string, k int32) (retk int32) {
+	int32 l, n;
+
+	if((size_t) k >= s->__length) {
+		// retk=0 is end of iteration
+		retk = 0;
+		goto out;
+	}
+
+	l = s->__data[k];
+	if(l < Runeself) {
+		// Single-byte character.
+		retk = k+1;
+		goto out;
+	}
+
+	// multi-char rune.  The remaining length is __length-k
+	// (was incorrectly __length+k, overstating the buffer and
+	// letting the decoder read past the end; stringiter2 below
+	// already used the correct form).
+	n = charntorune(&l, s->__data+k, s->__length-k);
+	retk = k + (n ? n : 1);
+
+out:
+}
+
+// Like stringiter, but also return the decoded character value.
+// retk=0 signals end of iteration.
+func stringiter2(s *__go_string, k int32) (retk int32, retv int32) {
+	int32 n;
+
+	if((size_t) k >= s->__length) {
+		// retk=0 is end of iteration
+		retk = 0;
+		retv = 0;
+		goto out;
+	}
+
+	retv = s->__data[k];
+	if(retv < Runeself) {
+		// Single-byte character.
+		retk = k+1;
+		goto out;
+	}
+
+	// multi-char rune; decode from the remaining __length-k bytes.
+	// A decode failure (n == 0) still advances by one byte.
+	n = charntorune(&retv, s->__data+k, s->__length-k);
+	retk = k + (n ? n : 1);
+
+out:
+}