src/pkg/[a-m]*: gofix -r error -force=error

R=golang-dev, iant
CC=golang-dev
https://golang.org/cl/5322051
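
For reference, the gofix "error" rewrite applied here mechanically converts
pre-Go 1 error handling: os.Error becomes the built-in error type,
os.NewError becomes errors.New, os.EOF becomes io.EOF, and String methods
on error types are renamed to Error. A minimal sketch of code in the
post-rewrite style (the identifiers ErrShort and ReadFull below are
illustrative only, not taken from this CL):

	package example

	import (
		"errors"
		"io"
	)

	// ErrShort was declared with os.NewError before the rewrite.
	var ErrShort = errors.New("example: short read")

	// ReadFull returned (int, os.Error) and compared against os.EOF
	// before the rewrite; it now uses error and io.EOF.
	func ReadFull(r io.Reader, b []byte) (int, error) {
		n, err := io.ReadFull(r, b)
		if err == io.EOF {
			return n, ErrShort
		}
		return n, err
	}
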
diff --git a/src/pkg/archive/tar/reader.go b/src/pkg/archive/tar/reader.go
index 12de2ad..65bf120 100644
--- a/src/pkg/archive/tar/reader.go
+++ b/src/pkg/archive/tar/reader.go
@@ -9,6 +9,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"io/ioutil"
 	"os"
@@ -16,7 +17,7 @@
 )
 
 var (
-	HeaderError = os.NewError("invalid tar header")
+	HeaderError = errors.New("invalid tar header")
 )
 
 // A Reader provides sequential access to the contents of a tar archive.
@@ -39,7 +40,7 @@
 //	}
 type Reader struct {
 	r   io.Reader
-	err os.Error
+	err error
 	nb  int64 // number of unread bytes for current file entry
 	pad int64 // amount of padding (ignored) after current file entry
 }
@@ -48,7 +49,7 @@
 func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
 
 // Next advances to the next entry in the tar archive.
-func (tr *Reader) Next() (*Header, os.Error) {
+func (tr *Reader) Next() (*Header, error) {
 	var hdr *Header
 	if tr.err == nil {
 		tr.skipUnread()
@@ -119,7 +120,7 @@
 			return nil
 		}
 		if bytes.Equal(header, zeroBlock[0:blockSize]) {
-			tr.err = os.EOF
+			tr.err = io.EOF
 		} else {
 			tr.err = HeaderError // zero block and then non-zero block
 		}
@@ -201,10 +202,10 @@
 // Read reads from the current entry in the tar archive.
 // It returns 0, os.EOF when it reaches the end of that entry,
 // until Next is called to advance to the next entry.
-func (tr *Reader) Read(b []byte) (n int, err os.Error) {
+func (tr *Reader) Read(b []byte) (n int, err error) {
 	if tr.nb == 0 {
 		// file consumed
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 
 	if int64(len(b)) > tr.nb {
@@ -213,7 +214,7 @@
 	n, err = tr.r.Read(b)
 	tr.nb -= int64(n)
 
-	if err == os.EOF && tr.nb > 0 {
+	if err == io.EOF && tr.nb > 0 {
 		err = io.ErrUnexpectedEOF
 	}
 	tr.err = err
diff --git a/src/pkg/archive/tar/reader_test.go b/src/pkg/archive/tar/reader_test.go
index f473c90..00eea6b 100644
--- a/src/pkg/archive/tar/reader_test.go
+++ b/src/pkg/archive/tar/reader_test.go
@@ -132,7 +132,7 @@
 			}
 		}
 		hdr, err := tr.Next()
-		if err == os.EOF {
+		if err == io.EOF {
 			break
 		}
 		if hdr != nil || err != nil {
@@ -195,7 +195,7 @@
 	// loop over all files
 	for ; ; nread++ {
 		hdr, err := tr.Next()
-		if hdr == nil || err == os.EOF {
+		if hdr == nil || err == io.EOF {
 			break
 		}
 
@@ -211,7 +211,7 @@
 		rdbuf := make([]uint8, 8)
 		for {
 			nr, err := tr.Read(rdbuf)
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			}
 			if err != nil {
@@ -250,7 +250,7 @@
 		for {
 			nr, err := f.Read(rdbuf)
 			w.Write(rdbuf[0:nr])
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			}
 		}
@@ -262,7 +262,7 @@
 
 	for ; ; nread++ {
 		hdr, err := tr.Next()
-		if hdr == nil || err == os.EOF {
+		if hdr == nil || err == io.EOF {
 			break
 		}
 	}
diff --git a/src/pkg/archive/tar/writer.go b/src/pkg/archive/tar/writer.go
index c6ce224..222df90 100644
--- a/src/pkg/archive/tar/writer.go
+++ b/src/pkg/archive/tar/writer.go
@@ -8,15 +8,15 @@
 // - catch more errors (no first header, write after close, etc.)
 
 import (
+	"errors"
 	"io"
-	"os"
 	"strconv"
 )
 
 var (
-	ErrWriteTooLong    = os.NewError("write too long")
-	ErrFieldTooLong    = os.NewError("header field too long")
-	ErrWriteAfterClose = os.NewError("write after close")
+	ErrWriteTooLong    = errors.New("write too long")
+	ErrFieldTooLong    = errors.New("header field too long")
+	ErrWriteAfterClose = errors.New("write after close")
 )
 
 // A Writer provides sequential writing of a tar archive in POSIX.1 format.
@@ -36,7 +36,7 @@
 //	tw.Close()
 type Writer struct {
 	w          io.Writer
-	err        os.Error
+	err        error
 	nb         int64 // number of unwritten bytes for current file entry
 	pad        int64 // amount of padding to write after current file entry
 	closed     bool
@@ -47,7 +47,7 @@
 func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
 
 // Flush finishes writing the current file (optional).
-func (tw *Writer) Flush() os.Error {
+func (tw *Writer) Flush() error {
 	n := tw.nb + tw.pad
 	for n > 0 && tw.err == nil {
 		nr := n
@@ -107,7 +107,7 @@
 // WriteHeader writes hdr and prepares to accept the file's contents.
 // WriteHeader calls Flush if it is not the first header.
 // Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) os.Error {
+func (tw *Writer) WriteHeader(hdr *Header) error {
 	if tw.closed {
 		return ErrWriteAfterClose
 	}
@@ -165,7 +165,7 @@
 // Write writes to the current entry in the tar archive.
 // Write returns the error ErrWriteTooLong if more than
 // hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err os.Error) {
+func (tw *Writer) Write(b []byte) (n int, err error) {
 	if tw.closed {
 		err = ErrWriteTooLong
 		return
@@ -187,7 +187,7 @@
 
 // Close closes the tar archive, flushing any unwritten
 // data to the underlying writer.
-func (tw *Writer) Close() os.Error {
+func (tw *Writer) Close() error {
 	if tw.err != nil || tw.closed {
 		return tw.err
 	}
diff --git a/src/pkg/archive/zip/reader.go b/src/pkg/archive/zip/reader.go
index b0a5599..64152b4 100644
--- a/src/pkg/archive/zip/reader.go
+++ b/src/pkg/archive/zip/reader.go
@@ -7,6 +7,7 @@
 import (
 	"bufio"
 	"compress/flate"
+	"errors"
 	"hash"
 	"hash/crc32"
 	"encoding/binary"
@@ -16,9 +17,9 @@
 )
 
 var (
-	FormatError       = os.NewError("zip: not a valid zip file")
-	UnsupportedMethod = os.NewError("zip: unsupported compression algorithm")
-	ChecksumError     = os.NewError("zip: checksum error")
+	FormatError       = errors.New("zip: not a valid zip file")
+	UnsupportedMethod = errors.New("zip: unsupported compression algorithm")
+	ChecksumError     = errors.New("zip: checksum error")
 )
 
 type Reader struct {
@@ -44,7 +45,7 @@
 }
 
 // OpenReader will open the Zip file specified by name and return a ReadCloser.
-func OpenReader(name string) (*ReadCloser, os.Error) {
+func OpenReader(name string) (*ReadCloser, error) {
 	f, err := os.Open(name)
 	if err != nil {
 		return nil, err
@@ -64,7 +65,7 @@
 
 // NewReader returns a new Reader reading from r, which is assumed to
 // have the given size in bytes.
-func NewReader(r io.ReaderAt, size int64) (*Reader, os.Error) {
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
 	zr := new(Reader)
 	if err := zr.init(r, size); err != nil {
 		return nil, err
@@ -72,7 +73,7 @@
 	return zr, nil
 }
 
-func (z *Reader) init(r io.ReaderAt, size int64) os.Error {
+func (z *Reader) init(r io.ReaderAt, size int64) error {
 	end, err := readDirectoryEnd(r, size)
 	if err != nil {
 		return err
@@ -110,13 +111,13 @@
 }
 
 // Close closes the Zip file, rendering it unusable for I/O.
-func (rc *ReadCloser) Close() os.Error {
+func (rc *ReadCloser) Close() error {
 	return rc.f.Close()
 }
 
 // Open returns a ReadCloser that provides access to the File's contents.
 // It is safe to Open and Read from files concurrently.
-func (f *File) Open() (rc io.ReadCloser, err os.Error) {
+func (f *File) Open() (rc io.ReadCloser, err error) {
 	bodyOffset, err := f.findBodyOffset()
 	if err != nil {
 		return
@@ -148,10 +149,10 @@
 	zipr io.Reader // for reading the data descriptor
 }
 
-func (r *checksumReader) Read(b []byte) (n int, err os.Error) {
+func (r *checksumReader) Read(b []byte) (n int, err error) {
 	n, err = r.rc.Read(b)
 	r.hash.Write(b[:n])
-	if err != os.EOF {
+	if err != io.EOF {
 		return
 	}
 	if r.f.hasDataDescriptor() {
@@ -165,9 +166,9 @@
 	return
 }
 
-func (r *checksumReader) Close() os.Error { return r.rc.Close() }
+func (r *checksumReader) Close() error { return r.rc.Close() }
 
-func readFileHeader(f *File, r io.Reader) os.Error {
+func readFileHeader(f *File, r io.Reader) error {
 	var b [fileHeaderLen]byte
 	if _, err := io.ReadFull(r, b[:]); err != nil {
 		return err
@@ -197,7 +198,7 @@
 
 // findBodyOffset does the minimum work to verify the file has a header
 // and returns the file body offset.
-func (f *File) findBodyOffset() (int64, os.Error) {
+func (f *File) findBodyOffset() (int64, error) {
 	r := io.NewSectionReader(f.zipr, f.headerOffset, f.zipsize-f.headerOffset)
 	var b [fileHeaderLen]byte
 	if _, err := io.ReadFull(r, b[:]); err != nil {
@@ -215,7 +216,7 @@
 // readDirectoryHeader attempts to read a directory header from r.
 // It returns io.ErrUnexpectedEOF if it cannot read a complete header,
 // and FormatError if it doesn't find a valid header signature.
-func readDirectoryHeader(f *File, r io.Reader) os.Error {
+func readDirectoryHeader(f *File, r io.Reader) error {
 	var b [directoryHeaderLen]byte
 	if _, err := io.ReadFull(r, b[:]); err != nil {
 		return err
@@ -250,7 +251,7 @@
 	return nil
 }
 
-func readDataDescriptor(r io.Reader, f *File) os.Error {
+func readDataDescriptor(r io.Reader, f *File) error {
 	var b [dataDescriptorLen]byte
 	if _, err := io.ReadFull(r, b[:]); err != nil {
 		return err
@@ -262,7 +263,7 @@
 	return nil
 }
 
-func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err os.Error) {
+func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
 	// look for directoryEndSignature in the last 1k, then in the last 65k
 	var b []byte
 	for i, bLen := range []int64{1024, 65 * 1024} {
@@ -270,7 +271,7 @@
 			bLen = size
 		}
 		b = make([]byte, int(bLen))
-		if _, err := r.ReadAt(b, size-bLen); err != nil && err != os.EOF {
+		if _, err := r.ReadAt(b, size-bLen); err != nil && err != io.EOF {
 			return nil, err
 		}
 		if p := findSignatureInBlock(b); p >= 0 {
diff --git a/src/pkg/archive/zip/reader_test.go b/src/pkg/archive/zip/reader_test.go
index 3b7b0dc..4d80aab 100644
--- a/src/pkg/archive/zip/reader_test.go
+++ b/src/pkg/archive/zip/reader_test.go
@@ -9,7 +9,6 @@
 	"encoding/binary"
 	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 	"time"
 )
@@ -18,7 +17,7 @@
 	Name    string
 	Comment string
 	File    []ZipTestFile
-	Error   os.Error // the error that Opening this file should return
+	Error   error // the error that Opening this file should return
 }
 
 type ZipTestFile struct {
@@ -245,7 +244,7 @@
 
 type sliceReaderAt []byte
 
-func (r sliceReaderAt) ReadAt(b []byte, off int64) (int, os.Error) {
+func (r sliceReaderAt) ReadAt(b []byte, off int64) (int, error) {
 	copy(b, r[int(off):int(off)+len(b)])
 	return len(b), nil
 }
diff --git a/src/pkg/archive/zip/struct.go b/src/pkg/archive/zip/struct.go
index 4f9f599..b862b5a 100644
--- a/src/pkg/archive/zip/struct.go
+++ b/src/pkg/archive/zip/struct.go
@@ -11,7 +11,7 @@
 */
 package zip
 
-import "os"
+import "errors"
 import "time"
 
 // Compression methods.
@@ -60,9 +60,9 @@
 	comment            string
 }
 
-func recoverError(errp *os.Error) {
+func recoverError(errp *error) {
 	if e := recover(); e != nil {
-		if err, ok := e.(os.Error); ok {
+		if err, ok := e.(error); ok {
 			*errp = err
 			return
 		}
@@ -96,11 +96,11 @@
 
 // Mode returns the permission and mode bits for the FileHeader.
 // An error is returned in case the information is not available.
-func (h *FileHeader) Mode() (mode uint32, err os.Error) {
+func (h *FileHeader) Mode() (mode uint32, err error) {
 	if h.CreatorVersion>>8 == creatorUnix {
 		return h.ExternalAttrs >> 16, nil
 	}
-	return 0, os.NewError("file mode not available")
+	return 0, errors.New("file mode not available")
 }
 
 // SetMode changes the permission and mode bits for the FileHeader.
diff --git a/src/pkg/archive/zip/writer.go b/src/pkg/archive/zip/writer.go
index 3a6dc38..a153064 100644
--- a/src/pkg/archive/zip/writer.go
+++ b/src/pkg/archive/zip/writer.go
@@ -8,10 +8,10 @@
 	"bufio"
 	"compress/flate"
 	"encoding/binary"
+	"errors"
 	"hash"
 	"hash/crc32"
 	"io"
-	"os"
 )
 
 // TODO(adg): support zip file comments
@@ -37,7 +37,7 @@
 
 // Close finishes writing the zip file by writing the central directory.
 // It does not (and can not) close the underlying writer.
-func (w *Writer) Close() (err os.Error) {
+func (w *Writer) Close() (err error) {
 	if w.last != nil && !w.last.closed {
 		if err = w.last.close(); err != nil {
 			return
@@ -45,7 +45,7 @@
 		w.last = nil
 	}
 	if w.closed {
-		return os.NewError("zip: writer closed twice")
+		return errors.New("zip: writer closed twice")
 	}
 	w.closed = true
 
@@ -94,7 +94,7 @@
 // It returns a Writer to which the file contents should be written.
 // The file's contents must be written to the io.Writer before the next
 // call to Create, CreateHeader, or Close.
-func (w *Writer) Create(name string) (io.Writer, os.Error) {
+func (w *Writer) Create(name string) (io.Writer, error) {
 	header := &FileHeader{
 		Name:   name,
 		Method: Deflate,
@@ -107,7 +107,7 @@
 // It returns a Writer to which the file contents should be written.
 // The file's contents must be written to the io.Writer before the next
 // call to Create, CreateHeader, or Close.
-func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, os.Error) {
+func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
 	if w.last != nil && !w.last.closed {
 		if err := w.last.close(); err != nil {
 			return nil, err
@@ -148,7 +148,7 @@
 	return fw, nil
 }
 
-func writeHeader(w io.Writer, h *FileHeader) (err os.Error) {
+func writeHeader(w io.Writer, h *FileHeader) (err error) {
 	defer recoverError(&err)
 	write(w, uint32(fileHeaderSignature))
 	write(w, h.ReaderVersion)
@@ -176,17 +176,17 @@
 	closed    bool
 }
 
-func (w *fileWriter) Write(p []byte) (int, os.Error) {
+func (w *fileWriter) Write(p []byte) (int, error) {
 	if w.closed {
-		return 0, os.NewError("zip: write to closed file")
+		return 0, errors.New("zip: write to closed file")
 	}
 	w.crc32.Write(p)
 	return w.rawCount.Write(p)
 }
 
-func (w *fileWriter) close() (err os.Error) {
+func (w *fileWriter) close() (err error) {
 	if w.closed {
-		return os.NewError("zip: file closed twice")
+		return errors.New("zip: file closed twice")
 	}
 	w.closed = true
 	if err = w.comp.Close(); err != nil {
@@ -213,7 +213,7 @@
 	count int64
 }
 
-func (w *countWriter) Write(p []byte) (int, os.Error) {
+func (w *countWriter) Write(p []byte) (int, error) {
 	n, err := w.w.Write(p)
 	w.count += int64(n)
 	return n, err
@@ -223,7 +223,7 @@
 	io.Writer
 }
 
-func (w nopCloser) Close() os.Error {
+func (w nopCloser) Close() error {
 	return nil
 }
 
diff --git a/src/pkg/archive/zip/zip_test.go b/src/pkg/archive/zip/zip_test.go
index 0f71fdf..2075715 100644
--- a/src/pkg/archive/zip/zip_test.go
+++ b/src/pkg/archive/zip/zip_test.go
@@ -9,15 +9,15 @@
 import (
 	"bytes"
 	"fmt"
-	"os"
+	"io"
 	"testing"
 )
 
 type stringReaderAt string
 
-func (s stringReaderAt) ReadAt(p []byte, off int64) (n int, err os.Error) {
+func (s stringReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
 	if off >= int64(len(s)) {
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	n = copy(p, s[off:])
 	return
diff --git a/src/pkg/asn1/asn1.go b/src/pkg/asn1/asn1.go
index e7bd62e..73e733e 100644
--- a/src/pkg/asn1/asn1.go
+++ b/src/pkg/asn1/asn1.go
@@ -22,7 +22,6 @@
 import (
 	"big"
 	"fmt"
-	"os"
 	"reflect"
 	"time"
 )
@@ -33,20 +32,20 @@
 	Msg string
 }
 
-func (e StructuralError) String() string { return "ASN.1 structure error: " + e.Msg }
+func (e StructuralError) Error() string { return "ASN.1 structure error: " + e.Msg }
 
 // A SyntaxError suggests that the ASN.1 data is invalid.
 type SyntaxError struct {
 	Msg string
 }
 
-func (e SyntaxError) String() string { return "ASN.1 syntax error: " + e.Msg }
+func (e SyntaxError) Error() string { return "ASN.1 syntax error: " + e.Msg }
 
 // We start by dealing with each of the primitive types in turn.
 
 // BOOLEAN
 
-func parseBool(bytes []byte) (ret bool, err os.Error) {
+func parseBool(bytes []byte) (ret bool, err error) {
 	if len(bytes) != 1 {
 		err = SyntaxError{"invalid boolean"}
 		return
@@ -59,7 +58,7 @@
 
 // parseInt64 treats the given bytes as a big-endian, signed integer and
 // returns the result.
-func parseInt64(bytes []byte) (ret int64, err os.Error) {
+func parseInt64(bytes []byte) (ret int64, err error) {
 	if len(bytes) > 8 {
 		// We'll overflow an int64 in this case.
 		err = StructuralError{"integer too large"}
@@ -78,7 +77,7 @@
 
 // parseInt treats the given bytes as a big-endian, signed integer and returns
 // the result.
-func parseInt(bytes []byte) (int, os.Error) {
+func parseInt(bytes []byte) (int, error) {
 	ret64, err := parseInt64(bytes)
 	if err != nil {
 		return 0, err
@@ -150,7 +149,7 @@
 }
 
 // parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
-func parseBitString(bytes []byte) (ret BitString, err os.Error) {
+func parseBitString(bytes []byte) (ret BitString, err error) {
 	if len(bytes) == 0 {
 		err = SyntaxError{"zero length BIT STRING"}
 		return
@@ -189,7 +188,7 @@
 // parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
 // returns it. An object identifier is a sequence of variable length integers
 // that are assigned in a hierarchy.
-func parseObjectIdentifier(bytes []byte) (s []int, err os.Error) {
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
 	if len(bytes) == 0 {
 		err = SyntaxError{"zero length OBJECT IDENTIFIER"}
 		return
@@ -227,7 +226,7 @@
 
 // parseBase128Int parses a base-128 encoded int from the given offset in the
 // given byte slice. It returns the value and the new offset.
-func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err os.Error) {
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
 	offset = initOffset
 	for shifted := 0; offset < len(bytes); shifted++ {
 		if shifted > 4 {
@@ -248,7 +247,7 @@
 
 // UTCTime
 
-func parseUTCTime(bytes []byte) (ret *time.Time, err os.Error) {
+func parseUTCTime(bytes []byte) (ret *time.Time, err error) {
 	s := string(bytes)
 	ret, err = time.Parse("0601021504Z0700", s)
 	if err == nil {
@@ -260,7 +259,7 @@
 
 // parseGeneralizedTime parses the GeneralizedTime from the given byte slice
 // and returns the resulting time.
-func parseGeneralizedTime(bytes []byte) (ret *time.Time, err os.Error) {
+func parseGeneralizedTime(bytes []byte) (ret *time.Time, err error) {
 	return time.Parse("20060102150405Z0700", string(bytes))
 }
 
@@ -268,7 +267,7 @@
 
 // parsePrintableString parses a ASN.1 PrintableString from the given byte
 // array and returns it.
-func parsePrintableString(bytes []byte) (ret string, err os.Error) {
+func parsePrintableString(bytes []byte) (ret string, err error) {
 	for _, b := range bytes {
 		if !isPrintable(b) {
 			err = SyntaxError{"PrintableString contains invalid character"}
@@ -300,7 +299,7 @@
 
 // parseIA5String parses a ASN.1 IA5String (ASCII string) from the given
 // byte slice and returns it.
-func parseIA5String(bytes []byte) (ret string, err os.Error) {
+func parseIA5String(bytes []byte) (ret string, err error) {
 	for _, b := range bytes {
 		if b >= 0x80 {
 			err = SyntaxError{"IA5String contains invalid character"}
@@ -315,7 +314,7 @@
 
 // parseT61String parses a ASN.1 T61String (8-bit clean string) from the given
 // byte slice and returns it.
-func parseT61String(bytes []byte) (ret string, err os.Error) {
+func parseT61String(bytes []byte) (ret string, err error) {
 	return string(bytes), nil
 }
 
@@ -323,7 +322,7 @@
 
 // parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte
 // array and returns it.
-func parseUTF8String(bytes []byte) (ret string, err os.Error) {
+func parseUTF8String(bytes []byte) (ret string, err error) {
 	return string(bytes), nil
 }
 
@@ -346,7 +345,7 @@
 // into a byte slice. It returns the parsed data and the new offset. SET and
 // SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
 // don't distinguish between ordered and unordered objects in this code.
-func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err os.Error) {
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
 	offset = initOffset
 	b := bytes[offset]
 	offset++
@@ -402,7 +401,7 @@
 // parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
 // a number of ASN.1 values from the given byte slice and returns them as a
 // slice of Go values of the given type.
-func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err os.Error) {
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
 	expectedTag, compoundType, ok := getUniversalType(elemType)
 	if !ok {
 		err = StructuralError{"unknown Go type for slice"}
@@ -466,7 +465,7 @@
 // parseField is the main parsing function. Given a byte slice and an offset
 // into the array, it will try to parse a suitable ASN.1 value out and store it
 // in the given Value.
-func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err os.Error) {
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
 	offset = initOffset
 	fieldType := v.Type()
 
@@ -649,7 +648,7 @@
 		return
 	case timeType:
 		var time *time.Time
-		var err1 os.Error
+		var err1 error
 		if universalTag == tagUTCTime {
 			time, err1 = parseUTCTime(innerBytes)
 		} else {
@@ -826,13 +825,13 @@
 //
 // Other ASN.1 types are not supported; if it encounters them,
 // Unmarshal returns a parse error.
-func Unmarshal(b []byte, val interface{}) (rest []byte, err os.Error) {
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
 	return UnmarshalWithParams(b, val, "")
 }
 
 // UnmarshalWithParams allows field parameters to be specified for the
 // top-level element. The form of the params is the same as the field tags.
-func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err os.Error) {
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
 	v := reflect.ValueOf(val).Elem()
 	offset, err := parseField(v, b, 0, parseFieldParameters(params))
 	if err != nil {
diff --git a/src/pkg/asn1/marshal.go b/src/pkg/asn1/marshal.go
index 6d1f78b..583d010 100644
--- a/src/pkg/asn1/marshal.go
+++ b/src/pkg/asn1/marshal.go
@@ -9,7 +9,6 @@
 	"bytes"
 	"fmt"
 	"io"
-	"os"
 	"reflect"
 	"time"
 )
@@ -48,7 +47,7 @@
 	return
 }
 
-func (f *forkableWriter) writeTo(out io.Writer) (n int, err os.Error) {
+func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
 	n, err = out.Write(f.Bytes())
 	if err != nil {
 		return
@@ -71,7 +70,7 @@
 	return
 }
 
-func marshalBase128Int(out *forkableWriter, n int64) (err os.Error) {
+func marshalBase128Int(out *forkableWriter, n int64) (err error) {
 	if n == 0 {
 		err = out.WriteByte(0)
 		return
@@ -97,7 +96,7 @@
 	return nil
 }
 
-func marshalInt64(out *forkableWriter, i int64) (err os.Error) {
+func marshalInt64(out *forkableWriter, i int64) (err error) {
 	n := int64Length(i)
 
 	for ; n > 0; n-- {
@@ -126,7 +125,7 @@
 	return
 }
 
-func marshalBigInt(out *forkableWriter, n *big.Int) (err os.Error) {
+func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
 	if n.Sign() < 0 {
 		// A negative number has to be converted to two's-complement
 		// form. So we'll subtract 1 and invert. If the
@@ -163,7 +162,7 @@
 	return
 }
 
-func marshalLength(out *forkableWriter, i int) (err os.Error) {
+func marshalLength(out *forkableWriter, i int) (err error) {
 	n := lengthLength(i)
 
 	for ; n > 0; n-- {
@@ -185,7 +184,7 @@
 	return
 }
 
-func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err os.Error) {
+func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
 	b := uint8(t.class) << 6
 	if t.isCompound {
 		b |= 0x20
@@ -228,7 +227,7 @@
 	return nil
 }
 
-func marshalBitString(out *forkableWriter, b BitString) (err os.Error) {
+func marshalBitString(out *forkableWriter, b BitString) (err error) {
 	paddingBits := byte((8 - b.BitLength%8) % 8)
 	err = out.WriteByte(paddingBits)
 	if err != nil {
@@ -238,7 +237,7 @@
 	return
 }
 
-func marshalObjectIdentifier(out *forkableWriter, oid []int) (err os.Error) {
+func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
 	if len(oid) < 2 || oid[0] > 6 || oid[1] >= 40 {
 		return StructuralError{"invalid object identifier"}
 	}
@@ -257,7 +256,7 @@
 	return
 }
 
-func marshalPrintableString(out *forkableWriter, s string) (err os.Error) {
+func marshalPrintableString(out *forkableWriter, s string) (err error) {
 	b := []byte(s)
 	for _, c := range b {
 		if !isPrintable(c) {
@@ -269,7 +268,7 @@
 	return
 }
 
-func marshalIA5String(out *forkableWriter, s string) (err os.Error) {
+func marshalIA5String(out *forkableWriter, s string) (err error) {
 	b := []byte(s)
 	for _, c := range b {
 		if c > 127 {
@@ -281,7 +280,7 @@
 	return
 }
 
-func marshalTwoDigits(out *forkableWriter, v int) (err os.Error) {
+func marshalTwoDigits(out *forkableWriter, v int) (err error) {
 	err = out.WriteByte(byte('0' + (v/10)%10))
 	if err != nil {
 		return
@@ -289,7 +288,7 @@
 	return out.WriteByte(byte('0' + v%10))
 }
 
-func marshalUTCTime(out *forkableWriter, t *time.Time) (err os.Error) {
+func marshalUTCTime(out *forkableWriter, t *time.Time) (err error) {
 	switch {
 	case 1950 <= t.Year && t.Year < 2000:
 		err = marshalTwoDigits(out, int(t.Year-1900))
@@ -364,7 +363,7 @@
 	return in[offset:]
 }
 
-func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err os.Error) {
+func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
 	switch value.Type() {
 	case timeType:
 		return marshalUTCTime(out, value.Interface().(*time.Time))
@@ -452,7 +451,7 @@
 	return StructuralError{"unknown Go type"}
 }
 
-func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err os.Error) {
+func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
 	// If the field is an interface{} then recurse into it.
 	if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
 		return marshalField(out, v.Elem(), params)
@@ -535,7 +534,7 @@
 }
 
 // Marshal returns the ASN.1 encoding of val.
-func Marshal(val interface{}) ([]byte, os.Error) {
+func Marshal(val interface{}) ([]byte, error) {
 	var out bytes.Buffer
 	v := reflect.ValueOf(val)
 	f := newForkableWriter()
diff --git a/src/pkg/big/int.go b/src/pkg/big/int.go
index db13d20..c6affbb 100644
--- a/src/pkg/big/int.go
+++ b/src/pkg/big/int.go
@@ -7,9 +7,9 @@
 package big
 
 import (
+	"errors"
 	"fmt"
 	"io"
-	"os"
 	"rand"
 	"strings"
 )
@@ -432,7 +432,7 @@
 // ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
 // ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
 //
-func (z *Int) scan(r io.RuneScanner, base int) (*Int, int, os.Error) {
+func (z *Int) scan(r io.RuneScanner, base int) (*Int, int, error) {
 	// determine sign
 	ch, _, err := r.ReadRune()
 	if err != nil {
@@ -460,7 +460,7 @@
 // Scan is a support routine for fmt.Scanner; it sets z to the value of
 // the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
 // 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
-func (z *Int) Scan(s fmt.ScanState, ch rune) os.Error {
+func (z *Int) Scan(s fmt.ScanState, ch rune) error {
 	s.SkipSpace() // skip leading space characters
 	base := 0
 	switch ch {
@@ -475,7 +475,7 @@
 	case 's', 'v':
 		// let scan determine the base
 	default:
-		return os.NewError("Int.Scan: invalid verb")
+		return errors.New("Int.Scan: invalid verb")
 	}
 	_, _, err := z.scan(s, base)
 	return err
@@ -513,7 +513,7 @@
 		return nil, false
 	}
 	_, _, err = r.ReadRune()
-	if err != os.EOF {
+	if err != io.EOF {
 		return nil, false
 	}
 	return z, true // err == os.EOF => scan consumed all of s
@@ -847,7 +847,7 @@
 const intGobVersion byte = 1
 
 // GobEncode implements the gob.GobEncoder interface.
-func (z *Int) GobEncode() ([]byte, os.Error) {
+func (z *Int) GobEncode() ([]byte, error) {
 	buf := make([]byte, 1+len(z.abs)*_S) // extra byte for version and sign bit
 	i := z.abs.bytes(buf) - 1            // i >= 0
 	b := intGobVersion << 1              // make space for sign bit
@@ -859,13 +859,13 @@
 }
 
 // GobDecode implements the gob.GobDecoder interface.
-func (z *Int) GobDecode(buf []byte) os.Error {
+func (z *Int) GobDecode(buf []byte) error {
 	if len(buf) == 0 {
-		return os.NewError("Int.GobDecode: no data")
+		return errors.New("Int.GobDecode: no data")
 	}
 	b := buf[0]
 	if b>>1 != intGobVersion {
-		return os.NewError(fmt.Sprintf("Int.GobDecode: encoding version %d not supported", b>>1))
+		return errors.New(fmt.Sprintf("Int.GobDecode: encoding version %d not supported", b>>1))
 	}
 	z.neg = b&1 != 0
 	z.abs = z.abs.setBytes(buf[1:])
diff --git a/src/pkg/big/nat.go b/src/pkg/big/nat.go
index fa0d7e7..a46f782 100644
--- a/src/pkg/big/nat.go
+++ b/src/pkg/big/nat.go
@@ -19,8 +19,8 @@
 // and rationals.
 
 import (
+	"errors"
 	"io"
-	"os"
 	"rand"
 )
 
@@ -613,10 +613,10 @@
 // ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
 // ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
 //
-func (z nat) scan(r io.RuneScanner, base int) (nat, int, os.Error) {
+func (z nat) scan(r io.RuneScanner, base int) (nat, int, error) {
 	// reject illegal bases
 	if base < 0 || base == 1 || MaxBase < base {
-		return z, 0, os.NewError("illegal number base")
+		return z, 0, errors.New("illegal number base")
 	}
 
 	// one char look-ahead
@@ -644,7 +644,7 @@
 						return z, 0, err
 					}
 				}
-			case os.EOF:
+			case io.EOF:
 				return z.make(0), 10, nil
 			default:
 				return z, 10, err
@@ -676,7 +676,7 @@
 		}
 
 		if ch, _, err = r.ReadRune(); err != nil {
-			if err != os.EOF {
+			if err != io.EOF {
 				return z, int(b), err
 			}
 			break
@@ -693,7 +693,7 @@
 		return z, 10, nil
 	case base != 0 || b != 8:
 		// there was neither a mantissa digit nor the octal prefix 0
-		return z, int(b), os.NewError("syntax error scanning number")
+		return z, int(b), errors.New("syntax error scanning number")
 	}
 
 	return z.norm(), int(b), nil
diff --git a/src/pkg/big/nat_test.go b/src/pkg/big/nat_test.go
index ab34c6e..041a6c4 100644
--- a/src/pkg/big/nat_test.go
+++ b/src/pkg/big/nat_test.go
@@ -6,7 +6,7 @@
 
 import (
 	"fmt"
-	"os"
+	"io"
 	"strings"
 	"testing"
 )
@@ -288,7 +288,7 @@
 			t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.base)
 		}
 		next, _, err := r.ReadRune()
-		if err == os.EOF {
+		if err == io.EOF {
 			next = 0
 			err = nil
 		}
diff --git a/src/pkg/big/rat.go b/src/pkg/big/rat.go
index 1940a05..3a0add3 100644
--- a/src/pkg/big/rat.go
+++ b/src/pkg/big/rat.go
@@ -8,8 +8,8 @@
 
 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
-	"os"
 	"strings"
 )
 
@@ -255,16 +255,16 @@
 
 // Scan is a support routine for fmt.Scanner. It accepts the formats
 // 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
-func (z *Rat) Scan(s fmt.ScanState, ch rune) os.Error {
+func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
 	tok, err := s.Token(true, ratTok)
 	if err != nil {
 		return err
 	}
 	if strings.IndexRune("efgEFGv", ch) < 0 {
-		return os.NewError("Rat.Scan: invalid verb")
+		return errors.New("Rat.Scan: invalid verb")
 	}
 	if _, ok := z.SetString(string(tok)); !ok {
-		return os.NewError("Rat.Scan: invalid syntax")
+		return errors.New("Rat.Scan: invalid syntax")
 	}
 	return nil
 }
@@ -285,7 +285,7 @@
 			return nil, false
 		}
 		s = s[sep+1:]
-		var err os.Error
+		var err error
 		if z.b, _, err = z.b.scan(strings.NewReader(s), 10); err != nil {
 			return nil, false
 		}
@@ -395,14 +395,14 @@
 const ratGobVersion byte = 1
 
 // GobEncode implements the gob.GobEncoder interface.
-func (z *Rat) GobEncode() ([]byte, os.Error) {
+func (z *Rat) GobEncode() ([]byte, error) {
 	buf := make([]byte, 1+4+(len(z.a.abs)+len(z.b))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
 	i := z.b.bytes(buf)
 	j := z.a.abs.bytes(buf[0:i])
 	n := i - j
 	if int(uint32(n)) != n {
 		// this should never happen
-		return nil, os.NewError("Rat.GobEncode: numerator too large")
+		return nil, errors.New("Rat.GobEncode: numerator too large")
 	}
 	binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
 	j -= 1 + 4
@@ -415,13 +415,13 @@
 }
 
 // GobDecode implements the gob.GobDecoder interface.
-func (z *Rat) GobDecode(buf []byte) os.Error {
+func (z *Rat) GobDecode(buf []byte) error {
 	if len(buf) == 0 {
-		return os.NewError("Rat.GobDecode: no data")
+		return errors.New("Rat.GobDecode: no data")
 	}
 	b := buf[0]
 	if b>>1 != ratGobVersion {
-		return os.NewError(fmt.Sprintf("Rat.GobDecode: encoding version %d not supported", b>>1))
+		return errors.New(fmt.Sprintf("Rat.GobDecode: encoding version %d not supported", b>>1))
 	}
 	const j = 1 + 4
 	i := j + binary.BigEndian.Uint32(buf[j-4:j])
diff --git a/src/pkg/bufio/bufio.go b/src/pkg/bufio/bufio.go
index 3a4e0ed..f4ed91b 100644
--- a/src/pkg/bufio/bufio.go
+++ b/src/pkg/bufio/bufio.go
@@ -10,7 +10,6 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"strconv"
 	"utf8"
 )
@@ -24,20 +23,20 @@
 	ErrorString string
 }
 
-func (err *Error) String() string { return err.ErrorString }
+func (err *Error) Error() string { return err.ErrorString }
 
 var (
-	ErrInvalidUnreadByte os.Error = &Error{"bufio: invalid use of UnreadByte"}
-	ErrInvalidUnreadRune os.Error = &Error{"bufio: invalid use of UnreadRune"}
-	ErrBufferFull        os.Error = &Error{"bufio: buffer full"}
-	ErrNegativeCount     os.Error = &Error{"bufio: negative count"}
-	errInternal          os.Error = &Error{"bufio: internal error"}
+	ErrInvalidUnreadByte error = &Error{"bufio: invalid use of UnreadByte"}
+	ErrInvalidUnreadRune error = &Error{"bufio: invalid use of UnreadRune"}
+	ErrBufferFull        error = &Error{"bufio: buffer full"}
+	ErrNegativeCount     error = &Error{"bufio: negative count"}
+	errInternal          error = &Error{"bufio: internal error"}
 )
 
 // BufSizeError is the error representing an invalid buffer size.
 type BufSizeError int
 
-func (b BufSizeError) String() string {
+func (b BufSizeError) Error() string {
 	return "bufio: bad buffer size " + strconv.Itoa(int(b))
 }
 
@@ -48,7 +47,7 @@
 	buf          []byte
 	rd           io.Reader
 	r, w         int
-	err          os.Error
+	err          error
 	lastByte     int
 	lastRuneSize int
 }
@@ -57,7 +56,7 @@
 // which must be greater than one.  If the argument io.Reader is already a
 // Reader with large enough size, it returns the underlying Reader.
 // It returns the Reader and any error.
-func NewReaderSize(rd io.Reader, size int) (*Reader, os.Error) {
+func NewReaderSize(rd io.Reader, size int) (*Reader, error) {
 	if size <= 1 {
 		return nil, BufSizeError(size)
 	}
@@ -101,7 +100,7 @@
 	}
 }
 
-func (b *Reader) readErr() os.Error {
+func (b *Reader) readErr() error {
 	err := b.err
 	b.err = nil
 	return err
@@ -111,7 +110,7 @@
 // being valid at the next read call. If Peek returns fewer than n bytes, it
 // also returns an error explaining why the read is short. The error is
 // ErrBufferFull if n is larger than b's buffer size.
-func (b *Reader) Peek(n int) ([]byte, os.Error) {
+func (b *Reader) Peek(n int) ([]byte, error) {
 	if n < 0 {
 		return nil, ErrNegativeCount
 	}
@@ -137,7 +136,7 @@
 // It calls Read at most once on the underlying Reader,
 // hence n may be less than len(p).
 // At EOF, the count will be zero and err will be os.EOF.
-func (b *Reader) Read(p []byte) (n int, err os.Error) {
+func (b *Reader) Read(p []byte) (n int, err error) {
 	n = len(p)
 	if n == 0 {
 		return 0, b.readErr()
@@ -174,7 +173,7 @@
 
 // ReadByte reads and returns a single byte.
 // If no byte is available, returns an error.
-func (b *Reader) ReadByte() (c byte, err os.Error) {
+func (b *Reader) ReadByte() (c byte, err error) {
 	b.lastRuneSize = -1
 	for b.w == b.r {
 		if b.err != nil {
@@ -189,7 +188,7 @@
 }
 
 // UnreadByte unreads the last byte.  Only the most recently read byte can be unread.
-func (b *Reader) UnreadByte() os.Error {
+func (b *Reader) UnreadByte() error {
 	b.lastRuneSize = -1
 	if b.r == b.w && b.lastByte >= 0 {
 		b.w = 1
@@ -208,7 +207,7 @@
 
 // ReadRune reads a single UTF-8 encoded Unicode character and returns the
 // rune and its size in bytes.
-func (b *Reader) ReadRune() (r rune, size int, err os.Error) {
+func (b *Reader) ReadRune() (r rune, size int, err error) {
 	for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil {
 		b.fill()
 	}
@@ -230,7 +229,7 @@
 // the buffer was not a ReadRune, UnreadRune returns an error.  (In this
 // regard it is stricter than UnreadByte, which will unread the last byte
 // from any read operation.)
-func (b *Reader) UnreadRune() os.Error {
+func (b *Reader) UnreadRune() error {
 	if b.lastRuneSize < 0 || b.r == 0 {
 		return ErrInvalidUnreadRune
 	}
@@ -253,7 +252,7 @@
 // by the next I/O operation, most clients should use
 // ReadBytes or ReadString instead.
 // ReadSlice returns err != nil if and only if line does not end in delim.
-func (b *Reader) ReadSlice(delim byte) (line []byte, err os.Error) {
+func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
 	// Look in buffer.
 	if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
 		line1 := b.buf[b.r : b.r+i+1]
@@ -295,7 +294,7 @@
 // of the line. The returned buffer is only valid until the next call to
 // ReadLine. ReadLine either returns a non-nil line or it returns an error,
 // never both.
-func (b *Reader) ReadLine() (line []byte, isPrefix bool, err os.Error) {
+func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
 	line, err = b.ReadSlice('\n')
 	if err == ErrBufferFull {
 		// Handle the case where "\r\n" straddles the buffer.
@@ -333,7 +332,7 @@
 // it returns the data read before the error and the error itself (often os.EOF).
 // ReadBytes returns err != nil if and only if the returned data does not end in
 // delim.
-func (b *Reader) ReadBytes(delim byte) (line []byte, err os.Error) {
+func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
 	// Use ReadSlice to look for array,
 	// accumulating full buffers.
 	var frag []byte
@@ -341,7 +340,7 @@
 	err = nil
 
 	for {
-		var e os.Error
+		var e error
 		frag, e = b.ReadSlice(delim)
 		if e == nil { // got final fragment
 			break
@@ -380,7 +379,7 @@
 // it returns the data read before the error and the error itself (often os.EOF).
 // ReadString returns err != nil if and only if the returned data does not end in
 // delim.
-func (b *Reader) ReadString(delim byte) (line string, err os.Error) {
+func (b *Reader) ReadString(delim byte) (line string, err error) {
 	bytes, e := b.ReadBytes(delim)
 	return string(bytes), e
 }
@@ -389,7 +388,7 @@
 
 // Writer implements buffering for an io.Writer object.
 type Writer struct {
-	err os.Error
+	err error
 	buf []byte
 	n   int
 	wr  io.Writer
@@ -399,7 +398,7 @@
 // which must be greater than zero. If the argument io.Writer is already a
 // Writer with large enough size, it returns the underlying Writer.
 // It returns the Writer and any error.
-func NewWriterSize(wr io.Writer, size int) (*Writer, os.Error) {
+func NewWriterSize(wr io.Writer, size int) (*Writer, error) {
 	if size <= 0 {
 		return nil, BufSizeError(size)
 	}
@@ -425,7 +424,7 @@
 }
 
 // Flush writes any buffered data to the underlying io.Writer.
-func (b *Writer) Flush() os.Error {
+func (b *Writer) Flush() error {
 	if b.err != nil {
 		return b.err
 	}
@@ -458,7 +457,7 @@
 // It returns the number of bytes written.
 // If nn < len(p), it also returns an error explaining
 // why the write is short.
-func (b *Writer) Write(p []byte) (nn int, err os.Error) {
+func (b *Writer) Write(p []byte) (nn int, err error) {
 	for len(p) > b.Available() && b.err == nil {
 		var n int
 		if b.Buffered() == 0 {
@@ -483,7 +482,7 @@
 }
 
 // WriteByte writes a single byte.
-func (b *Writer) WriteByte(c byte) os.Error {
+func (b *Writer) WriteByte(c byte) error {
 	if b.err != nil {
 		return b.err
 	}
@@ -497,7 +496,7 @@
 
 // WriteRune writes a single Unicode code point, returning
 // the number of bytes written and any error.
-func (b *Writer) WriteRune(r rune) (size int, err os.Error) {
+func (b *Writer) WriteRune(r rune) (size int, err error) {
 	if r < utf8.RuneSelf {
 		err = b.WriteByte(byte(r))
 		if err != nil {
@@ -528,7 +527,7 @@
 // It returns the number of bytes written.
 // If the count is less than len(s), it also returns an error explaining
 // why the write is short.
-func (b *Writer) WriteString(s string) (int, os.Error) {
+func (b *Writer) WriteString(s string) (int, error) {
 	nn := 0
 	for len(s) > b.Available() && b.err == nil {
 		n := copy(b.buf[b.n:], s)
diff --git a/src/pkg/bufio/bufio_test.go b/src/pkg/bufio/bufio_test.go
index 4fd5f90..0285dee 100644
--- a/src/pkg/bufio/bufio_test.go
+++ b/src/pkg/bufio/bufio_test.go
@@ -28,7 +28,7 @@
 	return r13
 }
 
-func (r13 *rot13Reader) Read(p []byte) (int, os.Error) {
+func (r13 *rot13Reader) Read(p []byte) (int, error) {
 	n, e := r13.r.Read(p)
 	if e != nil {
 		return n, e
@@ -50,14 +50,14 @@
 	nb := 0
 	for {
 		c, e := buf.ReadByte()
-		if e == os.EOF {
+		if e == io.EOF {
 			break
 		}
 		if e == nil {
 			b[nb] = c
 			nb++
 		} else if e != iotest.ErrTimeout {
-			panic("Data: " + e.String())
+			panic("Data: " + e.Error())
 		}
 	}
 	return string(b[0:nb])
@@ -95,11 +95,11 @@
 	s := ""
 	for {
 		s1, e := b.ReadString('\n')
-		if e == os.EOF {
+		if e == io.EOF {
 			break
 		}
 		if e != nil && e != iotest.ErrTimeout {
-			panic("GetLines: " + e.String())
+			panic("GetLines: " + e.Error())
 		}
 		s += s1
 	}
@@ -113,7 +113,7 @@
 	for {
 		n, e := buf.Read(b[nb : nb+m])
 		nb += n
-		if e == os.EOF {
+		if e == io.EOF {
 			break
 		}
 	}
@@ -179,13 +179,13 @@
 	step int
 }
 
-func (r *StringReader) Read(p []byte) (n int, err os.Error) {
+func (r *StringReader) Read(p []byte) (n int, err error) {
 	if r.step < len(r.data) {
 		s := r.data[r.step]
 		n = copy(p, s)
 		r.step++
 	} else {
-		err = os.EOF
+		err = io.EOF
 	}
 	return
 }
@@ -197,7 +197,7 @@
 	for {
 		r, _, err := r.ReadRune()
 		if err != nil {
-			if err != os.EOF {
+			if err != io.EOF {
 				return
 			}
 			break
@@ -235,7 +235,7 @@
 	for {
 		r1, _, err := r.ReadRune()
 		if err != nil {
-			if err != os.EOF {
+			if err != io.EOF {
 				t.Error("unexpected EOF")
 			}
 			break
@@ -328,7 +328,7 @@
 	_, _, err := r.ReadRune()
 	if err == nil {
 		t.Error("expected error at EOF")
-	} else if err != os.EOF {
+	} else if err != io.EOF {
 		t.Error("expected EOF; got", err)
 	}
 }
@@ -413,11 +413,11 @@
 
 type errorWriterTest struct {
 	n, m   int
-	err    os.Error
-	expect os.Error
+	err    error
+	expect error
 }
 
-func (w errorWriterTest) Write(p []byte) (int, os.Error) {
+func (w errorWriterTest) Write(p []byte) (int, error) {
 	return len(p) * w.n / w.m, w.err
 }
 
@@ -559,7 +559,7 @@
 	if s, err := buf.Peek(0); string(s) != "" || err != nil {
 		t.Fatalf("want %q got %q, err=%v", "", string(s), err)
 	}
-	if _, err := buf.Peek(1); err != os.EOF {
+	if _, err := buf.Peek(1); err != io.EOF {
 		t.Fatalf("want EOF got %v", err)
 	}
 }
@@ -583,7 +583,7 @@
 	stride int
 }
 
-func (t *testReader) Read(buf []byte) (n int, err os.Error) {
+func (t *testReader) Read(buf []byte) (n int, err error) {
 	n = t.stride
 	if n > len(t.data) {
 		n = len(t.data)
@@ -594,7 +594,7 @@
 	copy(buf, t.data)
 	t.data = t.data[n:]
 	if len(t.data) == 0 {
-		err = os.EOF
+		err = io.EOF
 	}
 	return
 }
@@ -614,7 +614,7 @@
 				t.Errorf("ReadLine returned prefix")
 			}
 			if err != nil {
-				if err != os.EOF {
+				if err != io.EOF {
 					t.Fatalf("Got unknown error: %s", err)
 				}
 				break
@@ -679,7 +679,7 @@
 func TestReadEmptyBuffer(t *testing.T) {
 	l, _ := NewReaderSize(bytes.NewBuffer(nil), 10)
 	line, isPrefix, err := l.ReadLine()
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
 	}
 }
@@ -693,7 +693,7 @@
 	}
 
 	line, isPrefix, err := l.ReadLine()
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
 	}
 }
@@ -701,7 +701,7 @@
 type readLineResult struct {
 	line     []byte
 	isPrefix bool
-	err      os.Error
+	err      error
 }
 
 var readLineNewlinesTests = []struct {
@@ -714,27 +714,27 @@
 		{nil, false, nil},
 		{[]byte("b"), true, nil},
 		{nil, false, nil},
-		{nil, false, os.EOF},
+		{nil, false, io.EOF},
 	}},
 	{"hello\r\nworld\r\n", 6, []readLineResult{
 		{[]byte("hello"), true, nil},
 		{nil, false, nil},
 		{[]byte("world"), true, nil},
 		{nil, false, nil},
-		{nil, false, os.EOF},
+		{nil, false, io.EOF},
 	}},
 	{"hello\rworld\r", 6, []readLineResult{
 		{[]byte("hello"), true, nil},
 		{[]byte("\rworld"), true, nil},
 		{[]byte("\r"), false, nil},
-		{nil, false, os.EOF},
+		{nil, false, io.EOF},
 	}},
 	{"h\ri\r\n\r", 2, []readLineResult{
 		{[]byte("h"), true, nil},
 		{[]byte("\ri"), true, nil},
 		{nil, false, nil},
 		{[]byte("\r"), false, nil},
-		{nil, false, os.EOF},
+		{nil, false, io.EOF},
 	}},
 }
 
diff --git a/src/pkg/bytes/buffer.go b/src/pkg/bytes/buffer.go
index c2a8c9f..fbfd621 100644
--- a/src/pkg/bytes/buffer.go
+++ b/src/pkg/bytes/buffer.go
@@ -7,8 +7,8 @@
 // Simple byte buffer for marshaling data.
 
 import (
+	"errors"
 	"io"
-	"os"
 	"utf8"
 )
 
@@ -94,7 +94,7 @@
 
 // Write appends the contents of p to the buffer.  The return
 // value n is the length of p; err is always nil.
-func (b *Buffer) Write(p []byte) (n int, err os.Error) {
+func (b *Buffer) Write(p []byte) (n int, err error) {
 	b.lastRead = opInvalid
 	m := b.grow(len(p))
 	copy(b.buf[m:], p)
@@ -103,7 +103,7 @@
 
 // WriteString appends the contents of s to the buffer.  The return
 // value n is the length of s; err is always nil.
-func (b *Buffer) WriteString(s string) (n int, err os.Error) {
+func (b *Buffer) WriteString(s string) (n int, err error) {
 	b.lastRead = opInvalid
 	m := b.grow(len(s))
 	return copy(b.buf[m:], s), nil
@@ -119,7 +119,7 @@
 // The return value n is the number of bytes read.
 // Any error except os.EOF encountered during the read
 // is also returned.
-func (b *Buffer) ReadFrom(r io.Reader) (n int64, err os.Error) {
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
 	b.lastRead = opInvalid
 	// If buffer is empty, reset to recover space.
 	if b.off >= len(b.buf) {
@@ -143,7 +143,7 @@
 		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
 		b.buf = b.buf[0 : len(b.buf)+m]
 		n += int64(m)
-		if e == os.EOF {
+		if e == io.EOF {
 			break
 		}
 		if e != nil {
@@ -157,7 +157,7 @@
 // occurs. The return value n is the number of bytes written; it always
 // fits into an int, but it is int64 to match the io.WriterTo interface.
 // Any error encountered during the write is also returned.
-func (b *Buffer) WriteTo(w io.Writer) (n int64, err os.Error) {
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
 	b.lastRead = opInvalid
 	if b.off < len(b.buf) {
 		m, e := w.Write(b.buf[b.off:])
@@ -177,7 +177,7 @@
 // WriteByte appends the byte c to the buffer.
 // The returned error is always nil, but is included
 // to match bufio.Writer's WriteByte.
-func (b *Buffer) WriteByte(c byte) os.Error {
+func (b *Buffer) WriteByte(c byte) error {
 	b.lastRead = opInvalid
 	m := b.grow(1)
 	b.buf[m] = c
@@ -188,7 +188,7 @@
 // code point r to the buffer, returning its length and
 // an error, which is always nil but is included
 // to match bufio.Writer's WriteRune.
-func (b *Buffer) WriteRune(r rune) (n int, err os.Error) {
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
 	if r < utf8.RuneSelf {
 		b.WriteByte(byte(r))
 		return 1, nil
@@ -202,12 +202,12 @@
 // is drained.  The return value n is the number of bytes read.  If the
 // buffer has no data to return, err is os.EOF even if len(p) is zero;
 // otherwise it is nil.
-func (b *Buffer) Read(p []byte) (n int, err os.Error) {
+func (b *Buffer) Read(p []byte) (n int, err error) {
 	b.lastRead = opInvalid
 	if b.off >= len(b.buf) {
 		// Buffer is empty, reset to recover space.
 		b.Truncate(0)
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	n = copy(p, b.buf[b.off:])
 	b.off += n
@@ -237,12 +237,12 @@
 
 // ReadByte reads and returns the next byte from the buffer.
 // If no byte is available, it returns error os.EOF.
-func (b *Buffer) ReadByte() (c byte, err os.Error) {
+func (b *Buffer) ReadByte() (c byte, err error) {
 	b.lastRead = opInvalid
 	if b.off >= len(b.buf) {
 		// Buffer is empty, reset to recover space.
 		b.Truncate(0)
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	c = b.buf[b.off]
 	b.off++
@@ -255,12 +255,12 @@
 // If no bytes are available, the error returned is os.EOF.
 // If the bytes are an erroneous UTF-8 encoding, it
 // consumes one byte and returns U+FFFD, 1.
-func (b *Buffer) ReadRune() (r rune, size int, err os.Error) {
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
 	b.lastRead = opInvalid
 	if b.off >= len(b.buf) {
 		// Buffer is empty, reset to recover space.
 		b.Truncate(0)
-		return 0, 0, os.EOF
+		return 0, 0, io.EOF
 	}
 	b.lastRead = opReadRune
 	c := b.buf[b.off]
@@ -278,9 +278,9 @@
 // not a ReadRune, UnreadRune returns an error.  (In this regard
 // it is stricter than UnreadByte, which will unread the last byte
 // from any read operation.)
-func (b *Buffer) UnreadRune() os.Error {
+func (b *Buffer) UnreadRune() error {
 	if b.lastRead != opReadRune {
-		return os.NewError("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
+		return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
 	}
 	b.lastRead = opInvalid
 	if b.off > 0 {
@@ -293,9 +293,9 @@
 // UnreadByte unreads the last byte returned by the most recent
 // read operation.  If write has happened since the last read, UnreadByte
 // returns an error.
-func (b *Buffer) UnreadByte() os.Error {
+func (b *Buffer) UnreadByte() error {
 	if b.lastRead != opReadRune && b.lastRead != opRead {
-		return os.NewError("bytes.Buffer: UnreadByte: previous operation was not a read")
+		return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
 	}
 	b.lastRead = opInvalid
 	if b.off > 0 {
@@ -310,12 +310,12 @@
 // it returns the data read before the error and the error itself (often os.EOF).
 // ReadBytes returns err != nil if and only if the returned data does not end in
 // delim.
-func (b *Buffer) ReadBytes(delim byte) (line []byte, err os.Error) {
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
 	i := IndexByte(b.buf[b.off:], delim)
 	size := i + 1
 	if i < 0 {
 		size = len(b.buf) - b.off
-		err = os.EOF
+		err = io.EOF
 	}
 	line = make([]byte, size)
 	copy(line, b.buf[b.off:])
@@ -329,7 +329,7 @@
 // it returns the data read before the error and the error itself (often os.EOF).
 // ReadString returns err != nil if and only if the returned data does not end
 // in delim.
-func (b *Buffer) ReadString(delim byte) (line string, err os.Error) {
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
 	bytes, err := b.ReadBytes(delim)
 	return string(bytes), err
 }
diff --git a/src/pkg/bytes/buffer_test.go b/src/pkg/bytes/buffer_test.go
index ee38e08..c271b48 100644
--- a/src/pkg/bytes/buffer_test.go
+++ b/src/pkg/bytes/buffer_test.go
@@ -6,7 +6,7 @@
 
 import (
 	. "bytes"
-	"os"
+	"io"
 	"rand"
 	"testing"
 	"utf8"
@@ -344,21 +344,21 @@
 	buffer   string
 	delim    byte
 	expected []string
-	err      os.Error
+	err      error
 }{
-	{"", 0, []string{""}, os.EOF},
+	{"", 0, []string{""}, io.EOF},
 	{"a\x00", 0, []string{"a\x00"}, nil},
 	{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
 	{"hello\x01world", 1, []string{"hello\x01"}, nil},
-	{"foo\nbar", 0, []string{"foo\nbar"}, os.EOF},
+	{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
 	{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
-	{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, os.EOF},
+	{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
 }
 
 func TestReadBytes(t *testing.T) {
 	for _, test := range readBytesTests {
 		buf := NewBufferString(test.buffer)
-		var err os.Error
+		var err error
 		for _, expected := range test.expected {
 			var bytes []byte
 			bytes, err = buf.ReadBytes(test.delim)
diff --git a/src/pkg/compress/bzip2/bit_reader.go b/src/pkg/compress/bzip2/bit_reader.go
index 50f0ec8..390ee7c 100644
--- a/src/pkg/compress/bzip2/bit_reader.go
+++ b/src/pkg/compress/bzip2/bit_reader.go
@@ -7,7 +7,6 @@
 import (
 	"bufio"
 	"io"
-	"os"
 )
 
 // bitReader wraps an io.Reader and provides the ability to read values,
@@ -18,14 +17,14 @@
 	r    byteReader
 	n    uint64
 	bits uint
-	err  os.Error
+	err  error
 }
 
 // bitReader needs to read bytes from an io.Reader. We attempt to cast the
 // given io.Reader to this interface and, if it doesn't already fit, we wrap in
 // a bufio.Reader.
 type byteReader interface {
-	ReadByte() (byte, os.Error)
+	ReadByte() (byte, error)
 }
 
 func newBitReader(r io.Reader) bitReader {
@@ -42,7 +41,7 @@
 func (br *bitReader) ReadBits64(bits uint) (n uint64) {
 	for bits > br.bits {
 		b, err := br.r.ReadByte()
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		if err != nil {
@@ -83,6 +82,6 @@
 	return n != 0
 }
 
-func (br *bitReader) Error() os.Error {
+func (br *bitReader) Error() error {
 	return br.err
 }
diff --git a/src/pkg/compress/bzip2/bzip2.go b/src/pkg/compress/bzip2/bzip2.go
index 8b45723..343cca0 100644
--- a/src/pkg/compress/bzip2/bzip2.go
+++ b/src/pkg/compress/bzip2/bzip2.go
@@ -5,10 +5,7 @@
 // Package bzip2 implements bzip2 decompression.
 package bzip2
 
-import (
-	"io"
-	"os"
-)
+import "io"
 
 // There's no RFC for bzip2. I used the Wikipedia page for reference and a lot
 // of guessing: http://en.wikipedia.org/wiki/Bzip2
@@ -19,7 +16,7 @@
 // syntactically invalid.
 type StructuralError string
 
-func (s StructuralError) String() string {
+func (s StructuralError) Error() string {
 	return "bzip2 data invalid: " + string(s)
 }
 
@@ -53,7 +50,7 @@
 const bzip2FinalMagic = 0x177245385090
 
 // setup parses the bzip2 header.
-func (bz2 *reader) setup() os.Error {
+func (bz2 *reader) setup() error {
 	br := &bz2.br
 
 	magic := br.ReadBits(16)
@@ -76,9 +73,9 @@
 	return nil
 }
 
-func (bz2 *reader) Read(buf []byte) (n int, err os.Error) {
+func (bz2 *reader) Read(buf []byte) (n int, err error) {
 	if bz2.eof {
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 
 	if !bz2.setupDone {
@@ -101,7 +98,7 @@
 	return
 }
 
-func (bz2 *reader) read(buf []byte) (n int, err os.Error) {
+func (bz2 *reader) read(buf []byte) (n int, err error) {
 	// bzip2 is a block based compressor, except that it has a run-length
 	// preprocessing step. The block based nature means that we can
 	// preallocate fixed-size buffers and reuse them. However, the RLE
@@ -162,7 +159,7 @@
 	if magic == bzip2FinalMagic {
 		br.ReadBits64(32) // ignored CRC
 		bz2.eof = true
-		return 0, os.EOF
+		return 0, io.EOF
 	} else if magic != bzip2BlockMagic {
 		return 0, StructuralError("bad magic value found")
 	}
@@ -176,7 +173,7 @@
 }
 
 // readBlock reads a bzip2 block. The magic number should already have been consumed.
-func (bz2 *reader) readBlock() (err os.Error) {
+func (bz2 *reader) readBlock() (err error) {
 	br := &bz2.br
 	br.ReadBits64(32) // skip checksum. TODO: check it if we can figure out what it is.
 	randomized := br.ReadBits(1)
diff --git a/src/pkg/compress/bzip2/bzip2_test.go b/src/pkg/compress/bzip2/bzip2_test.go
index 156eea8..7b227ac9 100644
--- a/src/pkg/compress/bzip2/bzip2_test.go
+++ b/src/pkg/compress/bzip2/bzip2_test.go
@@ -9,7 +9,6 @@
 	"encoding/hex"
 	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -46,7 +45,7 @@
 	return bytes.NewBuffer(data)
 }
 
-func decompressHex(s string) (out []byte, err os.Error) {
+func decompressHex(s string) (out []byte, err error) {
 	r := NewReader(readerFromHex(s))
 	return ioutil.ReadAll(r)
 }
diff --git a/src/pkg/compress/bzip2/huffman.go b/src/pkg/compress/bzip2/huffman.go
index dc05739..078c1cb 100644
--- a/src/pkg/compress/bzip2/huffman.go
+++ b/src/pkg/compress/bzip2/huffman.go
@@ -4,10 +4,7 @@
 
 package bzip2
 
-import (
-	"os"
-	"sort"
-)
+import "sort"
 
 // A huffmanTree is a binary tree which is navigated, bit-by-bit to reach a
 // symbol.
@@ -63,7 +60,7 @@
 
 // newHuffmanTree builds a Huffman tree from a slice containing the code
 // lengths of each symbol. The maximum code length is 32 bits.
-func newHuffmanTree(lengths []uint8) (huffmanTree, os.Error) {
+func newHuffmanTree(lengths []uint8) (huffmanTree, error) {
 	// There are many possible trees that assign the same code length to
 	// each symbol (consider reflecting a tree down the middle, for
 	// example). Since the code length assignments determine the
@@ -176,7 +173,7 @@
 // buildHuffmanNode takes a slice of sorted huffmanCodes and builds a node in
 // the Huffman tree at the given level. It returns the index of the newly
 // constructed node.
-func buildHuffmanNode(t *huffmanTree, codes []huffmanCode, level uint32) (nodeIndex uint16, err os.Error) {
+func buildHuffmanNode(t *huffmanTree, codes []huffmanCode, level uint32) (nodeIndex uint16, err error) {
 	test := uint32(1) << (31 - level)
 
 	// We have to search the list of codes to find the divide between the left and right sides.
diff --git a/src/pkg/compress/flate/deflate.go b/src/pkg/compress/flate/deflate.go
index b1cee0b..1f659ba 100644
--- a/src/pkg/compress/flate/deflate.go
+++ b/src/pkg/compress/flate/deflate.go
@@ -7,7 +7,6 @@
 import (
 	"io"
 	"math"
-	"os"
 )
 
 const (
@@ -89,7 +88,7 @@
 	offset         int
 	hash           int
 	maxInsertIndex int
-	err            os.Error
+	err            error
 }
 
 func (d *compressor) fillDeflate(b []byte) int {
@@ -123,7 +122,7 @@
 	return n
 }
 
-func (d *compressor) writeBlock(tokens []token, index int, eof bool) os.Error {
+func (d *compressor) writeBlock(tokens []token, index int, eof bool) error {
 	if index > 0 || eof {
 		var window []byte
 		if d.blockStart <= index {
@@ -194,7 +193,7 @@
 	return
 }
 
-func (d *compressor) writeStoredBlock(buf []byte) os.Error {
+func (d *compressor) writeStoredBlock(buf []byte) error {
 	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
 		return d.w.err
 	}
@@ -365,7 +364,7 @@
 	d.windowEnd = 0
 }
 
-func (d *compressor) write(b []byte) (n int, err os.Error) {
+func (d *compressor) write(b []byte) (n int, err error) {
 	n = len(b)
 	b = b[d.fill(d, b):]
 	for len(b) > 0 {
@@ -375,7 +374,7 @@
 	return n, d.err
 }
 
-func (d *compressor) syncFlush() os.Error {
+func (d *compressor) syncFlush() error {
 	d.sync = true
 	d.step(d)
 	if d.err == nil {
@@ -387,7 +386,7 @@
 	return d.err
 }
 
-func (d *compressor) init(w io.Writer, level int) (err os.Error) {
+func (d *compressor) init(w io.Writer, level int) (err error) {
 	d.w = newHuffmanBitWriter(w)
 
 	switch {
@@ -409,7 +408,7 @@
 	return nil
 }
 
-func (d *compressor) close() os.Error {
+func (d *compressor) close() error {
 	d.sync = true
 	d.step(d)
 	if d.err != nil {
@@ -455,7 +454,7 @@
 	enabled bool
 }
 
-func (w *dictWriter) Write(b []byte) (n int, err os.Error) {
+func (w *dictWriter) Write(b []byte) (n int, err error) {
 	if w.enabled {
 		return w.w.Write(b)
 	}
@@ -470,7 +469,7 @@
 
 // Write writes data to w, which will eventually write the
 // compressed form of data to its underlying writer.
-func (w *Writer) Write(data []byte) (n int, err os.Error) {
+func (w *Writer) Write(data []byte) (n int, err error) {
 	return w.d.write(data)
 }
 
@@ -481,13 +480,13 @@
 // If the underlying writer returns an error, Flush returns that error.
 //
 // In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
-func (w *Writer) Flush() os.Error {
+func (w *Writer) Flush() error {
 	// For more about flushing:
 	// http://www.bolet.org/~pornin/deflate-flush.html
 	return w.d.syncFlush()
 }
 
 // Close flushes and closes the writer.
-func (w *Writer) Close() os.Error {
+func (w *Writer) Close() error {
 	return w.d.close()
 }
diff --git a/src/pkg/compress/flate/deflate_test.go b/src/pkg/compress/flate/deflate_test.go
index 9308236..db2d71d 100644
--- a/src/pkg/compress/flate/deflate_test.go
+++ b/src/pkg/compress/flate/deflate_test.go
@@ -9,7 +9,6 @@
 	"fmt"
 	"io"
 	"io/ioutil"
-	"os"
 	"sync"
 	"testing"
 )
@@ -102,7 +101,7 @@
 	return &syncBuffer{ready: make(chan bool, 1)}
 }
 
-func (b *syncBuffer) Read(p []byte) (n int, err os.Error) {
+func (b *syncBuffer) Read(p []byte) (n int, err error) {
 	for {
 		b.mu.RLock()
 		n, err = b.buf.Read(p)
@@ -122,7 +121,7 @@
 	}
 }
 
-func (b *syncBuffer) Write(p []byte) (n int, err os.Error) {
+func (b *syncBuffer) Write(p []byte) (n int, err error) {
 	n, err = b.buf.Write(p)
 	b.signal()
 	return
@@ -137,7 +136,7 @@
 	b.signal()
 }
 
-func (b *syncBuffer) Close() os.Error {
+func (b *syncBuffer) Close() error {
 	b.closed = true
 	b.signal()
 	return nil
@@ -204,7 +203,7 @@
 	}
 	buf.ReadMode()
 	out := make([]byte, 10)
-	if n, err := r.Read(out); n > 0 || err != os.EOF {
+	if n, err := r.Read(out); n > 0 || err != io.EOF {
 		t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n])
 	}
 	if buf.buf.Len() != 0 {
@@ -225,7 +224,7 @@
 	}
 }
 
-func testToFromWithLevel(t *testing.T, level int, input []byte, name string) os.Error {
+func testToFromWithLevel(t *testing.T, level int, input []byte, name string) error {
 	buffer := bytes.NewBuffer(nil)
 	w := NewWriter(buffer, level)
 	w.Write(input)
diff --git a/src/pkg/compress/flate/huffman_bit_writer.go b/src/pkg/compress/flate/huffman_bit_writer.go
index 3981df5..efd99c6 100644
--- a/src/pkg/compress/flate/huffman_bit_writer.go
+++ b/src/pkg/compress/flate/huffman_bit_writer.go
@@ -7,7 +7,6 @@
 import (
 	"io"
 	"math"
-	"os"
 	"strconv"
 )
 
@@ -83,7 +82,7 @@
 	literalEncoding *huffmanEncoder
 	offsetEncoding  *huffmanEncoder
 	codegenEncoding *huffmanEncoder
-	err             os.Error
+	err             error
 }
 
 type WrongValueError struct {
@@ -106,7 +105,7 @@
 	}
 }
 
-func (err WrongValueError) String() string {
+func (err WrongValueError) Error() string {
 	return "huffmanBitWriter: " + err.name + " should belong to [" + strconv.Itoa64(int64(err.from)) + ";" +
 		strconv.Itoa64(int64(err.to)) + "] but actual value is " + strconv.Itoa64(int64(err.value))
 }
diff --git a/src/pkg/compress/flate/inflate.go b/src/pkg/compress/flate/inflate.go
index 3845f12..3f0c948 100644
--- a/src/pkg/compress/flate/inflate.go
+++ b/src/pkg/compress/flate/inflate.go
@@ -10,7 +10,6 @@
 import (
 	"bufio"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -25,33 +24,33 @@
 // A CorruptInputError reports the presence of corrupt input at a given offset.
 type CorruptInputError int64
 
-func (e CorruptInputError) String() string {
+func (e CorruptInputError) Error() string {
 	return "flate: corrupt input before offset " + strconv.Itoa64(int64(e))
 }
 
 // An InternalError reports an error in the flate code itself.
 type InternalError string
 
-func (e InternalError) String() string { return "flate: internal error: " + string(e) }
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
 
 // A ReadError reports an error encountered while reading input.
 type ReadError struct {
-	Offset int64    // byte offset where error occurred
-	Error  os.Error // error returned by underlying Read
+	Offset int64 // byte offset where error occurred
+	Err    error // error returned by underlying Read
 }
 
-func (e *ReadError) String() string {
-	return "flate: read error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Error.String()
+func (e *ReadError) Error() string {
+	return "flate: read error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Err.Error()
 }
 
 // A WriteError reports an error encountered while writing output.
 type WriteError struct {
-	Offset int64    // byte offset where error occurred
-	Error  os.Error // error returned by underlying Write
+	Offset int64 // byte offset where error occurred
+	Err    error // error returned by underlying Write
 }
 
-func (e *WriteError) String() string {
-	return "flate: write error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Error.String()
+func (e *WriteError) Error() string {
+	return "flate: write error at offset " + strconv.Itoa64(e.Offset) + ": " + e.Err.Error()
 }
 
 // Huffman decoder is based on
@@ -190,7 +189,7 @@
 // the NewReader will introduce its own buffering.
 type Reader interface {
 	io.Reader
-	ReadByte() (c byte, err os.Error)
+	ReadByte() (c byte, err error)
 }
 
 // Decompress state.
@@ -224,7 +223,7 @@
 	// and decompression state.
 	step     func(*decompressor)
 	final    bool
-	err      os.Error
+	err      error
 	toRead   []byte
 	hl, hd   *huffmanDecoder
 	copyLen  int
@@ -237,7 +236,7 @@
 			f.flush((*decompressor).nextBlock)
 			return
 		}
-		f.err = os.EOF
+		f.err = io.EOF
 		return
 	}
 	for f.nb < 1+2 {
@@ -272,7 +271,7 @@
 	}
 }
 
-func (f *decompressor) Read(b []byte) (int, os.Error) {
+func (f *decompressor) Read(b []byte) (int, error) {
 	for {
 		if len(f.toRead) > 0 {
 			n := copy(b, f.toRead)
@@ -287,8 +286,8 @@
 	panic("unreachable")
 }
 
-func (f *decompressor) Close() os.Error {
-	if f.err == os.EOF {
+func (f *decompressor) Close() error {
+	if f.err == io.EOF {
 		return nil
 	}
 	return f.err
@@ -299,7 +298,7 @@
 
 var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
 
-func (f *decompressor) readHuffman() os.Error {
+func (f *decompressor) readHuffman() error {
 	// HLIT[5], HDIST[5], HCLEN[4].
 	for f.nb < 5+5+4 {
 		if err := f.moreBits(); err != nil {
@@ -625,10 +624,10 @@
 	f.hw = f.hp
 }
 
-func (f *decompressor) moreBits() os.Error {
+func (f *decompressor) moreBits() error {
 	c, err := f.r.ReadByte()
 	if err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return err
@@ -640,7 +639,7 @@
 }
 
 // Read the next Huffman-encoded symbol from f according to h.
-func (f *decompressor) huffSym(h *huffmanDecoder) (int, os.Error) {
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
 	for n := uint(h.min); n <= uint(h.max); n++ {
 		lim := h.limit[n]
 		if lim == -1 {
diff --git a/src/pkg/compress/gzip/gunzip.go b/src/pkg/compress/gzip/gunzip.go
index 6ac9293..a23e515 100644
--- a/src/pkg/compress/gzip/gunzip.go
+++ b/src/pkg/compress/gzip/gunzip.go
@@ -9,10 +9,10 @@
 import (
 	"bufio"
 	"compress/flate"
+	"errors"
 	"hash"
 	"hash/crc32"
 	"io"
-	"os"
 )
 
 // BUG(nigeltao): Comments and Names don't properly map UTF-8 character codes outside of
@@ -36,8 +36,8 @@
 	return bufio.NewReader(r)
 }
 
-var HeaderError = os.NewError("invalid gzip header")
-var ChecksumError = os.NewError("gzip checksum error")
+var HeaderError = errors.New("invalid gzip header")
+var ChecksumError = errors.New("gzip checksum error")
 
 // The gzip file stores a header giving metadata about the compressed file.
 // That header is exposed as the fields of the Compressor and Decompressor structs.
@@ -71,13 +71,13 @@
 	size         uint32
 	flg          byte
 	buf          [512]byte
-	err          os.Error
+	err          error
 }
 
 // NewReader creates a new Decompressor reading the given reader.
 // The implementation buffers input and may read more data than necessary from r.
 // It is the caller's responsibility to call Close on the Decompressor when done.
-func NewReader(r io.Reader) (*Decompressor, os.Error) {
+func NewReader(r io.Reader) (*Decompressor, error) {
 	z := new(Decompressor)
 	z.r = makeReader(r)
 	z.digest = crc32.NewIEEE()
@@ -93,8 +93,8 @@
 	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
 }
 
-func (z *Decompressor) readString() (string, os.Error) {
-	var err os.Error
+func (z *Decompressor) readString() (string, error) {
+	var err error
 	for i := 0; ; i++ {
 		if i >= len(z.buf) {
 			return "", HeaderError
@@ -112,7 +112,7 @@
 	panic("not reached")
 }
 
-func (z *Decompressor) read2() (uint32, os.Error) {
+func (z *Decompressor) read2() (uint32, error) {
 	_, err := io.ReadFull(z.r, z.buf[0:2])
 	if err != nil {
 		return 0, err
@@ -120,7 +120,7 @@
 	return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil
 }
 
-func (z *Decompressor) readHeader(save bool) os.Error {
+func (z *Decompressor) readHeader(save bool) error {
 	_, err := io.ReadFull(z.r, z.buf[0:10])
 	if err != nil {
 		return err
@@ -186,7 +186,7 @@
 	return nil
 }
 
-func (z *Decompressor) Read(p []byte) (n int, err os.Error) {
+func (z *Decompressor) Read(p []byte) (n int, err error) {
 	if z.err != nil {
 		return 0, z.err
 	}
@@ -197,7 +197,7 @@
 	n, err = z.decompressor.Read(p)
 	z.digest.Write(p[0:n])
 	z.size += uint32(n)
-	if n != 0 || err != os.EOF {
+	if n != 0 || err != io.EOF {
 		z.err = err
 		return
 	}
@@ -227,4 +227,4 @@
 }
 
 // Calling Close does not close the wrapped io.Reader originally passed to NewReader.
-func (z *Decompressor) Close() os.Error { return z.decompressor.Close() }
+func (z *Decompressor) Close() error { return z.decompressor.Close() }
diff --git a/src/pkg/compress/gzip/gunzip_test.go b/src/pkg/compress/gzip/gunzip_test.go
index 1c08c73..771b0b6 100644
--- a/src/pkg/compress/gzip/gunzip_test.go
+++ b/src/pkg/compress/gzip/gunzip_test.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"testing"
 )
 
@@ -16,7 +15,7 @@
 	desc string
 	raw  string
 	gzip []byte
-	err  os.Error
+	err  error
 }
 
 var gunzipTests = []gunzipTest{
diff --git a/src/pkg/compress/gzip/gzip.go b/src/pkg/compress/gzip/gzip.go
index 8860d10..94b0f1f 100644
--- a/src/pkg/compress/gzip/gzip.go
+++ b/src/pkg/compress/gzip/gzip.go
@@ -6,10 +6,10 @@
 
 import (
 	"compress/flate"
+	"errors"
 	"hash"
 	"hash/crc32"
 	"io"
-	"os"
 )
 
 // These constants are copied from the flate package, so that code that imports
@@ -32,11 +32,11 @@
 	size       uint32
 	closed     bool
 	buf        [10]byte
-	err        os.Error
+	err        error
 }
 
 // NewWriter calls NewWriterLevel with the default compression level.
-func NewWriter(w io.Writer) (*Compressor, os.Error) {
+func NewWriter(w io.Writer) (*Compressor, error) {
 	return NewWriterLevel(w, DefaultCompression)
 }
 
@@ -47,7 +47,7 @@
 // It is the caller's responsibility to call Close on the WriteCloser when done.
 // level is the compression level, which can be DefaultCompression, NoCompression,
 // or any integer value between BestSpeed and BestCompression (inclusive).
-func NewWriterLevel(w io.Writer, level int) (*Compressor, os.Error) {
+func NewWriterLevel(w io.Writer, level int) (*Compressor, error) {
 	z := new(Compressor)
 	z.OS = 255 // unknown
 	z.w = w
@@ -70,9 +70,9 @@
 }
 
 // writeBytes writes a length-prefixed byte slice to z.w.
-func (z *Compressor) writeBytes(b []byte) os.Error {
+func (z *Compressor) writeBytes(b []byte) error {
 	if len(b) > 0xffff {
-		return os.NewError("gzip.Write: Extra data is too large")
+		return errors.New("gzip.Write: Extra data is too large")
 	}
 	put2(z.buf[0:2], uint16(len(b)))
 	_, err := z.w.Write(z.buf[0:2])
@@ -84,12 +84,12 @@
 }
 
 // writeString writes a string (in ISO 8859-1 (Latin-1) format) to z.w.
-func (z *Compressor) writeString(s string) os.Error {
+func (z *Compressor) writeString(s string) error {
 	// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
 	// TODO(nigeltao): Convert from UTF-8 to ISO 8859-1 (Latin-1).
 	for _, v := range s {
 		if v == 0 || v > 0x7f {
-			return os.NewError("gzip.Write: non-ASCII header string")
+			return errors.New("gzip.Write: non-ASCII header string")
 		}
 	}
 	_, err := io.WriteString(z.w, s)
@@ -102,7 +102,7 @@
 	return err
 }
 
-func (z *Compressor) Write(p []byte) (int, os.Error) {
+func (z *Compressor) Write(p []byte) (int, error) {
 	if z.err != nil {
 		return 0, z.err
 	}
@@ -162,7 +162,7 @@
 }
 
 // Calling Close does not close the wrapped io.Writer originally passed to NewWriter.
-func (z *Compressor) Close() os.Error {
+func (z *Compressor) Close() error {
 	if z.err != nil {
 		return z.err
 	}
diff --git a/src/pkg/compress/lzw/reader.go b/src/pkg/compress/lzw/reader.go
index 21231c8..c787a95 100644
--- a/src/pkg/compress/lzw/reader.go
+++ b/src/pkg/compress/lzw/reader.go
@@ -16,6 +16,7 @@
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -45,9 +46,9 @@
 	bits     uint32
 	nBits    uint
 	width    uint
-	read     func(*decoder) (uint16, os.Error) // readLSB or readMSB
-	litWidth int                               // width in bits of literal codes
-	err      os.Error
+	read     func(*decoder) (uint16, error) // readLSB or readMSB
+	litWidth int                            // width in bits of literal codes
+	err      error
 
 	// The first 1<<litWidth codes are literal codes.
 	// The next two codes mean clear and EOF.
@@ -78,7 +79,7 @@
 }
 
 // readLSB returns the next code for "Least Significant Bits first" data.
-func (d *decoder) readLSB() (uint16, os.Error) {
+func (d *decoder) readLSB() (uint16, error) {
 	for d.nBits < d.width {
 		x, err := d.r.ReadByte()
 		if err != nil {
@@ -94,7 +95,7 @@
 }
 
 // readMSB returns the next code for "Most Significant Bits first" data.
-func (d *decoder) readMSB() (uint16, os.Error) {
+func (d *decoder) readMSB() (uint16, error) {
 	for d.nBits < d.width {
 		x, err := d.r.ReadByte()
 		if err != nil {
@@ -109,7 +110,7 @@
 	return code, nil
 }
 
-func (d *decoder) Read(b []byte) (int, os.Error) {
+func (d *decoder) Read(b []byte) (int, error) {
 	for {
 		if len(d.toRead) > 0 {
 			n := copy(b, d.toRead)
@@ -132,7 +133,7 @@
 	for {
 		code, err := d.read(d)
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				err = io.ErrUnexpectedEOF
 			}
 			d.err = err
@@ -156,7 +157,7 @@
 			continue
 		case code == d.eof:
 			d.flush()
-			d.err = os.EOF
+			d.err = io.EOF
 			return
 		case code <= d.hi:
 			c, i := code, len(d.output)-1
@@ -186,7 +187,7 @@
 				d.prefix[d.hi] = d.last
 			}
 		default:
-			d.err = os.NewError("lzw: invalid code")
+			d.err = errors.New("lzw: invalid code")
 			return
 		}
 		d.last, d.hi = code, d.hi+1
@@ -211,7 +212,7 @@
 	d.o = 0
 }
 
-func (d *decoder) Close() os.Error {
+func (d *decoder) Close() error {
 	d.err = os.EINVAL // in case any Reads come along
 	return nil
 }
@@ -230,7 +231,7 @@
 	case MSB:
 		d.read = (*decoder).readMSB
 	default:
-		d.err = os.NewError("lzw: unknown order")
+		d.err = errors.New("lzw: unknown order")
 		return d
 	}
 	if litWidth < 2 || 8 < litWidth {
diff --git a/src/pkg/compress/lzw/reader_test.go b/src/pkg/compress/lzw/reader_test.go
index f8042b0..0982157 100644
--- a/src/pkg/compress/lzw/reader_test.go
+++ b/src/pkg/compress/lzw/reader_test.go
@@ -8,7 +8,6 @@
 	"bytes"
 	"io"
 	"io/ioutil"
-	"os"
 	"runtime"
 	"strconv"
 	"strings"
@@ -19,7 +18,7 @@
 	desc       string
 	raw        string
 	compressed string
-	err        os.Error
+	err        error
 }
 
 var lzwTests = []lzwTest{
diff --git a/src/pkg/compress/lzw/writer.go b/src/pkg/compress/lzw/writer.go
index 87143b7..3f380fa 100644
--- a/src/pkg/compress/lzw/writer.go
+++ b/src/pkg/compress/lzw/writer.go
@@ -6,6 +6,7 @@
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -13,20 +14,20 @@
 
 // A writer is a buffered, flushable writer.
 type writer interface {
-	WriteByte(byte) os.Error
-	Flush() os.Error
+	WriteByte(byte) error
+	Flush() error
 }
 
 // An errWriteCloser is an io.WriteCloser that always returns a given error.
 type errWriteCloser struct {
-	err os.Error
+	err error
 }
 
-func (e *errWriteCloser) Write([]byte) (int, os.Error) {
+func (e *errWriteCloser) Write([]byte) (int, error) {
 	return 0, e.err
 }
 
-func (e *errWriteCloser) Close() os.Error {
+func (e *errWriteCloser) Close() error {
 	return e.err
 }
 
@@ -50,7 +51,7 @@
 	w writer
 	// write, bits, nBits and width are the state for converting a code stream
 	// into a byte stream.
-	write func(*encoder, uint32) os.Error
+	write func(*encoder, uint32) error
 	bits  uint32
 	nBits uint
 	width uint
@@ -64,7 +65,7 @@
 	savedCode uint32
 	// err is the first error encountered during writing. Closing the encoder
 	// will make any future Write calls return os.EINVAL.
-	err os.Error
+	err error
 	// table is the hash table from 20-bit keys to 12-bit values. Each table
 	// entry contains key<<12|val and collisions resolve by linear probing.
 	// The keys consist of a 12-bit code prefix and an 8-bit byte suffix.
@@ -73,7 +74,7 @@
 }
 
 // writeLSB writes the code c for "Least Significant Bits first" data.
-func (e *encoder) writeLSB(c uint32) os.Error {
+func (e *encoder) writeLSB(c uint32) error {
 	e.bits |= c << e.nBits
 	e.nBits += e.width
 	for e.nBits >= 8 {
@@ -87,7 +88,7 @@
 }
 
 // writeMSB writes the code c for "Most Significant Bits first" data.
-func (e *encoder) writeMSB(c uint32) os.Error {
+func (e *encoder) writeMSB(c uint32) error {
 	e.bits |= c << (32 - e.width - e.nBits)
 	e.nBits += e.width
 	for e.nBits >= 8 {
@@ -102,12 +103,12 @@
 
 // errOutOfCodes is an internal error that means that the encoder has run out
 // of unused codes and a clear code needs to be sent next.
-var errOutOfCodes = os.NewError("lzw: out of codes")
+var errOutOfCodes = errors.New("lzw: out of codes")
 
 // incHi increments e.hi and checks for both overflow and running out of
 // unused codes. In the latter case, incHi sends a clear code, resets the
 // encoder state and returns errOutOfCodes.
-func (e *encoder) incHi() os.Error {
+func (e *encoder) incHi() error {
 	e.hi++
 	if e.hi == e.overflow {
 		e.width++
@@ -130,7 +131,7 @@
 }
 
 // Write writes a compressed representation of p to e's underlying writer.
-func (e *encoder) Write(p []byte) (int, os.Error) {
+func (e *encoder) Write(p []byte) (int, error) {
 	if e.err != nil {
 		return 0, e.err
 	}
@@ -188,7 +189,7 @@
 
 // Close closes the encoder, flushing any pending output. It does not close or
 // flush e's underlying writer.
-func (e *encoder) Close() os.Error {
+func (e *encoder) Close() error {
 	if e.err != nil {
 		if e.err == os.EINVAL {
 			return nil
@@ -230,14 +231,14 @@
 // The number of bits to use for literal codes, litWidth, must be in the
 // range [2,8] and is typically 8.
 func NewWriter(w io.Writer, order Order, litWidth int) io.WriteCloser {
-	var write func(*encoder, uint32) os.Error
+	var write func(*encoder, uint32) error
 	switch order {
 	case LSB:
 		write = (*encoder).writeLSB
 	case MSB:
 		write = (*encoder).writeMSB
 	default:
-		return &errWriteCloser{os.NewError("lzw: unknown order")}
+		return &errWriteCloser{errors.New("lzw: unknown order")}
 	}
 	if litWidth < 2 || 8 < litWidth {
 		return &errWriteCloser{fmt.Errorf("lzw: litWidth %d out of range", litWidth)}
diff --git a/src/pkg/compress/lzw/writer_test.go b/src/pkg/compress/lzw/writer_test.go
index 4c5e522..154cdf8 100644
--- a/src/pkg/compress/lzw/writer_test.go
+++ b/src/pkg/compress/lzw/writer_test.go
@@ -45,7 +45,7 @@
 		var b [4096]byte
 		for {
 			n, err0 := raw.Read(b[:])
-			if err0 != nil && err0 != os.EOF {
+			if err0 != nil && err0 != io.EOF {
 				t.Errorf("%s (order=%d litWidth=%d): %v", fn, order, litWidth, err0)
 				return
 			}
@@ -58,7 +58,7 @@
 				t.Errorf("%s (order=%d litWidth=%d): %v", fn, order, litWidth, err1)
 				return
 			}
-			if err0 == os.EOF {
+			if err0 == io.EOF {
 				break
 			}
 		}
diff --git a/src/pkg/compress/zlib/reader.go b/src/pkg/compress/zlib/reader.go
index 78dabdf..50a1e6c 100644
--- a/src/pkg/compress/zlib/reader.go
+++ b/src/pkg/compress/zlib/reader.go
@@ -26,36 +26,36 @@
 import (
 	"bufio"
 	"compress/flate"
+	"errors"
 	"hash"
 	"hash/adler32"
 	"io"
-	"os"
 )
 
 const zlibDeflate = 8
 
-var ChecksumError = os.NewError("zlib checksum error")
-var HeaderError = os.NewError("invalid zlib header")
-var DictionaryError = os.NewError("invalid zlib dictionary")
+var ChecksumError = errors.New("zlib checksum error")
+var HeaderError = errors.New("invalid zlib header")
+var DictionaryError = errors.New("invalid zlib dictionary")
 
 type reader struct {
 	r            flate.Reader
 	decompressor io.ReadCloser
 	digest       hash.Hash32
-	err          os.Error
+	err          error
 	scratch      [4]byte
 }
 
 // NewReader creates a new io.ReadCloser that satisfies reads by decompressing data read from r.
 // The implementation buffers input and may read more data than necessary from r.
 // It is the caller's responsibility to call Close on the ReadCloser when done.
-func NewReader(r io.Reader) (io.ReadCloser, os.Error) {
+func NewReader(r io.Reader) (io.ReadCloser, error) {
 	return NewReaderDict(r, nil)
 }
 
 // NewReaderDict is like NewReader but uses a preset dictionary.
 // NewReaderDict ignores the dictionary if the compressed data does not refer to it.
-func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, os.Error) {
+func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
 	z := new(reader)
 	if fr, ok := r.(flate.Reader); ok {
 		z.r = fr
@@ -87,7 +87,7 @@
 	return z, nil
 }
 
-func (z *reader) Read(p []byte) (n int, err os.Error) {
+func (z *reader) Read(p []byte) (n int, err error) {
 	if z.err != nil {
 		return 0, z.err
 	}
@@ -97,7 +97,7 @@
 
 	n, err = z.decompressor.Read(p)
 	z.digest.Write(p[0:n])
-	if n != 0 || err != os.EOF {
+	if n != 0 || err != io.EOF {
 		z.err = err
 		return
 	}
@@ -117,7 +117,7 @@
 }
 
 // Calling Close does not close the wrapped io.Reader originally passed to NewReader.
-func (z *reader) Close() os.Error {
+func (z *reader) Close() error {
 	if z.err != nil {
 		return z.err
 	}
diff --git a/src/pkg/compress/zlib/reader_test.go b/src/pkg/compress/zlib/reader_test.go
index 195db44..d8f9f21 100644
--- a/src/pkg/compress/zlib/reader_test.go
+++ b/src/pkg/compress/zlib/reader_test.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"testing"
 )
 
@@ -16,7 +15,7 @@
 	raw        string
 	compressed []byte
 	dict       []byte
-	err        os.Error
+	err        error
 }
 
 // Compare-to-golden test data was generated by the ZLIB example program at
diff --git a/src/pkg/compress/zlib/writer.go b/src/pkg/compress/zlib/writer.go
index 8f86e9c..bbff637 100644
--- a/src/pkg/compress/zlib/writer.go
+++ b/src/pkg/compress/zlib/writer.go
@@ -6,10 +6,10 @@
 
 import (
 	"compress/flate"
+	"errors"
 	"hash"
 	"hash/adler32"
 	"io"
-	"os"
 )
 
 // These constants are copied from the flate package, so that code that imports
@@ -27,17 +27,17 @@
 	w          io.Writer
 	compressor *flate.Writer
 	digest     hash.Hash32
-	err        os.Error
+	err        error
 	scratch    [4]byte
 }
 
 // NewWriter calls NewWriterLevel with the default compression level.
-func NewWriter(w io.Writer) (*Writer, os.Error) {
+func NewWriter(w io.Writer) (*Writer, error) {
 	return NewWriterLevel(w, DefaultCompression)
 }
 
 // NewWriterLevel calls NewWriterDict with no dictionary.
-func NewWriterLevel(w io.Writer, level int) (*Writer, os.Error) {
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
 	return NewWriterDict(w, level, nil)
 }
 
@@ -46,7 +46,7 @@
 // level is the compression level, which can be DefaultCompression, NoCompression,
 // or any integer value between BestSpeed and BestCompression (inclusive).
 // dict is the preset dictionary to compress with, or nil to use no dictionary.
-func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, os.Error) {
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
 	z := new(Writer)
 	// ZLIB has a two-byte header (as documented in RFC 1950).
 	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
@@ -66,7 +66,7 @@
 	case 7, 8, 9:
 		z.scratch[1] = 3 << 6
 	default:
-		return nil, os.NewError("level out of range")
+		return nil, errors.New("level out of range")
 	}
 	if dict != nil {
 		z.scratch[1] |= 1 << 5
@@ -94,7 +94,7 @@
 	return z, nil
 }
 
-func (z *Writer) Write(p []byte) (n int, err os.Error) {
+func (z *Writer) Write(p []byte) (n int, err error) {
 	if z.err != nil {
 		return 0, z.err
 	}
@@ -111,7 +111,7 @@
 }
 
 // Flush flushes the underlying compressor.
-func (z *Writer) Flush() os.Error {
+func (z *Writer) Flush() error {
 	if z.err != nil {
 		return z.err
 	}
@@ -120,7 +120,7 @@
 }
 
 // Calling Close does not close the wrapped io.Writer originally passed to NewWriter.
-func (z *Writer) Close() os.Error {
+func (z *Writer) Close() error {
 	if z.err != nil {
 		return z.err
 	}
diff --git a/src/pkg/crypto/aes/cipher.go b/src/pkg/crypto/aes/cipher.go
index 7322353..5ad75ec 100644
--- a/src/pkg/crypto/aes/cipher.go
+++ b/src/pkg/crypto/aes/cipher.go
@@ -4,10 +4,7 @@
 
 package aes
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // The AES block size in bytes.
 const BlockSize = 16
@@ -20,7 +17,7 @@
 
 type KeySizeError int
 
-func (k KeySizeError) String() string {
+func (k KeySizeError) Error() string {
 	return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
 }
 
@@ -28,7 +25,7 @@
 // The key argument should be the AES key,
 // either 16, 24, or 32 bytes to select
 // AES-128, AES-192, or AES-256.
-func NewCipher(key []byte) (*Cipher, os.Error) {
+func NewCipher(key []byte) (*Cipher, error) {
 	k := len(key)
 	switch k {
 	default:
diff --git a/src/pkg/crypto/bcrypt/base64.go b/src/pkg/crypto/bcrypt/base64.go
index ed6cea7..fc31160 100644
--- a/src/pkg/crypto/bcrypt/base64.go
+++ b/src/pkg/crypto/bcrypt/base64.go
@@ -4,10 +4,7 @@
 
 package bcrypt
 
-import (
-	"encoding/base64"
-	"os"
-)
+import "encoding/base64"
 
 const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
 
@@ -23,7 +20,7 @@
 	return dst[:n]
 }
 
-func base64Decode(src []byte) ([]byte, os.Error) {
+func base64Decode(src []byte) ([]byte, error) {
 	numOfEquals := 4 - (len(src) % 4)
 	for i := 0; i < numOfEquals; i++ {
 		src = append(src, '=')
diff --git a/src/pkg/crypto/bcrypt/bcrypt.go b/src/pkg/crypto/bcrypt/bcrypt.go
index 1e8ccfa..9740135 100644
--- a/src/pkg/crypto/bcrypt/bcrypt.go
+++ b/src/pkg/crypto/bcrypt/bcrypt.go
@@ -11,9 +11,9 @@
 	"crypto/blowfish"
 	"crypto/rand"
 	"crypto/subtle"
+	"errors"
 	"fmt"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -25,30 +25,30 @@
 
 // The error returned from CompareHashAndPassword when a password and hash do
 // not match.
-var MismatchedHashAndPasswordError = os.NewError("crypto/bcrypt: hashedPassword is not the hash of the given password")
+var MismatchedHashAndPasswordError = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
 
 // The error returned from CompareHashAndPassword when a hash is too short to
 // be a bcrypt hash.
-var HashTooShortError = os.NewError("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+var HashTooShortError = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
 
 // The error returned from CompareHashAndPassword when a hash was created with
 // a bcrypt algorithm newer than this implementation.
 type HashVersionTooNewError byte
 
-func (hv HashVersionTooNewError) String() string {
+func (hv HashVersionTooNewError) Error() string {
 	return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
 }
 
 // The error returned from CompareHashAndPassword when a hash starts with something other than '$'
 type InvalidHashPrefixError byte
 
-func (ih InvalidHashPrefixError) String() string {
+func (ih InvalidHashPrefixError) Error() string {
 	return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
 }
 
 type InvalidCostError int
 
-func (ic InvalidCostError) String() string {
+func (ic InvalidCostError) Error() string {
 	return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
 }
 
@@ -85,7 +85,7 @@
 // cost. If the cost given is less than MinCost, the cost will be set to
 // MinCost, instead. Use CompareHashAndPassword, as defined in this package,
 // to compare the returned hashed password with its cleartext version.
-func GenerateFromPassword(password []byte, cost int) ([]byte, os.Error) {
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
 	p, err := newFromPassword(password, cost)
 	if err != nil {
 		return nil, err
@@ -96,7 +96,7 @@
 // CompareHashAndPassword compares a bcrypt hashed password with its possible
 // plaintext equivalent. Note: Using bytes.Equal for this job is
 // insecure. Returns nil on success, or an error on failure.
-func CompareHashAndPassword(hashedPassword, password []byte) os.Error {
+func CompareHashAndPassword(hashedPassword, password []byte) error {
 	p, err := newFromHash(hashedPassword)
 	if err != nil {
 		return err
@@ -115,7 +115,7 @@
 	return MismatchedHashAndPasswordError
 }
 
-func newFromPassword(password []byte, cost int) (*hashed, os.Error) {
+func newFromPassword(password []byte, cost int) (*hashed, error) {
 	if cost < MinCost {
 		cost = DefaultCost
 	}
@@ -144,7 +144,7 @@
 	return p, err
 }
 
-func newFromHash(hashedSecret []byte) (*hashed, os.Error) {
+func newFromHash(hashedSecret []byte) (*hashed, error) {
 	if len(hashedSecret) < minHashSize {
 		return nil, HashTooShortError
 	}
@@ -172,7 +172,7 @@
 	return p, nil
 }
 
-func bcrypt(password []byte, cost uint32, salt []byte) ([]byte, os.Error) {
+func bcrypt(password []byte, cost uint32, salt []byte) ([]byte, error) {
 	cipherData := make([]byte, len(magicCipherData))
 	copy(cipherData, magicCipherData)
 
@@ -193,7 +193,7 @@
 	return hsh, nil
 }
 
-func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, os.Error) {
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
 
 	csalt, err := base64Decode(salt)
 	if err != nil {
@@ -240,7 +240,7 @@
 	return arr[:n]
 }
 
-func (p *hashed) decodeVersion(sbytes []byte) (int, os.Error) {
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
 	if sbytes[0] != '$' {
 		return -1, InvalidHashPrefixError(sbytes[0])
 	}
@@ -257,7 +257,7 @@
 }
 
 // sbytes should begin where decodeVersion left off.
-func (p *hashed) decodeCost(sbytes []byte) (int, os.Error) {
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
 	cost, err := strconv.Atoi(string(sbytes[0:2]))
 	if err != nil {
 		return -1, err
@@ -274,7 +274,7 @@
 	return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
 }
 
-func checkCost(cost int) os.Error {
+func checkCost(cost int) error {
 	if cost < MinCost || cost > MaxCost {
 		return InvalidCostError(cost)
 	}
diff --git a/src/pkg/crypto/bcrypt/bcrypt_test.go b/src/pkg/crypto/bcrypt/bcrypt_test.go
index 3efbc1c..a3155c5 100644
--- a/src/pkg/crypto/bcrypt/bcrypt_test.go
+++ b/src/pkg/crypto/bcrypt/bcrypt_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"bytes"
-	"os"
 	"testing"
 )
 
@@ -68,7 +67,7 @@
 }
 
 type InvalidHashTest struct {
-	err  os.Error
+	err  error
 	hash []byte
 }
 
@@ -81,7 +80,7 @@
 }
 
 func TestInvalidHashErrors(t *testing.T) {
-	check := func(name string, expected, err os.Error) {
+	check := func(name string, expected, err error) {
 		if err == nil {
 			t.Errorf("%s: Should have returned an error", name)
 		}
diff --git a/src/pkg/crypto/blowfish/cipher.go b/src/pkg/crypto/blowfish/cipher.go
index 3439825..a5d56d2e 100644
--- a/src/pkg/crypto/blowfish/cipher.go
+++ b/src/pkg/crypto/blowfish/cipher.go
@@ -8,10 +8,7 @@
 // The code is a port of Bruce Schneier's C implementation.
 // See http://www.schneier.com/blowfish.html.
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // The Blowfish block size in bytes.
 const BlockSize = 8
@@ -24,13 +21,13 @@
 
 type KeySizeError int
 
-func (k KeySizeError) String() string {
+func (k KeySizeError) Error() string {
 	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
 }
 
 // NewCipher creates and returns a Cipher.
 // The key argument should be the Blowfish key, 4 to 56 bytes.
-func NewCipher(key []byte) (*Cipher, os.Error) {
+func NewCipher(key []byte) (*Cipher, error) {
 	var result Cipher
 	k := len(key)
 	if k < 4 || k > 56 {
@@ -45,7 +42,7 @@
 // schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
 // sufficient and desirable. For bcrypt compatibility, the key can be over 56
 // bytes.
-func NewSaltedCipher(key, salt []byte) (*Cipher, os.Error) {
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
 	var result Cipher
 	k := len(key)
 	if k < 4 {
diff --git a/src/pkg/crypto/cast5/cast5.go b/src/pkg/crypto/cast5/cast5.go
index e9d4a24..889a13c 100644
--- a/src/pkg/crypto/cast5/cast5.go
+++ b/src/pkg/crypto/cast5/cast5.go
@@ -6,9 +6,7 @@
 // OpenPGP cipher.
 package cast5
 
-import (
-	"os"
-)
+import "errors"
 
 const BlockSize = 8
 const KeySize = 16
@@ -18,9 +16,9 @@
 	rotate  [16]uint8
 }
 
-func NewCipher(key []byte) (c *Cipher, err os.Error) {
+func NewCipher(key []byte) (c *Cipher, err error) {
 	if len(key) != KeySize {
-		return nil, os.NewError("CAST5: keys must be 16 bytes")
+		return nil, errors.New("CAST5: keys must be 16 bytes")
 	}
 
 	c = new(Cipher)
diff --git a/src/pkg/crypto/cipher/io.go b/src/pkg/crypto/cipher/io.go
index 97f40b8..9888c98 100644
--- a/src/pkg/crypto/cipher/io.go
+++ b/src/pkg/crypto/cipher/io.go
@@ -4,10 +4,7 @@
 
 package cipher
 
-import (
-	"os"
-	"io"
-)
+import "io"
 
 // The Stream* objects are so simple that all their members are public. Users
 // can create them themselves.
@@ -19,7 +16,7 @@
 	R io.Reader
 }
 
-func (r StreamReader) Read(dst []byte) (n int, err os.Error) {
+func (r StreamReader) Read(dst []byte) (n int, err error) {
 	n, err = r.R.Read(dst)
 	r.S.XORKeyStream(dst[:n], dst[:n])
 	return
@@ -31,10 +28,10 @@
 type StreamWriter struct {
 	S   Stream
 	W   io.Writer
-	Err os.Error
+	Err error
 }
 
-func (w StreamWriter) Write(src []byte) (n int, err os.Error) {
+func (w StreamWriter) Write(src []byte) (n int, err error) {
 	if w.Err != nil {
 		return 0, w.Err
 	}
@@ -50,7 +47,7 @@
 	return
 }
 
-func (w StreamWriter) Close() os.Error {
+func (w StreamWriter) Close() error {
 	// This saves us from either requiring a WriteCloser or having a
 	// StreamWriterCloser.
 	return w.W.(io.Closer).Close()
diff --git a/src/pkg/crypto/des/cipher.go b/src/pkg/crypto/des/cipher.go
index d17a1a7..fc252c8 100644
--- a/src/pkg/crypto/des/cipher.go
+++ b/src/pkg/crypto/des/cipher.go
@@ -4,17 +4,14 @@
 
 package des
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // The DES block size in bytes.
 const BlockSize = 8
 
 type KeySizeError int
 
-func (k KeySizeError) String() string {
+func (k KeySizeError) Error() string {
 	return "crypto/des: invalid key size " + strconv.Itoa(int(k))
 }
 
@@ -24,7 +21,7 @@
 }
 
 // NewCipher creates and returns a new Cipher.
-func NewCipher(key []byte) (*Cipher, os.Error) {
+func NewCipher(key []byte) (*Cipher, error) {
 	if len(key) != 8 {
 		return nil, KeySizeError(len(key))
 	}
@@ -60,7 +57,7 @@
 }
 
 // NewCipher creates and returns a new Cipher.
-func NewTripleDESCipher(key []byte) (*TripleDESCipher, os.Error) {
+func NewTripleDESCipher(key []byte) (*TripleDESCipher, error) {
 	if len(key) != 24 {
 		return nil, KeySizeError(len(key))
 	}
diff --git a/src/pkg/crypto/dsa/dsa.go b/src/pkg/crypto/dsa/dsa.go
index a5f96fe..692d62a 100644
--- a/src/pkg/crypto/dsa/dsa.go
+++ b/src/pkg/crypto/dsa/dsa.go
@@ -7,8 +7,8 @@
 
 import (
 	"big"
+	"errors"
 	"io"
-	"os"
 )
 
 // Parameters represents the domain parameters for a key. These parameters can
@@ -31,7 +31,7 @@
 
 type invalidPublicKeyError int
 
-func (invalidPublicKeyError) String() string {
+func (invalidPublicKeyError) Error() string {
 	return "crypto/dsa: invalid public key"
 }
 
@@ -58,7 +58,7 @@
 
 // GenerateParameters puts a random, valid set of DSA parameters into params.
 // This function takes many seconds, even on fast machines.
-func GenerateParameters(params *Parameters, rand io.Reader, sizes ParameterSizes) (err os.Error) {
+func GenerateParameters(params *Parameters, rand io.Reader, sizes ParameterSizes) (err error) {
 	// This function doesn't follow FIPS 186-3 exactly in that it doesn't
 	// use a verification seed to generate the primes. The verification
 	// seed doesn't appear to be exported or used by other code and
@@ -79,7 +79,7 @@
 		L = 3072
 		N = 256
 	default:
-		return os.NewError("crypto/dsa: invalid ParameterSizes")
+		return errors.New("crypto/dsa: invalid ParameterSizes")
 	}
 
 	qBytes := make([]byte, N/8)
@@ -156,9 +156,9 @@
 
 // GenerateKey generates a public&private key pair. The Parameters of the
 // PrivateKey must already be valid (see GenerateParameters).
-func GenerateKey(priv *PrivateKey, rand io.Reader) os.Error {
+func GenerateKey(priv *PrivateKey, rand io.Reader) error {
 	if priv.P == nil || priv.Q == nil || priv.G == nil {
-		return os.NewError("crypto/dsa: parameters not set up before generating key")
+		return errors.New("crypto/dsa: parameters not set up before generating key")
 	}
 
 	x := new(big.Int)
@@ -185,7 +185,7 @@
 // larger message) using the private key, priv. It returns the signature as a
 // pair of integers. The security of the private key depends on the entropy of
 // rand.
-func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err os.Error) {
+func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
 	// FIPS 186-3, section 4.6
 
 	n := priv.Q.BitLen()
diff --git a/src/pkg/crypto/ecdsa/ecdsa.go b/src/pkg/crypto/ecdsa/ecdsa.go
index 7bce1bc..b7f235b 100644
--- a/src/pkg/crypto/ecdsa/ecdsa.go
+++ b/src/pkg/crypto/ecdsa/ecdsa.go
@@ -16,7 +16,6 @@
 	"big"
 	"crypto/elliptic"
 	"io"
-	"os"
 )
 
 // PublicKey represents an ECDSA public key.
@@ -35,7 +34,7 @@
 
 // randFieldElement returns a random element of the field underlying the given
 // curve using the procedure given in [NSA] A.2.1.
-func randFieldElement(c *elliptic.Curve, rand io.Reader) (k *big.Int, err os.Error) {
+func randFieldElement(c *elliptic.Curve, rand io.Reader) (k *big.Int, err error) {
 	b := make([]byte, c.BitSize/8+8)
 	_, err = io.ReadFull(rand, b)
 	if err != nil {
@@ -50,7 +49,7 @@
 }
 
 // GenerateKey generates a public&private key pair.
-func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err os.Error) {
+func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err error) {
 	k, err := randFieldElement(c, rand)
 	if err != nil {
 		return
@@ -86,7 +85,7 @@
 // larger message) using the private key, priv. It returns the signature as a
 // pair of integers. The security of the private key depends on the entropy of
 // rand.
-func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err os.Error) {
+func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
 	// See [NSA] 3.4.1
 	c := priv.PublicKey.Curve
 
diff --git a/src/pkg/crypto/elliptic/elliptic.go b/src/pkg/crypto/elliptic/elliptic.go
index 41835f1..3c3327f 100644
--- a/src/pkg/crypto/elliptic/elliptic.go
+++ b/src/pkg/crypto/elliptic/elliptic.go
@@ -16,7 +16,6 @@
 import (
 	"big"
 	"io"
-	"os"
 	"sync"
 )
 
@@ -249,7 +248,7 @@
 
 // GenerateKey returns a public/private key pair. The private key is generated
 // using the given reader, which must return random data.
-func (curve *Curve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err os.Error) {
+func (curve *Curve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {
 	byteLen := (curve.BitSize + 7) >> 3
 	priv = make([]byte, byteLen)
 
diff --git a/src/pkg/crypto/hmac/hmac.go b/src/pkg/crypto/hmac/hmac.go
index 04ec86e..6a17bbd 100644
--- a/src/pkg/crypto/hmac/hmac.go
+++ b/src/pkg/crypto/hmac/hmac.go
@@ -13,7 +13,6 @@
 	"crypto/sha1"
 	"crypto/sha256"
 	"hash"
-	"os"
 )
 
 // FIPS 198:
@@ -60,7 +59,7 @@
 	return h.outer.Sum()
 }
 
-func (h *hmac) Write(p []byte) (n int, err os.Error) {
+func (h *hmac) Write(p []byte) (n int, err error) {
 	return h.inner.Write(p)
 }
 
diff --git a/src/pkg/crypto/md4/md4.go b/src/pkg/crypto/md4/md4.go
index 848d955..f21cc51 100644
--- a/src/pkg/crypto/md4/md4.go
+++ b/src/pkg/crypto/md4/md4.go
@@ -8,7 +8,6 @@
 import (
 	"crypto"
 	"hash"
-	"os"
 )
 
 func init() {
@@ -52,7 +51,7 @@
 
 func (d *digest) Size() int { return Size }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	nn = len(p)
 	d.len += uint64(nn)
 	if d.nx > 0 {
diff --git a/src/pkg/crypto/md5/md5.go b/src/pkg/crypto/md5/md5.go
index 378faa6..20f3a1b 100644
--- a/src/pkg/crypto/md5/md5.go
+++ b/src/pkg/crypto/md5/md5.go
@@ -8,7 +8,6 @@
 import (
 	"crypto"
 	"hash"
-	"os"
 )
 
 func init() {
@@ -52,7 +51,7 @@
 
 func (d *digest) Size() int { return Size }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	nn = len(p)
 	d.len += uint64(nn)
 	if d.nx > 0 {
diff --git a/src/pkg/crypto/ocsp/ocsp.go b/src/pkg/crypto/ocsp/ocsp.go
index 7ea7a1e..f697fa1 100644
--- a/src/pkg/crypto/ocsp/ocsp.go
+++ b/src/pkg/crypto/ocsp/ocsp.go
@@ -14,7 +14,6 @@
 	_ "crypto/sha1"
 	"crypto/x509"
 	"crypto/x509/pkix"
-	"os"
 	"time"
 )
 
@@ -106,7 +105,7 @@
 // ParseError results from an invalid OCSP response.
 type ParseError string
 
-func (p ParseError) String() string {
+func (p ParseError) Error() string {
 	return string(p)
 }
 
@@ -114,7 +113,7 @@
 // responses for a single certificate and only those using RSA signatures.
 // Non-RSA responses will result in an x509.UnsupportedAlgorithmError.
 // Signature errors or parse failures will result in a ParseError.
-func ParseResponse(bytes []byte) (*Response, os.Error) {
+func ParseResponse(bytes []byte) (*Response, error) {
 	var resp responseASN1
 	rest, err := asn1.Unmarshal(bytes, &resp)
 	if err != nil {
diff --git a/src/pkg/crypto/openpgp/armor/armor.go b/src/pkg/crypto/openpgp/armor/armor.go
index 9c4180d..707bdf3 100644
--- a/src/pkg/crypto/openpgp/armor/armor.go
+++ b/src/pkg/crypto/openpgp/armor/armor.go
@@ -9,10 +9,9 @@
 import (
 	"bufio"
 	"bytes"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"encoding/base64"
 	"io"
-	"os"
 )
 
 // A Block represents an OpenPGP armored structure.
@@ -36,7 +35,7 @@
 	oReader openpgpReader
 }
 
-var ArmorCorrupt os.Error = error.StructuralError("armor invalid")
+var ArmorCorrupt error = error_.StructuralError("armor invalid")
 
 const crc24Init = 0xb704ce
 const crc24Poly = 0x1864cfb
@@ -69,9 +68,9 @@
 	crc uint32
 }
 
-func (l *lineReader) Read(p []byte) (n int, err os.Error) {
+func (l *lineReader) Read(p []byte) (n int, err error) {
 	if l.eof {
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 
 	if len(l.buf) > 0 {
@@ -101,7 +100,7 @@
 			uint32(expectedBytes[2])
 
 		line, _, err = l.in.ReadLine()
-		if err != nil && err != os.EOF {
+		if err != nil && err != io.EOF {
 			return
 		}
 		if !bytes.HasPrefix(line, armorEnd) {
@@ -109,7 +108,7 @@
 		}
 
 		l.eof = true
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 
 	if len(line) > 64 {
@@ -138,11 +137,11 @@
 	currentCRC uint32
 }
 
-func (r *openpgpReader) Read(p []byte) (n int, err os.Error) {
+func (r *openpgpReader) Read(p []byte) (n int, err error) {
 	n, err = r.b64Reader.Read(p)
 	r.currentCRC = crc24(r.currentCRC, p[:n])
 
-	if err == os.EOF {
+	if err == io.EOF {
 		if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
 			return 0, ArmorCorrupt
 		}
@@ -155,7 +154,7 @@
 // leading garbage. If it doesn't find a block, it will return nil, os.EOF. The
 // given Reader is not usable after calling this function: an arbitrary amount
 // of data may have been read past the end of the block.
-func Decode(in io.Reader) (p *Block, err os.Error) {
+func Decode(in io.Reader) (p *Block, err error) {
 	r, _ := bufio.NewReaderSize(in, 100)
 	var line []byte
 	ignoreNext := false
diff --git a/src/pkg/crypto/openpgp/armor/encode.go b/src/pkg/crypto/openpgp/armor/encode.go
index 99dee37..6f07582 100644
--- a/src/pkg/crypto/openpgp/armor/encode.go
+++ b/src/pkg/crypto/openpgp/armor/encode.go
@@ -7,7 +7,6 @@
 import (
 	"encoding/base64"
 	"io"
-	"os"
 )
 
 var armorHeaderSep = []byte(": ")
@@ -16,7 +15,7 @@
 var armorEndOfLineOut = []byte("-----\n")
 
 // writeSlices writes its arguments to the given Writer.
-func writeSlices(out io.Writer, slices ...[]byte) (err os.Error) {
+func writeSlices(out io.Writer, slices ...[]byte) (err error) {
 	for _, s := range slices {
 		_, err = out.Write(s)
 		if err != nil {
@@ -45,7 +44,7 @@
 	}
 }
 
-func (l *lineBreaker) Write(b []byte) (n int, err os.Error) {
+func (l *lineBreaker) Write(b []byte) (n int, err error) {
 	n = len(b)
 
 	if n == 0 {
@@ -81,7 +80,7 @@
 	return
 }
 
-func (l *lineBreaker) Close() (err os.Error) {
+func (l *lineBreaker) Close() (err error) {
 	if l.used > 0 {
 		_, err = l.out.Write(l.line[0:l.used])
 		if err != nil {
@@ -106,12 +105,12 @@
 	blockType []byte
 }
 
-func (e *encoding) Write(data []byte) (n int, err os.Error) {
+func (e *encoding) Write(data []byte) (n int, err error) {
 	e.crc = crc24(e.crc, data)
 	return e.b64.Write(data)
 }
 
-func (e *encoding) Close() (err os.Error) {
+func (e *encoding) Close() (err error) {
 	err = e.b64.Close()
 	if err != nil {
 		return
@@ -131,7 +130,7 @@
 
 // Encode returns a WriteCloser which will encode the data written to it in
 // OpenPGP armor.
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err os.Error) {
+func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
 	bType := []byte(blockType)
 	err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
 	if err != nil {
diff --git a/src/pkg/crypto/openpgp/canonical_text.go b/src/pkg/crypto/openpgp/canonical_text.go
index 293eff3..fe4557a 100644
--- a/src/pkg/crypto/openpgp/canonical_text.go
+++ b/src/pkg/crypto/openpgp/canonical_text.go
@@ -4,10 +4,7 @@
 
 package openpgp
 
-import (
-	"hash"
-	"os"
-)
+import "hash"
 
 // NewCanonicalTextHash reformats text written to it into the canonical
 // form and then applies the hash h.  See RFC 4880, section 5.2.1.
@@ -22,7 +19,7 @@
 
 var newline = []byte{'\r', '\n'}
 
-func (cth *canonicalTextHash) Write(buf []byte) (int, os.Error) {
+func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
 	start := 0
 
 	for i, c := range buf {
diff --git a/src/pkg/crypto/openpgp/canonical_text_test.go b/src/pkg/crypto/openpgp/canonical_text_test.go
index ccf2910..ae54f8c 100644
--- a/src/pkg/crypto/openpgp/canonical_text_test.go
+++ b/src/pkg/crypto/openpgp/canonical_text_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"bytes"
-	"os"
 	"testing"
 )
 
@@ -14,7 +13,7 @@
 	buf *bytes.Buffer
 }
 
-func (r recordingHash) Write(b []byte) (n int, err os.Error) {
+func (r recordingHash) Write(b []byte) (n int, err error) {
 	return r.buf.Write(b)
 }
 
diff --git a/src/pkg/crypto/openpgp/elgamal/elgamal.go b/src/pkg/crypto/openpgp/elgamal/elgamal.go
index 99a6e3e..2ed49f6 100644
--- a/src/pkg/crypto/openpgp/elgamal/elgamal.go
+++ b/src/pkg/crypto/openpgp/elgamal/elgamal.go
@@ -16,8 +16,8 @@
 	"big"
 	"crypto/rand"
 	"crypto/subtle"
+	"errors"
 	"io"
-	"os"
 )
 
 // PublicKey represents an ElGamal public key.
@@ -34,10 +34,10 @@
 // Encrypt encrypts the given message to the given public key. The result is a
 // pair of integers. Errors can result from reading random, or because msg is
 // too large to be encrypted to the public key.
-func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err os.Error) {
+func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
 	pLen := (pub.P.BitLen() + 7) / 8
 	if len(msg) > pLen-11 {
-		err = os.NewError("elgamal: message too long")
+		err = errors.New("elgamal: message too long")
 		return
 	}
 
@@ -74,7 +74,7 @@
 // be used to break the cryptosystem.  See ``Chosen Ciphertext Attacks
 // Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
 // Bleichenbacher, Advances in Cryptology (Crypto '98),
-func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err os.Error) {
+func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
 	s := new(big.Int).Exp(c1, priv.X, priv.P)
 	s.ModInverse(s, priv.P)
 	s.Mul(s, c2)
@@ -97,13 +97,13 @@
 	}
 
 	if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
-		return nil, os.NewError("elgamal: decryption error")
+		return nil, errors.New("elgamal: decryption error")
 	}
 	return em[index+1:], nil
 }
 
 // nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err os.Error) {
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
 	_, err = io.ReadFull(rand, s)
 	if err != nil {
 		return
diff --git a/src/pkg/crypto/openpgp/error/error.go b/src/pkg/crypto/openpgp/error/error.go
index 9cc21f1..ceeb054 100644
--- a/src/pkg/crypto/openpgp/error/error.go
+++ b/src/pkg/crypto/openpgp/error/error.go
@@ -13,7 +13,7 @@
 // invalid.
 type StructuralError string
 
-func (s StructuralError) String() string {
+func (s StructuralError) Error() string {
 	return "OpenPGP data invalid: " + string(s)
 }
 
@@ -21,7 +21,7 @@
 // makes use of currently unimplemented features.
 type UnsupportedError string
 
-func (s UnsupportedError) String() string {
+func (s UnsupportedError) Error() string {
 	return "OpenPGP feature unsupported: " + string(s)
 }
 
@@ -29,7 +29,7 @@
 // incorrect value.
 type InvalidArgumentError string
 
-func (i InvalidArgumentError) String() string {
+func (i InvalidArgumentError) Error() string {
 	return "OpenPGP argument invalid: " + string(i)
 }
 
@@ -37,13 +37,13 @@
 // validate.
 type SignatureError string
 
-func (b SignatureError) String() string {
+func (b SignatureError) Error() string {
 	return "OpenPGP signature invalid: " + string(b)
 }
 
 type keyIncorrectError int
 
-func (ki keyIncorrectError) String() string {
+func (ki keyIncorrectError) Error() string {
 	return "the given key was incorrect"
 }
 
@@ -51,7 +51,7 @@
 
 type unknownIssuerError int
 
-func (unknownIssuerError) String() string {
+func (unknownIssuerError) Error() string {
 	return "signature make by unknown entity"
 }
 
@@ -59,6 +59,6 @@
 
 type UnknownPacketTypeError uint8
 
-func (upte UnknownPacketTypeError) String() string {
+func (upte UnknownPacketTypeError) Error() string {
 	return "unknown OpenPGP packet type: " + strconv.Itoa(int(upte))
 }
diff --git a/src/pkg/crypto/openpgp/keys.go b/src/pkg/crypto/openpgp/keys.go
index c70fb79..b705d22 100644
--- a/src/pkg/crypto/openpgp/keys.go
+++ b/src/pkg/crypto/openpgp/keys.go
@@ -7,11 +7,10 @@
 import (
 	"crypto"
 	"crypto/openpgp/armor"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/packet"
 	"crypto/rsa"
 	"io"
-	"os"
 	"time"
 )
 
@@ -178,16 +177,16 @@
 }
 
 // ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
-func ReadArmoredKeyRing(r io.Reader) (EntityList, os.Error) {
+func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
 	block, err := armor.Decode(r)
-	if err == os.EOF {
-		return nil, error.InvalidArgumentError("no armored data found")
+	if err == io.EOF {
+		return nil, error_.InvalidArgumentError("no armored data found")
 	}
 	if err != nil {
 		return nil, err
 	}
 	if block.Type != PublicKeyType && block.Type != PrivateKeyType {
-		return nil, error.InvalidArgumentError("expected public or private key block, got: " + block.Type)
+		return nil, error_.InvalidArgumentError("expected public or private key block, got: " + block.Type)
 	}
 
 	return ReadKeyRing(block.Body)
@@ -195,19 +194,19 @@
 
 // ReadKeyRing reads one or more public/private keys. Unsupported keys are
 // ignored as long as at least a single valid key is found.
-func ReadKeyRing(r io.Reader) (el EntityList, err os.Error) {
+func ReadKeyRing(r io.Reader) (el EntityList, err error) {
 	packets := packet.NewReader(r)
-	var lastUnsupportedError os.Error
+	var lastUnsupportedError error
 
 	for {
 		var e *Entity
 		e, err = readEntity(packets)
 		if err != nil {
-			if _, ok := err.(error.UnsupportedError); ok {
+			if _, ok := err.(error_.UnsupportedError); ok {
 				lastUnsupportedError = err
 				err = readToNextPublicKey(packets)
 			}
-			if err == os.EOF {
+			if err == io.EOF {
 				err = nil
 				break
 			}
@@ -228,14 +227,14 @@
 
 // readToNextPublicKey reads packets until the start of the entity and leaves
 // the first packet of the new entity in the Reader.
-func readToNextPublicKey(packets *packet.Reader) (err os.Error) {
+func readToNextPublicKey(packets *packet.Reader) (err error) {
 	var p packet.Packet
 	for {
 		p, err = packets.Next()
-		if err == os.EOF {
+		if err == io.EOF {
 			return
 		} else if err != nil {
-			if _, ok := err.(error.UnsupportedError); ok {
+			if _, ok := err.(error_.UnsupportedError); ok {
 				err = nil
 				continue
 			}
@@ -253,7 +252,7 @@
 
 // readEntity reads an entity (public key, identities, subkeys etc) from the
 // given Reader.
-func readEntity(packets *packet.Reader) (*Entity, os.Error) {
+func readEntity(packets *packet.Reader) (*Entity, error) {
 	e := new(Entity)
 	e.Identities = make(map[string]*Identity)
 
@@ -266,21 +265,21 @@
 	if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
 		if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
 			packets.Unread(p)
-			return nil, error.StructuralError("first packet was not a public/private key")
+			return nil, error_.StructuralError("first packet was not a public/private key")
 		} else {
 			e.PrimaryKey = &e.PrivateKey.PublicKey
 		}
 	}
 
 	if !e.PrimaryKey.PubKeyAlgo.CanSign() {
-		return nil, error.StructuralError("primary key cannot be used for signatures")
+		return nil, error_.StructuralError("primary key cannot be used for signatures")
 	}
 
 	var current *Identity
 EachPacket:
 	for {
 		p, err := packets.Next()
-		if err == os.EOF {
+		if err == io.EOF {
 			break
 		} else if err != nil {
 			return nil, err
@@ -295,7 +294,7 @@
 
 			for {
 				p, err = packets.Next()
-				if err == os.EOF {
+				if err == io.EOF {
 					return nil, io.ErrUnexpectedEOF
 				} else if err != nil {
 					return nil, err
@@ -303,12 +302,12 @@
 
 				sig, ok := p.(*packet.Signature)
 				if !ok {
-					return nil, error.StructuralError("user ID packet not followed by self-signature")
+					return nil, error_.StructuralError("user ID packet not followed by self-signature")
 				}
 
 				if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
 					if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, sig); err != nil {
-						return nil, error.StructuralError("user ID self-signature invalid: " + err.String())
+						return nil, error_.StructuralError("user ID self-signature invalid: " + err.Error())
 					}
 					current.SelfSignature = sig
 					break
@@ -317,7 +316,7 @@
 			}
 		case *packet.Signature:
 			if current == nil {
-				return nil, error.StructuralError("signature packet found before user id packet")
+				return nil, error_.StructuralError("signature packet found before user id packet")
 			}
 			current.Signatures = append(current.Signatures, pkt)
 		case *packet.PrivateKey:
@@ -344,34 +343,34 @@
 	}
 
 	if len(e.Identities) == 0 {
-		return nil, error.StructuralError("entity without any identities")
+		return nil, error_.StructuralError("entity without any identities")
 	}
 
 	return e, nil
 }
 
-func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) os.Error {
+func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
 	var subKey Subkey
 	subKey.PublicKey = pub
 	subKey.PrivateKey = priv
 	p, err := packets.Next()
-	if err == os.EOF {
+	if err == io.EOF {
 		return io.ErrUnexpectedEOF
 	}
 	if err != nil {
-		return error.StructuralError("subkey signature invalid: " + err.String())
+		return error_.StructuralError("subkey signature invalid: " + err.Error())
 	}
 	var ok bool
 	subKey.Sig, ok = p.(*packet.Signature)
 	if !ok {
-		return error.StructuralError("subkey packet not followed by signature")
+		return error_.StructuralError("subkey packet not followed by signature")
 	}
 	if subKey.Sig.SigType != packet.SigTypeSubkeyBinding {
-		return error.StructuralError("subkey signature with wrong type")
+		return error_.StructuralError("subkey signature with wrong type")
 	}
 	err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
 	if err != nil {
-		return error.StructuralError("subkey signature invalid: " + err.String())
+		return error_.StructuralError("subkey signature invalid: " + err.Error())
 	}
 	e.Subkeys = append(e.Subkeys, subKey)
 	return nil
@@ -382,10 +381,10 @@
 // NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
 // single identity composed of the given full name, comment and email, any of
 // which may be empty but must not contain any of "()<>\x00".
-func NewEntity(rand io.Reader, currentTimeSecs int64, name, comment, email string) (*Entity, os.Error) {
+func NewEntity(rand io.Reader, currentTimeSecs int64, name, comment, email string) (*Entity, error) {
 	uid := packet.NewUserId(name, comment, email)
 	if uid == nil {
-		return nil, error.InvalidArgumentError("user id field contained invalid characters")
+		return nil, error_.InvalidArgumentError("user id field contained invalid characters")
 	}
 	signingPriv, err := rsa.GenerateKey(rand, defaultRSAKeyBits)
 	if err != nil {
@@ -442,7 +441,7 @@
 // SerializePrivate serializes an Entity, including private key material, to
 // the given Writer. For now, it must only be used on an Entity returned from
 // NewEntity.
-func (e *Entity) SerializePrivate(w io.Writer) (err os.Error) {
+func (e *Entity) SerializePrivate(w io.Writer) (err error) {
 	err = e.PrivateKey.Serialize(w)
 	if err != nil {
 		return
@@ -480,7 +479,7 @@
 
 // Serialize writes the public part of the given Entity to w. (No private
 // key material will be output).
-func (e *Entity) Serialize(w io.Writer) os.Error {
+func (e *Entity) Serialize(w io.Writer) error {
 	err := e.PrimaryKey.Serialize(w)
 	if err != nil {
 		return err
@@ -518,16 +517,16 @@
 // associated with e. The provided identity must already be an element of
 // e.Identities and the private key of signer must have been decrypted if
 // necessary.
-func (e *Entity) SignIdentity(identity string, signer *Entity) os.Error {
+func (e *Entity) SignIdentity(identity string, signer *Entity) error {
 	if signer.PrivateKey == nil {
-		return error.InvalidArgumentError("signing Entity must have a private key")
+		return error_.InvalidArgumentError("signing Entity must have a private key")
 	}
 	if signer.PrivateKey.Encrypted {
-		return error.InvalidArgumentError("signing Entity's private key must be decrypted")
+		return error_.InvalidArgumentError("signing Entity's private key must be decrypted")
 	}
 	ident, ok := e.Identities[identity]
 	if !ok {
-		return error.InvalidArgumentError("given identity string not found in Entity")
+		return error_.InvalidArgumentError("given identity string not found in Entity")
 	}
 
 	sig := &packet.Signature{
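
Throughout keys.go the crypto/openpgp/error package is now imported as error_, because the bare identifier error would otherwise shadow the new predeclared error type inside this file. A small stand-alone sketch of the same aliasing trick, using the standard errors package as a stand-in for crypto/openpgp/error:

package main

import (
	error_ "errors" // stand-in for: error_ "crypto/openpgp/error"
	"fmt"
)

// lookupKey is illustrative only; the point is that the return type can
// still be written as the built-in error because the imported package
// goes by error_, not error.
func lookupKey(id uint64) error {
	return error_.New(fmt.Sprintf("openpgp: key %d not found", id))
}

func main() {
	fmt.Println(lookupKey(7))
}
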
diff --git a/src/pkg/crypto/openpgp/packet/compressed.go b/src/pkg/crypto/openpgp/packet/compressed.go
index 1c15c24..f80d798 100644
--- a/src/pkg/crypto/openpgp/packet/compressed.go
+++ b/src/pkg/crypto/openpgp/packet/compressed.go
@@ -7,9 +7,8 @@
 import (
 	"compress/flate"
 	"compress/zlib"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -19,7 +18,7 @@
 	Body io.Reader
 }
 
-func (c *Compressed) parse(r io.Reader) os.Error {
+func (c *Compressed) parse(r io.Reader) error {
 	var buf [1]byte
 	_, err := readFull(r, buf[:])
 	if err != nil {
@@ -32,7 +31,7 @@
 	case 2:
 		c.Body, err = zlib.NewReader(r)
 	default:
-		err = error.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+		err = error_.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
 	}
 
 	return err
diff --git a/src/pkg/crypto/openpgp/packet/compressed_test.go b/src/pkg/crypto/openpgp/packet/compressed_test.go
index 24fe501..cb2d70b 100644
--- a/src/pkg/crypto/openpgp/packet/compressed_test.go
+++ b/src/pkg/crypto/openpgp/packet/compressed_test.go
@@ -7,7 +7,7 @@
 import (
 	"bytes"
 	"encoding/hex"
-	"os"
+	"io"
 	"io/ioutil"
 	"testing"
 )
@@ -26,7 +26,7 @@
 	}
 
 	contents, err := ioutil.ReadAll(c.Body)
-	if err != nil && err != os.EOF {
+	if err != nil && err != io.EOF {
 		t.Error(err)
 		return
 	}
diff --git a/src/pkg/crypto/openpgp/packet/encrypted_key.go b/src/pkg/crypto/openpgp/packet/encrypted_key.go
index b4730cb..d05103f 100644
--- a/src/pkg/crypto/openpgp/packet/encrypted_key.go
+++ b/src/pkg/crypto/openpgp/packet/encrypted_key.go
@@ -7,12 +7,11 @@
 import (
 	"big"
 	"crypto/openpgp/elgamal"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/rand"
 	"crypto/rsa"
 	"encoding/binary"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -29,14 +28,14 @@
 	encryptedMPI1, encryptedMPI2 []byte
 }
 
-func (e *EncryptedKey) parse(r io.Reader) (err os.Error) {
+func (e *EncryptedKey) parse(r io.Reader) (err error) {
 	var buf [10]byte
 	_, err = readFull(r, buf[:])
 	if err != nil {
 		return
 	}
 	if buf[0] != encryptedKeyVersion {
-		return error.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
+		return error_.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
 	}
 	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
 	e.Algo = PublicKeyAlgorithm(buf[9])
@@ -64,8 +63,8 @@
 
 // Decrypt decrypts an encrypted session key with the given private key. The
 // private key must have been decrypted first.
-func (e *EncryptedKey) Decrypt(priv *PrivateKey) os.Error {
-	var err os.Error
+func (e *EncryptedKey) Decrypt(priv *PrivateKey) error {
+	var err error
 	var b []byte
 
 	// TODO(agl): use session key decryption routines here to avoid
@@ -78,7 +77,7 @@
 		c2 := new(big.Int).SetBytes(e.encryptedMPI2)
 		b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
 	default:
-		err = error.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+		err = error_.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
 	}
 
 	if err != nil {
@@ -90,7 +89,7 @@
 	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
 	checksum := checksumKeyMaterial(e.Key)
 	if checksum != expectedChecksum {
-		return error.StructuralError("EncryptedKey checksum incorrect")
+		return error_.StructuralError("EncryptedKey checksum incorrect")
 	}
 
 	return nil
@@ -98,7 +97,7 @@
 
 // SerializeEncryptedKey serializes an encrypted key packet to w that contains
 // key, encrypted to pub.
-func SerializeEncryptedKey(w io.Writer, rand io.Reader, pub *PublicKey, cipherFunc CipherFunction, key []byte) os.Error {
+func SerializeEncryptedKey(w io.Writer, rand io.Reader, pub *PublicKey, cipherFunc CipherFunction, key []byte) error {
 	var buf [10]byte
 	buf[0] = encryptedKeyVersion
 	binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
@@ -117,16 +116,16 @@
 	case PubKeyAlgoElGamal:
 		return serializeEncryptedKeyElGamal(w, rand, buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
 	case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
-		return error.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
+		return error_.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
 	}
 
-	return error.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
+	return error_.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
 }
 
-func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) os.Error {
+func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
 	cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
 	if err != nil {
-		return error.InvalidArgumentError("RSA encryption failed: " + err.String())
+		return error_.InvalidArgumentError("RSA encryption failed: " + err.Error())
 	}
 
 	packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
@@ -142,10 +141,10 @@
 	return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
 }
 
-func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) os.Error {
+func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
 	c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
 	if err != nil {
-		return error.InvalidArgumentError("ElGamal encryption failed: " + err.String())
+		return error_.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
 	}
 
 	packetLen := 10 /* header length */
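
The wrapping pattern in the hunks above also changes err.String() to err.Error() when folding a lower-level failure into one of the package's string-based error types. A minimal sketch of that wrapping, with a made-up failure message standing in for a real RSA error:

package main

import (
	"errors"
	"fmt"
)

// InvalidArgumentError reuses the name from the package above purely
// for illustration; it is just a string with an Error method.
type InvalidArgumentError string

func (e InvalidArgumentError) Error() string {
	return "openpgp: invalid argument: " + string(e)
}

func encryptKey() error {
	err := errors.New("rsa: message too long for RSA public key size") // stand-in failure
	// Under Go 1 the message comes from Error(), not String().
	return InvalidArgumentError("RSA encryption failed: " + err.Error())
}

func main() {
	fmt.Println(encryptKey())
}
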
diff --git a/src/pkg/crypto/openpgp/packet/literal.go b/src/pkg/crypto/openpgp/packet/literal.go
index 9411572..1a9ec6e5 100644
--- a/src/pkg/crypto/openpgp/packet/literal.go
+++ b/src/pkg/crypto/openpgp/packet/literal.go
@@ -7,7 +7,6 @@
 import (
 	"encoding/binary"
 	"io"
-	"os"
 )
 
 // LiteralData represents an encrypted file. See RFC 4880, section 5.9.
@@ -24,7 +23,7 @@
 	return l.FileName == "_CONSOLE"
 }
 
-func (l *LiteralData) parse(r io.Reader) (err os.Error) {
+func (l *LiteralData) parse(r io.Reader) (err error) {
 	var buf [256]byte
 
 	_, err = readFull(r, buf[:2])
@@ -55,7 +54,7 @@
 // SerializeLiteral serializes a literal data packet to w and returns a
 // WriteCloser to which the data itself can be written and which MUST be closed
 // on completion. The fileName is truncated to 255 bytes.
-func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err os.Error) {
+func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
 	var buf [4]byte
 	buf[0] = 't'
 	if isBinary {
diff --git a/src/pkg/crypto/openpgp/packet/one_pass_signature.go b/src/pkg/crypto/openpgp/packet/one_pass_signature.go
index ca826e4..13e6aa5 100644
--- a/src/pkg/crypto/openpgp/packet/one_pass_signature.go
+++ b/src/pkg/crypto/openpgp/packet/one_pass_signature.go
@@ -6,11 +6,10 @@
 
 import (
 	"crypto"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/s2k"
 	"encoding/binary"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -26,7 +25,7 @@
 
 const onePassSignatureVersion = 3
 
-func (ops *OnePassSignature) parse(r io.Reader) (err os.Error) {
+func (ops *OnePassSignature) parse(r io.Reader) (err error) {
 	var buf [13]byte
 
 	_, err = readFull(r, buf[:])
@@ -34,13 +33,13 @@
 		return
 	}
 	if buf[0] != onePassSignatureVersion {
-		err = error.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
+		err = error_.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
 	}
 
 	var ok bool
 	ops.Hash, ok = s2k.HashIdToHash(buf[2])
 	if !ok {
-		return error.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
+		return error_.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
 	}
 
 	ops.SigType = SignatureType(buf[1])
@@ -51,14 +50,14 @@
 }
 
 // Serialize marshals the given OnePassSignature to w.
-func (ops *OnePassSignature) Serialize(w io.Writer) os.Error {
+func (ops *OnePassSignature) Serialize(w io.Writer) error {
 	var buf [13]byte
 	buf[0] = onePassSignatureVersion
 	buf[1] = uint8(ops.SigType)
 	var ok bool
 	buf[2], ok = s2k.HashToHashId(ops.Hash)
 	if !ok {
-		return error.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
+		return error_.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
 	}
 	buf[3] = uint8(ops.PubKeyAlgo)
 	binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
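
Both parse and Serialize above turn a comma-ok miss from s2k.HashIdToHash/HashToHashId into a typed UnsupportedError. A self-contained sketch of that pattern; the lookup table and error type here are toy stand-ins, not the real s2k mapping:

package main

import (
	"crypto"
	"fmt"
	"strconv"
)

// UnsupportedError stands in for error_.UnsupportedError.
type UnsupportedError string

func (e UnsupportedError) Error() string { return "openpgp: unsupported feature: " + string(e) }

// hashToId is a toy subset of the OpenPGP hash-algorithm mapping.
var hashToId = map[crypto.Hash]uint8{crypto.SHA1: 2, crypto.SHA256: 8}

// hashId converts a failed lookup (ok == false) into a typed error.
func hashId(h crypto.Hash) (uint8, error) {
	id, ok := hashToId[h]
	if !ok {
		return 0, UnsupportedError("hash type: " + strconv.Itoa(int(h)))
	}
	return id, nil
}

func main() {
	fmt.Println(hashId(crypto.SHA256)) // 8 <nil>
	fmt.Println(hashId(crypto.MD4))    // 0 openpgp: unsupported feature: hash type: 1
}
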
diff --git a/src/pkg/crypto/openpgp/packet/packet.go b/src/pkg/crypto/openpgp/packet/packet.go
index 1d7297e..f7ed353 100644
--- a/src/pkg/crypto/openpgp/packet/packet.go
+++ b/src/pkg/crypto/openpgp/packet/packet.go
@@ -11,23 +11,22 @@
 	"crypto/aes"
 	"crypto/cast5"
 	"crypto/cipher"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"io"
-	"os"
 )
 
 // readFull is the same as io.ReadFull except that reading zero bytes returns
 // ErrUnexpectedEOF rather than EOF.
-func readFull(r io.Reader, buf []byte) (n int, err os.Error) {
+func readFull(r io.Reader, buf []byte) (n int, err error) {
 	n, err = io.ReadFull(r, buf)
-	if err == os.EOF {
+	if err == io.EOF {
 		err = io.ErrUnexpectedEOF
 	}
 	return
 }
 
 // readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
-func readLength(r io.Reader) (length int64, isPartial bool, err os.Error) {
+func readLength(r io.Reader) (length int64, isPartial bool, err error) {
 	var buf [4]byte
 	_, err = readFull(r, buf[:1])
 	if err != nil {
@@ -68,10 +67,10 @@
 	isPartial bool
 }
 
-func (r *partialLengthReader) Read(p []byte) (n int, err os.Error) {
+func (r *partialLengthReader) Read(p []byte) (n int, err error) {
 	for r.remaining == 0 {
 		if !r.isPartial {
-			return 0, os.EOF
+			return 0, io.EOF
 		}
 		r.remaining, r.isPartial, err = readLength(r.r)
 		if err != nil {
@@ -86,7 +85,7 @@
 
 	n, err = r.r.Read(p[:int(toRead)])
 	r.remaining -= int64(n)
-	if n < int(toRead) && err == os.EOF {
+	if n < int(toRead) && err == io.EOF {
 		err = io.ErrUnexpectedEOF
 	}
 	return
@@ -99,7 +98,7 @@
 	lengthByte [1]byte
 }
 
-func (w *partialLengthWriter) Write(p []byte) (n int, err os.Error) {
+func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
 	for len(p) > 0 {
 		for power := uint(14); power < 32; power-- {
 			l := 1 << power
@@ -123,7 +122,7 @@
 	return
 }
 
-func (w *partialLengthWriter) Close() os.Error {
+func (w *partialLengthWriter) Close() error {
 	w.lengthByte[0] = 0
 	_, err := w.w.Write(w.lengthByte[:])
 	if err != nil {
@@ -139,16 +138,16 @@
 	n int64
 }
 
-func (l *spanReader) Read(p []byte) (n int, err os.Error) {
+func (l *spanReader) Read(p []byte) (n int, err error) {
 	if l.n <= 0 {
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	if int64(len(p)) > l.n {
 		p = p[0:l.n]
 	}
 	n, err = l.r.Read(p)
 	l.n -= int64(n)
-	if l.n > 0 && err == os.EOF {
+	if l.n > 0 && err == io.EOF {
 		err = io.ErrUnexpectedEOF
 	}
 	return
@@ -156,14 +155,14 @@
 
 // readHeader parses a packet header and returns an io.Reader which will return
 // the contents of the packet. See RFC 4880, section 4.2.
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err os.Error) {
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
 	var buf [4]byte
 	_, err = io.ReadFull(r, buf[:1])
 	if err != nil {
 		return
 	}
 	if buf[0]&0x80 == 0 {
-		err = error.StructuralError("tag byte does not have MSB set")
+		err = error_.StructuralError("tag byte does not have MSB set")
 		return
 	}
 	if buf[0]&0x40 == 0 {
@@ -209,7 +208,7 @@
 
 // serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
 // 4.2.
-func serializeHeader(w io.Writer, ptype packetType, length int) (err os.Error) {
+func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
 	var buf [6]byte
 	var n int
 
@@ -238,7 +237,7 @@
 // serializeStreamHeader writes an OpenPGP packet header to w where the
 // length of the packet is unknown. It returns a io.WriteCloser which can be
 // used to write the contents of the packet. See RFC 4880, section 4.2.
-func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err os.Error) {
+func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
 	var buf [1]byte
 	buf[0] = 0x80 | 0x40 | byte(ptype)
 	_, err = w.Write(buf[:])
@@ -252,19 +251,19 @@
 // Packet represents an OpenPGP packet. Users are expected to try casting
 // instances of this interface to specific packet types.
 type Packet interface {
-	parse(io.Reader) os.Error
+	parse(io.Reader) error
 }
 
 // consumeAll reads from the given Reader until error, returning the number of
 // bytes read.
-func consumeAll(r io.Reader) (n int64, err os.Error) {
+func consumeAll(r io.Reader) (n int64, err error) {
 	var m int
 	var buf [1024]byte
 
 	for {
 		m, err = r.Read(buf[:])
 		n += int64(m)
-		if err == os.EOF {
+		if err == io.EOF {
 			err = nil
 			return
 		}
@@ -298,7 +297,7 @@
 
 // Read reads a single OpenPGP packet from the given io.Reader. If there is an
 // error parsing a packet, the whole packet is consumed from the input.
-func Read(r io.Reader) (p Packet, err os.Error) {
+func Read(r io.Reader) (p Packet, err error) {
 	tag, _, contents, err := readHeader(r)
 	if err != nil {
 		return
@@ -338,7 +337,7 @@
 		se.MDC = true
 		p = se
 	default:
-		err = error.UnknownPacketTypeError(tag)
+		err = error_.UnknownPacketTypeError(tag)
 	}
 	if p != nil {
 		err = p.parse(contents)
@@ -447,7 +446,7 @@
 // readMPI reads a big integer from r. The bit length returned is the bit
 // length that was specified in r. This is preserved so that the integer can be
 // reserialized exactly.
-func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) {
+func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
 	var buf [2]byte
 	_, err = readFull(r, buf[0:])
 	if err != nil {
@@ -469,7 +468,7 @@
 }
 
 // writeMPI serializes a big integer to w.
-func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) {
+func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
 	_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
 	if err == nil {
 		_, err = w.Write(mpiBytes)
@@ -478,6 +477,6 @@
 }
 
 // writeBig serializes a *big.Int to w.
-func writeBig(w io.Writer, i *big.Int) os.Error {
+func writeBig(w io.Writer, i *big.Int) error {
 	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
 }
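
consumeAll and the small reader helpers above now compare against io.EOF instead of os.EOF; the control flow is otherwise unchanged. A minimal sketch of the same drain-until-EOF loop against any io.Reader:

package main

import (
	"fmt"
	"io"
	"strings"
)

// drain mirrors the consumeAll shape: read until io.EOF, and report
// EOF as normal termination rather than as a failure.
func drain(r io.Reader) (int64, error) {
	var n int64
	var buf [1024]byte
	for {
		m, err := r.Read(buf[:])
		n += int64(m)
		if err == io.EOF {
			return n, nil // end of input is expected here
		}
		if err != nil {
			return n, err
		}
	}
}

func main() {
	n, err := drain(strings.NewReader("hello, packet"))
	fmt.Println(n, err) // 13 <nil>
}
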
diff --git a/src/pkg/crypto/openpgp/packet/packet_test.go b/src/pkg/crypto/openpgp/packet/packet_test.go
index 23d9978a..5326641 100644
--- a/src/pkg/crypto/openpgp/packet/packet_test.go
+++ b/src/pkg/crypto/openpgp/packet/packet_test.go
@@ -6,12 +6,11 @@
 
 import (
 	"bytes"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"encoding/hex"
 	"fmt"
 	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -49,7 +48,7 @@
 	hexInput  string
 	length    int64
 	isPartial bool
-	err       os.Error
+	err       error
 }{
 	{"", 0, false, io.ErrUnexpectedEOF},
 	{"1f", 31, false, nil},
@@ -87,7 +86,7 @@
 
 var partialLengthReaderTests = []struct {
 	hexInput  string
-	err       os.Error
+	err       error
 	hexOutput string
 }{
 	{"e0", io.ErrUnexpectedEOF, ""},
@@ -153,14 +152,14 @@
 	for i, test := range readHeaderTests {
 		tag, length, contents, err := readHeader(readerFromHex(test.hexInput))
 		if test.structuralError {
-			if _, ok := err.(error.StructuralError); ok {
+			if _, ok := err.(error_.StructuralError); ok {
 				continue
 			}
 			t.Errorf("%d: expected StructuralError, got:%s", i, err)
 			continue
 		}
 		if err != nil {
-			if len(test.hexInput) == 0 && err == os.EOF {
+			if len(test.hexInput) == 0 && err == io.EOF {
 				continue
 			}
 			if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
diff --git a/src/pkg/crypto/openpgp/packet/private_key.go b/src/pkg/crypto/openpgp/packet/private_key.go
index 6f8133d..742ac51 100644
--- a/src/pkg/crypto/openpgp/packet/private_key.go
+++ b/src/pkg/crypto/openpgp/packet/private_key.go
@@ -10,13 +10,12 @@
 	"crypto/cipher"
 	"crypto/dsa"
 	"crypto/openpgp/elgamal"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/s2k"
 	"crypto/rsa"
 	"crypto/sha1"
 	"io"
 	"io/ioutil"
-	"os"
 	"strconv"
 )
 
@@ -40,7 +39,7 @@
 	return pk
 }
 
-func (pk *PrivateKey) parse(r io.Reader) (err os.Error) {
+func (pk *PrivateKey) parse(r io.Reader) (err error) {
 	err = (&pk.PublicKey).parse(r)
 	if err != nil {
 		return
@@ -72,13 +71,13 @@
 			pk.sha1Checksum = true
 		}
 	default:
-		return error.UnsupportedError("deprecated s2k function in private key")
+		return error_.UnsupportedError("deprecated s2k function in private key")
 	}
 
 	if pk.Encrypted {
 		blockSize := pk.cipher.blockSize()
 		if blockSize == 0 {
-			return error.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
+			return error_.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
 		}
 		pk.iv = make([]byte, blockSize)
 		_, err = readFull(r, pk.iv)
@@ -111,7 +110,7 @@
 	return h
 }
 
-func (pk *PrivateKey) Serialize(w io.Writer) (err os.Error) {
+func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
 	// TODO(agl): support encrypted private keys
 	buf := bytes.NewBuffer(nil)
 	err = pk.PublicKey.serializeWithoutHeaders(buf)
@@ -126,7 +125,7 @@
 	case *rsa.PrivateKey:
 		err = serializeRSAPrivateKey(privateKeyBuf, priv)
 	default:
-		err = error.InvalidArgumentError("non-RSA private key")
+		err = error_.InvalidArgumentError("non-RSA private key")
 	}
 	if err != nil {
 		return
@@ -160,7 +159,7 @@
 	return
 }
 
-func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) os.Error {
+func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
 	err := writeBig(w, priv.D)
 	if err != nil {
 		return err
@@ -177,7 +176,7 @@
 }
 
 // Decrypt decrypts an encrypted private key using a passphrase.
-func (pk *PrivateKey) Decrypt(passphrase []byte) os.Error {
+func (pk *PrivateKey) Decrypt(passphrase []byte) error {
 	if !pk.Encrypted {
 		return nil
 	}
@@ -192,18 +191,18 @@
 
 	if pk.sha1Checksum {
 		if len(data) < sha1.Size {
-			return error.StructuralError("truncated private key data")
+			return error_.StructuralError("truncated private key data")
 		}
 		h := sha1.New()
 		h.Write(data[:len(data)-sha1.Size])
 		sum := h.Sum()
 		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
-			return error.StructuralError("private key checksum failure")
+			return error_.StructuralError("private key checksum failure")
 		}
 		data = data[:len(data)-sha1.Size]
 	} else {
 		if len(data) < 2 {
-			return error.StructuralError("truncated private key data")
+			return error_.StructuralError("truncated private key data")
 		}
 		var sum uint16
 		for i := 0; i < len(data)-2; i++ {
@@ -211,7 +210,7 @@
 		}
 		if data[len(data)-2] != uint8(sum>>8) ||
 			data[len(data)-1] != uint8(sum) {
-			return error.StructuralError("private key checksum failure")
+			return error_.StructuralError("private key checksum failure")
 		}
 		data = data[:len(data)-2]
 	}
@@ -219,7 +218,7 @@
 	return pk.parsePrivateKey(data)
 }
 
-func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) {
+func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
 	switch pk.PublicKey.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
 		return pk.parseRSAPrivateKey(data)
@@ -231,7 +230,7 @@
 	panic("impossible")
 }
 
-func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err os.Error) {
+func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
 	rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
 	rsaPriv := new(rsa.PrivateKey)
 	rsaPriv.PublicKey = *rsaPub
@@ -262,7 +261,7 @@
 	return nil
 }
 
-func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err os.Error) {
+func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
 	dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
 	dsaPriv := new(dsa.PrivateKey)
 	dsaPriv.PublicKey = *dsaPub
@@ -281,7 +280,7 @@
 	return nil
 }
 
-func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err os.Error) {
+func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
 	pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
 	priv := new(elgamal.PrivateKey)
 	priv.PublicKey = *pub
diff --git a/src/pkg/crypto/openpgp/packet/public_key.go b/src/pkg/crypto/openpgp/packet/public_key.go
index e6b0ae5..af0bc22 100644
--- a/src/pkg/crypto/openpgp/packet/public_key.go
+++ b/src/pkg/crypto/openpgp/packet/public_key.go
@@ -8,14 +8,13 @@
 	"big"
 	"crypto/dsa"
 	"crypto/openpgp/elgamal"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/rsa"
 	"crypto/sha1"
 	"encoding/binary"
 	"fmt"
 	"hash"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -53,7 +52,7 @@
 	return pk
 }
 
-func (pk *PublicKey) parse(r io.Reader) (err os.Error) {
+func (pk *PublicKey) parse(r io.Reader) (err error) {
 	// RFC 4880, section 5.5.2
 	var buf [6]byte
 	_, err = readFull(r, buf[:])
@@ -61,7 +60,7 @@
 		return
 	}
 	if buf[0] != 4 {
-		return error.UnsupportedError("public key version")
+		return error_.UnsupportedError("public key version")
 	}
 	pk.CreationTime = uint32(buf[1])<<24 | uint32(buf[2])<<16 | uint32(buf[3])<<8 | uint32(buf[4])
 	pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
@@ -73,7 +72,7 @@
 	case PubKeyAlgoElGamal:
 		err = pk.parseElGamal(r)
 	default:
-		err = error.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
+		err = error_.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
 	}
 	if err != nil {
 		return
@@ -94,7 +93,7 @@
 
 // parseRSA parses RSA public key material from the given Reader. See RFC 4880,
 // section 5.5.2.
-func (pk *PublicKey) parseRSA(r io.Reader) (err os.Error) {
+func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
 	pk.n.bytes, pk.n.bitLength, err = readMPI(r)
 	if err != nil {
 		return
@@ -105,7 +104,7 @@
 	}
 
 	if len(pk.e.bytes) > 3 {
-		err = error.UnsupportedError("large public exponent")
+		err = error_.UnsupportedError("large public exponent")
 		return
 	}
 	rsa := &rsa.PublicKey{
@@ -122,7 +121,7 @@
 
 // parseDSA parses DSA public key material from the given Reader. See RFC 4880,
 // section 5.5.2.
-func (pk *PublicKey) parseDSA(r io.Reader) (err os.Error) {
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
 	pk.p.bytes, pk.p.bitLength, err = readMPI(r)
 	if err != nil {
 		return
@@ -151,7 +150,7 @@
 
 // parseElGamal parses ElGamal public key material from the given Reader. See
 // RFC 4880, section 5.5.2.
-func (pk *PublicKey) parseElGamal(r io.Reader) (err os.Error) {
+func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
 	pk.p.bytes, pk.p.bitLength, err = readMPI(r)
 	if err != nil {
 		return
@@ -199,7 +198,7 @@
 	return
 }
 
-func (pk *PublicKey) Serialize(w io.Writer) (err os.Error) {
+func (pk *PublicKey) Serialize(w io.Writer) (err error) {
 	length := 6 // 6 byte header
 
 	switch pk.PubKeyAlgo {
@@ -232,7 +231,7 @@
 
 // serializeWithoutHeaders marshals the PublicKey to w in the form of an
 // OpenPGP public key packet, not including the packet header.
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err os.Error) {
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
 	var buf [6]byte
 	buf[0] = 4
 	buf[1] = byte(pk.CreationTime >> 24)
@@ -254,7 +253,7 @@
 	case PubKeyAlgoElGamal:
 		return writeMPIs(w, pk.p, pk.g, pk.y)
 	}
-	return error.InvalidArgumentError("bad public-key algorithm")
+	return error_.InvalidArgumentError("bad public-key algorithm")
 }
 
 // CanSign returns true iff this public key can generate signatures
@@ -264,20 +263,20 @@
 
 // VerifySignature returns nil iff sig is a valid signature, made by this
 // public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.Error) {
+func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
 	if !pk.CanSign() {
-		return error.InvalidArgumentError("public key cannot generate signatures")
+		return error_.InvalidArgumentError("public key cannot generate signatures")
 	}
 
 	signed.Write(sig.HashSuffix)
 	hashBytes := signed.Sum()
 
 	if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
-		return error.SignatureError("hash tag doesn't match")
+		return error_.SignatureError("hash tag doesn't match")
 	}
 
 	if pk.PubKeyAlgo != sig.PubKeyAlgo {
-		return error.InvalidArgumentError("public key and signature use different algorithms")
+		return error_.InvalidArgumentError("public key and signature use different algorithms")
 	}
 
 	switch pk.PubKeyAlgo {
@@ -285,13 +284,13 @@
 		rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
 		err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes)
 		if err != nil {
-			return error.SignatureError("RSA verification failure")
+			return error_.SignatureError("RSA verification failure")
 		}
 		return nil
 	case PubKeyAlgoDSA:
 		dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
 		if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
-			return error.SignatureError("DSA verification failure")
+			return error_.SignatureError("DSA verification failure")
 		}
 		return nil
 	default:
@@ -302,10 +301,10 @@
 
 // keySignatureHash returns a Hash of the message that needs to be signed for
 // pk to assert a subkey relationship to signed.
-func keySignatureHash(pk, signed *PublicKey, sig *Signature) (h hash.Hash, err os.Error) {
+func keySignatureHash(pk, signed *PublicKey, sig *Signature) (h hash.Hash, err error) {
 	h = sig.Hash.New()
 	if h == nil {
-		return nil, error.UnsupportedError("hash function")
+		return nil, error_.UnsupportedError("hash function")
 	}
 
 	// RFC 4880, section 5.2.4
@@ -318,7 +317,7 @@
 
 // VerifyKeySignature returns nil iff sig is a valid signature, made by this
 // public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) (err os.Error) {
+func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) (err error) {
 	h, err := keySignatureHash(pk, signed, sig)
 	if err != nil {
 		return err
@@ -328,10 +327,10 @@
 
 // userIdSignatureHash returns a Hash of the message that needs to be signed
 // to assert that pk is a valid key for id.
-func userIdSignatureHash(id string, pk *PublicKey, sig *Signature) (h hash.Hash, err os.Error) {
+func userIdSignatureHash(id string, pk *PublicKey, sig *Signature) (h hash.Hash, err error) {
 	h = sig.Hash.New()
 	if h == nil {
-		return nil, error.UnsupportedError("hash function")
+		return nil, error_.UnsupportedError("hash function")
 	}
 
 	// RFC 4880, section 5.2.4
@@ -352,7 +351,7 @@
 
 // VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
 // public key, of id.
-func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Error) {
+func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err error) {
 	h, err := userIdSignatureHash(id, pk, sig)
 	if err != nil {
 		return err
@@ -382,7 +381,7 @@
 
 // writeMPIs is a utility function for serializing several big integers to the
 // given Writer.
-func writeMPIs(w io.Writer, mpis ...parsedMPI) (err os.Error) {
+func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
 	for _, mpi := range mpis {
 		err = writeMPI(w, mpi.bitLength, mpi.bytes)
 		if err != nil {
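
The Verify* methods above keep the convention that nil means the signature checked out and any non-nil value is a typed, descriptive error. A small sketch of that shape; the two-byte hash-tag comparison below is only a stand-in for real signature verification:

package main

import (
	"crypto/sha1"
	"fmt"
)

// SignatureError mirrors the string-based error used by VerifySignature.
type SignatureError string

func (e SignatureError) Error() string { return "openpgp: invalid signature: " + string(e) }

// verifyTag returns nil iff the leading hash bytes match the expected tag.
func verifyTag(msg []byte, tag [2]byte) error {
	sum := sha1.Sum(msg)
	if sum[0] != tag[0] || sum[1] != tag[1] {
		return SignatureError("hash tag doesn't match")
	}
	return nil
}

func main() {
	msg := []byte("hello")
	sum := sha1.Sum(msg)
	fmt.Println(verifyTag(msg, [2]byte{sum[0], sum[1]})) // <nil>: accepted
	fmt.Println(verifyTag(msg, [2]byte{0x00, 0x00}))     // typed error: rejected
}
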
diff --git a/src/pkg/crypto/openpgp/packet/reader.go b/src/pkg/crypto/openpgp/packet/reader.go
index 5febc3b..e3d733c 100644
--- a/src/pkg/crypto/openpgp/packet/reader.go
+++ b/src/pkg/crypto/openpgp/packet/reader.go
@@ -5,9 +5,8 @@
 package packet
 
 import (
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"io"
-	"os"
 )
 
 // Reader reads packets from an io.Reader and allows packets to be 'unread' so
@@ -19,7 +18,7 @@
 
 // Next returns the most recently unread Packet, or reads another packet from
 // the top-most io.Reader. Unknown packet types are skipped.
-func (r *Reader) Next() (p Packet, err os.Error) {
+func (r *Reader) Next() (p Packet, err error) {
 	if len(r.q) > 0 {
 		p = r.q[len(r.q)-1]
 		r.q = r.q[:len(r.q)-1]
@@ -31,16 +30,16 @@
 		if err == nil {
 			return
 		}
-		if err == os.EOF {
+		if err == io.EOF {
 			r.readers = r.readers[:len(r.readers)-1]
 			continue
 		}
-		if _, ok := err.(error.UnknownPacketTypeError); !ok {
+		if _, ok := err.(error_.UnknownPacketTypeError); !ok {
 			return nil, err
 		}
 	}
 
-	return nil, os.EOF
+	return nil, io.EOF
 }
 
 // Push causes the Reader to start reading from a new io.Reader. When an EOF
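
Reader.Next above now distinguishes three cases by inspecting the error value: nil (packet returned), io.EOF (pop the reader stack), and error_.UnknownPacketTypeError (skip and keep reading); anything else aborts. A toy sketch of that classification step, with a local stand-in for the unknown-packet error type:

package main

import "fmt"

// UnknownPacketTypeError stands in for error_.UnknownPacketTypeError.
type UnknownPacketTypeError uint8

func (e UnknownPacketTypeError) Error() string {
	return fmt.Sprintf("unknown OpenPGP packet type: %d", uint8(e))
}

// classify mirrors the decision Next makes for each parse error.
func classify(err error) string {
	if err == nil {
		return "ok"
	}
	if _, ok := err.(UnknownPacketTypeError); ok {
		return "skip" // unknown packet types are tolerated
	}
	return "fatal"
}

func main() {
	fmt.Println(classify(nil))                          // ok
	fmt.Println(classify(UnknownPacketTypeError(63)))   // skip
	fmt.Println(classify(fmt.Errorf("truncated data"))) // fatal
}
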
diff --git a/src/pkg/crypto/openpgp/packet/signature.go b/src/pkg/crypto/openpgp/packet/signature.go
index 7577e28..4ebb906 100644
--- a/src/pkg/crypto/openpgp/packet/signature.go
+++ b/src/pkg/crypto/openpgp/packet/signature.go
@@ -7,14 +7,13 @@
 import (
 	"crypto"
 	"crypto/dsa"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/s2k"
 	"crypto/rand"
 	"crypto/rsa"
 	"encoding/binary"
 	"hash"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -53,7 +52,7 @@
 	outSubpackets []outputSubpacket
 }
 
-func (sig *Signature) parse(r io.Reader) (err os.Error) {
+func (sig *Signature) parse(r io.Reader) (err error) {
 	// RFC 4880, section 5.2.3
 	var buf [5]byte
 	_, err = readFull(r, buf[:1])
@@ -61,7 +60,7 @@
 		return
 	}
 	if buf[0] != 4 {
-		err = error.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
+		err = error_.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
 		return
 	}
 
@@ -74,14 +73,14 @@
 	switch sig.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
 	default:
-		err = error.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		err = error_.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
 		return
 	}
 
 	var ok bool
 	sig.Hash, ok = s2k.HashIdToHash(buf[2])
 	if !ok {
-		return error.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
+		return error_.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
 	}
 
 	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
@@ -144,7 +143,7 @@
 
 // parseSignatureSubpackets parses subpackets of the main signature packet. See
 // RFC 4880, section 5.2.3.1.
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err os.Error) {
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
 	for len(subpackets) > 0 {
 		subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
 		if err != nil {
@@ -153,7 +152,7 @@
 	}
 
 	if sig.CreationTime == 0 {
-		err = error.StructuralError("no creation time in signature")
+		err = error_.StructuralError("no creation time in signature")
 	}
 
 	return
@@ -174,7 +173,7 @@
 )
 
 // parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err os.Error) {
+func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
 	// RFC 4880, section 5.2.3.1
 	var (
 		length     uint32
@@ -207,7 +206,7 @@
 	rest = subpacket[length:]
 	subpacket = subpacket[:length]
 	if len(subpacket) == 0 {
-		err = error.StructuralError("zero length signature subpacket")
+		err = error_.StructuralError("zero length signature subpacket")
 		return
 	}
 	packetType = signatureSubpacketType(subpacket[0] & 0x7f)
@@ -217,11 +216,11 @@
 	switch packetType {
 	case creationTimeSubpacket:
 		if !isHashed {
-			err = error.StructuralError("signature creation time in non-hashed area")
+			err = error_.StructuralError("signature creation time in non-hashed area")
 			return
 		}
 		if len(subpacket) != 4 {
-			err = error.StructuralError("signature creation time not four bytes")
+			err = error_.StructuralError("signature creation time not four bytes")
 			return
 		}
 		sig.CreationTime = binary.BigEndian.Uint32(subpacket)
@@ -231,7 +230,7 @@
 			return
 		}
 		if len(subpacket) != 4 {
-			err = error.StructuralError("expiration subpacket with bad length")
+			err = error_.StructuralError("expiration subpacket with bad length")
 			return
 		}
 		sig.SigLifetimeSecs = new(uint32)
@@ -242,7 +241,7 @@
 			return
 		}
 		if len(subpacket) != 4 {
-			err = error.StructuralError("key expiration subpacket with bad length")
+			err = error_.StructuralError("key expiration subpacket with bad length")
 			return
 		}
 		sig.KeyLifetimeSecs = new(uint32)
@@ -257,7 +256,7 @@
 	case issuerSubpacket:
 		// Issuer, section 5.2.3.5
 		if len(subpacket) != 8 {
-			err = error.StructuralError("issuer subpacket with bad length")
+			err = error_.StructuralError("issuer subpacket with bad length")
 			return
 		}
 		sig.IssuerKeyId = new(uint64)
@@ -282,7 +281,7 @@
 			return
 		}
 		if len(subpacket) != 1 {
-			err = error.StructuralError("primary user id subpacket with bad length")
+			err = error_.StructuralError("primary user id subpacket with bad length")
 			return
 		}
 		sig.IsPrimaryId = new(bool)
@@ -295,7 +294,7 @@
 			return
 		}
 		if len(subpacket) == 0 {
-			err = error.StructuralError("empty key flags subpacket")
+			err = error_.StructuralError("empty key flags subpacket")
 			return
 		}
 		sig.FlagsValid = true
@@ -314,14 +313,14 @@
 
 	default:
 		if isCritical {
-			err = error.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
+			err = error_.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
 			return
 		}
 	}
 	return
 
 Truncated:
-	err = error.StructuralError("signature subpacket truncated")
+	err = error_.StructuralError("signature subpacket truncated")
 	return
 }
 
@@ -384,7 +383,7 @@
 }
 
 // buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
-func (sig *Signature) buildHashSuffix() (err os.Error) {
+func (sig *Signature) buildHashSuffix() (err error) {
 	hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
 
 	var ok bool
@@ -396,7 +395,7 @@
 	sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
 	if !ok {
 		sig.HashSuffix = nil
-		return error.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
+		return error_.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
 	}
 	sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
 	sig.HashSuffix[5] = byte(hashedSubpacketsLen)
@@ -411,7 +410,7 @@
 	return
 }
 
-func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err os.Error) {
+func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
 	err = sig.buildHashSuffix()
 	if err != nil {
 		return
@@ -426,7 +425,7 @@
 // Sign signs a message with a private key. The hash, h, must contain
 // the hash of the message to be signed and will be mutated by this function.
 // On success, the signature is stored in sig. Call Serialize to write it out.
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey) (err os.Error) {
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey) (err error) {
 	sig.outSubpackets = sig.buildSubpackets()
 	digest, err := sig.signPrepareHash(h)
 	if err != nil {
@@ -446,7 +445,7 @@
 			sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
 		}
 	default:
-		err = error.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		err = error_.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
 	}
 
 	return
@@ -455,7 +454,7 @@
 // SignUserId computes a signature from priv, asserting that pub is a valid
 // key for the identity id.  On success, the signature is stored in sig. Call
 // Serialize to write it out.
-func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey) os.Error {
+func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey) error {
 	h, err := userIdSignatureHash(id, pub, sig)
 	if err != nil {
 		return nil
@@ -465,7 +464,7 @@
 
 // SignKey computes a signature from priv, asserting that pub is a subkey.  On
 // success, the signature is stored in sig. Call Serialize to write it out.
-func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey) os.Error {
+func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey) error {
 	h, err := keySignatureHash(&priv.PublicKey, pub, sig)
 	if err != nil {
 		return err
@@ -474,12 +473,12 @@
 }
 
 // Serialize marshals sig to w. SignRSA or SignDSA must have been called first.
-func (sig *Signature) Serialize(w io.Writer) (err os.Error) {
+func (sig *Signature) Serialize(w io.Writer) (err error) {
 	if len(sig.outSubpackets) == 0 {
 		sig.outSubpackets = sig.rawSubpackets
 	}
 	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
-		return error.InvalidArgumentError("Signature: need to call SignRSA or SignDSA before Serialize")
+		return error_.InvalidArgumentError("Signature: need to call SignRSA or SignDSA before Serialize")
 	}
 
 	sigLength := 0
diff --git a/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted.go b/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted.go
index ad4f1d6..76d5151 100644
--- a/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted.go
+++ b/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -7,10 +7,9 @@
 import (
 	"bytes"
 	"crypto/cipher"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/s2k"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -30,7 +29,7 @@
 
 const symmetricKeyEncryptedVersion = 4
 
-func (ske *SymmetricKeyEncrypted) parse(r io.Reader) (err os.Error) {
+func (ske *SymmetricKeyEncrypted) parse(r io.Reader) (err error) {
 	// RFC 4880, section 5.3.
 	var buf [2]byte
 	_, err = readFull(r, buf[:])
@@ -38,12 +37,12 @@
 		return
 	}
 	if buf[0] != symmetricKeyEncryptedVersion {
-		return error.UnsupportedError("SymmetricKeyEncrypted version")
+		return error_.UnsupportedError("SymmetricKeyEncrypted version")
 	}
 	ske.CipherFunc = CipherFunction(buf[1])
 
 	if ske.CipherFunc.KeySize() == 0 {
-		return error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
+		return error_.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
 	}
 
 	ske.s2k, err = s2k.Parse(r)
@@ -61,7 +60,7 @@
 	err = nil
 	if n != 0 {
 		if n == maxSessionKeySizeInBytes {
-			return error.UnsupportedError("oversized encrypted session key")
+			return error_.UnsupportedError("oversized encrypted session key")
 		}
 		ske.encryptedKey = encryptedKey[:n]
 	}
@@ -73,7 +72,7 @@
 
 // Decrypt attempts to decrypt an encrypted session key. If it returns nil,
 // ske.Key will contain the session key.
-func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) os.Error {
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) error {
 	if !ske.Encrypted {
 		return nil
 	}
@@ -90,13 +89,13 @@
 		c.XORKeyStream(ske.encryptedKey, ske.encryptedKey)
 		ske.CipherFunc = CipherFunction(ske.encryptedKey[0])
 		if ske.CipherFunc.blockSize() == 0 {
-			return error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(ske.CipherFunc)))
+			return error_.UnsupportedError("unknown cipher: " + strconv.Itoa(int(ske.CipherFunc)))
 		}
 		ske.CipherFunc = CipherFunction(ske.encryptedKey[0])
 		ske.Key = ske.encryptedKey[1:]
 		if len(ske.Key)%ske.CipherFunc.blockSize() != 0 {
 			ske.Key = nil
-			return error.StructuralError("length of decrypted key not a multiple of block size")
+			return error_.StructuralError("length of decrypted key not a multiple of block size")
 		}
 	}
 
@@ -108,10 +107,10 @@
 // packet contains a random session key, encrypted by a key derived from the
 // given passphrase. The session key is returned and must be passed to
 // SerializeSymmetricallyEncrypted.
-func SerializeSymmetricKeyEncrypted(w io.Writer, rand io.Reader, passphrase []byte, cipherFunc CipherFunction) (key []byte, err os.Error) {
+func SerializeSymmetricKeyEncrypted(w io.Writer, rand io.Reader, passphrase []byte, cipherFunc CipherFunction) (key []byte, err error) {
 	keySize := cipherFunc.KeySize()
 	if keySize == 0 {
-		return nil, error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+		return nil, error_.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
 	}
 
 	s2kBuf := new(bytes.Buffer)
diff --git a/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted_test.go b/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted_test.go
index 823ec40..87690f0 100644
--- a/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted_test.go
+++ b/src/pkg/crypto/openpgp/packet/symmetric_key_encrypted_test.go
@@ -8,8 +8,8 @@
 	"bytes"
 	"crypto/rand"
 	"encoding/hex"
+	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -48,7 +48,7 @@
 	}
 
 	contents, err := ioutil.ReadAll(r)
-	if err != nil && err != os.EOF {
+	if err != nil && err != io.EOF {
 		t.Error(err)
 		return
 	}
diff --git a/src/pkg/crypto/openpgp/packet/symmetrically_encrypted.go b/src/pkg/crypto/openpgp/packet/symmetrically_encrypted.go
index e33c9f3..8225db6 100644
--- a/src/pkg/crypto/openpgp/packet/symmetrically_encrypted.go
+++ b/src/pkg/crypto/openpgp/packet/symmetrically_encrypted.go
@@ -6,13 +6,12 @@
 
 import (
 	"crypto/cipher"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/rand"
 	"crypto/sha1"
 	"crypto/subtle"
 	"hash"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -27,7 +26,7 @@
 
 const symmetricallyEncryptedVersion = 1
 
-func (se *SymmetricallyEncrypted) parse(r io.Reader) os.Error {
+func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
 	if se.MDC {
 		// See RFC 4880, section 5.13.
 		var buf [1]byte
@@ -36,7 +35,7 @@
 			return err
 		}
 		if buf[0] != symmetricallyEncryptedVersion {
-			return error.UnsupportedError("unknown SymmetricallyEncrypted version")
+			return error_.UnsupportedError("unknown SymmetricallyEncrypted version")
 		}
 	}
 	se.contents = r
@@ -46,13 +45,13 @@
 // Decrypt returns a ReadCloser, from which the decrypted contents of the
 // packet can be read. An incorrect key can, with high probability, be detected
 // immediately and this will result in a KeyIncorrect error being returned.
-func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, os.Error) {
+func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
 	keySize := c.KeySize()
 	if keySize == 0 {
-		return nil, error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
+		return nil, error_.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
 	}
 	if len(key) != keySize {
-		return nil, error.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
+		return nil, error_.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
 	}
 
 	if se.prefix == nil {
@@ -62,7 +61,7 @@
 			return nil, err
 		}
 	} else if len(se.prefix) != c.blockSize()+2 {
-		return nil, error.InvalidArgumentError("can't try ciphers with different block lengths")
+		return nil, error_.InvalidArgumentError("can't try ciphers with different block lengths")
 	}
 
 	ocfbResync := cipher.OCFBResync
@@ -73,7 +72,7 @@
 
 	s := cipher.NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
 	if s == nil {
-		return nil, error.KeyIncorrectError
+		return nil, error_.KeyIncorrectError
 	}
 
 	plaintext := cipher.StreamReader{S: s, R: se.contents}
@@ -94,11 +93,11 @@
 	in io.Reader
 }
 
-func (ser seReader) Read(buf []byte) (int, os.Error) {
+func (ser seReader) Read(buf []byte) (int, error) {
 	return ser.in.Read(buf)
 }
 
-func (ser seReader) Close() os.Error {
+func (ser seReader) Close() error {
 	return nil
 }
 
@@ -118,13 +117,13 @@
 	eof         bool
 }
 
-func (ser *seMDCReader) Read(buf []byte) (n int, err os.Error) {
+func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
 	if ser.error {
 		err = io.ErrUnexpectedEOF
 		return
 	}
 	if ser.eof {
-		err = os.EOF
+		err = io.EOF
 		return
 	}
 
@@ -133,7 +132,7 @@
 	for ser.trailerUsed < mdcTrailerSize {
 		n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
 		ser.trailerUsed += n
-		if err == os.EOF {
+		if err == io.EOF {
 			if ser.trailerUsed != mdcTrailerSize {
 				n = 0
 				err = io.ErrUnexpectedEOF
@@ -161,7 +160,7 @@
 		copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
 		if n < len(buf) {
 			ser.eof = true
-			err = os.EOF
+			err = io.EOF
 		}
 		return
 	}
@@ -171,7 +170,7 @@
 	ser.h.Write(buf[:n])
 	copy(ser.trailer[:], buf[n:])
 
-	if err == os.EOF {
+	if err == io.EOF {
 		ser.eof = true
 	}
 	return
@@ -180,31 +179,31 @@
 // This is a new-format packet tag byte for a type 19 (MDC) packet.
 const mdcPacketTagByte = byte(0x80) | 0x40 | 19
 
-func (ser *seMDCReader) Close() os.Error {
+func (ser *seMDCReader) Close() error {
 	if ser.error {
-		return error.SignatureError("error during reading")
+		return error_.SignatureError("error during reading")
 	}
 
 	for !ser.eof {
 		// We haven't seen EOF so we need to read to the end
 		var buf [1024]byte
 		_, err := ser.Read(buf[:])
-		if err == os.EOF {
+		if err == io.EOF {
 			break
 		}
 		if err != nil {
-			return error.SignatureError("error during reading")
+			return error_.SignatureError("error during reading")
 		}
 	}
 
 	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
-		return error.SignatureError("MDC packet not found")
+		return error_.SignatureError("MDC packet not found")
 	}
 	ser.h.Write(ser.trailer[:2])
 
 	final := ser.h.Sum()
 	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
-		return error.SignatureError("hash mismatch")
+		return error_.SignatureError("hash mismatch")
 	}
 	return nil
 }
@@ -217,12 +216,12 @@
 	h hash.Hash
 }
 
-func (w *seMDCWriter) Write(buf []byte) (n int, err os.Error) {
+func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
 	w.h.Write(buf)
 	return w.w.Write(buf)
 }
 
-func (w *seMDCWriter) Close() (err os.Error) {
+func (w *seMDCWriter) Close() (err error) {
 	var buf [mdcTrailerSize]byte
 
 	buf[0] = mdcPacketTagByte
@@ -243,20 +242,20 @@
 	w io.Writer
 }
 
-func (c noOpCloser) Write(data []byte) (n int, err os.Error) {
+func (c noOpCloser) Write(data []byte) (n int, err error) {
 	return c.w.Write(data)
 }
 
-func (c noOpCloser) Close() os.Error {
+func (c noOpCloser) Close() error {
 	return nil
 }
 
 // SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
 // to w and returns a WriteCloser to which the to-be-encrypted packets can be
 // written.
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte) (contents io.WriteCloser, err os.Error) {
+func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte) (contents io.WriteCloser, err error) {
 	if c.KeySize() != len(key) {
-		return nil, error.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
+		return nil, error_.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
 	}
 	writeCloser := noOpCloser{w}
 	ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
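
With Write and Close now returning the built-in error type, small adapters such as noOpCloser and seReader above still satisfy io.WriteCloser and io.ReadCloser without further change. A minimal sketch of the same adapter shape (nopCloser is a made-up name for illustration):

package main

import (
	"fmt"
	"io"
	"os"
)

// nopCloser wraps an io.Writer and adds a no-op Close, mirroring the
// noOpCloser helper in the hunk above.
type nopCloser struct {
	w io.Writer
}

func (c nopCloser) Write(p []byte) (int, error) { return c.w.Write(p) }
func (c nopCloser) Close() error                { return nil }

func main() {
	var wc io.WriteCloser = nopCloser{w: os.Stdout} // compile-time interface check
	fmt.Fprintln(wc, "encrypted packet bytes would be streamed here")
	wc.Close()
}
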
diff --git a/src/pkg/crypto/openpgp/packet/symmetrically_encrypted_test.go b/src/pkg/crypto/openpgp/packet/symmetrically_encrypted_test.go
index 1054fc2..8eee971 100644
--- a/src/pkg/crypto/openpgp/packet/symmetrically_encrypted_test.go
+++ b/src/pkg/crypto/openpgp/packet/symmetrically_encrypted_test.go
@@ -6,12 +6,11 @@
 
 import (
 	"bytes"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/sha1"
 	"encoding/hex"
 	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -21,7 +20,7 @@
 	stride int
 }
 
-func (t *testReader) Read(buf []byte) (n int, err os.Error) {
+func (t *testReader) Read(buf []byte) (n int, err error) {
 	n = t.stride
 	if n > len(t.data) {
 		n = len(t.data)
@@ -32,7 +31,7 @@
 	copy(buf, t.data)
 	t.data = t.data[n:]
 	if len(t.data) == 0 {
-		err = os.EOF
+		err = io.EOF
 	}
 	return
 }
@@ -71,7 +70,7 @@
 	err = mdcReader.Close()
 	if err == nil {
 		t.Error("corruption: no error")
-	} else if _, ok := err.(*error.SignatureError); !ok {
+	} else if _, ok := err.(*error_.SignatureError); !ok {
 		t.Errorf("corruption: expected SignatureError, got: %s", err)
 	}
 }
diff --git a/src/pkg/crypto/openpgp/packet/userid.go b/src/pkg/crypto/openpgp/packet/userid.go
index 0580ba3..d6bea7d 100644
--- a/src/pkg/crypto/openpgp/packet/userid.go
+++ b/src/pkg/crypto/openpgp/packet/userid.go
@@ -7,7 +7,6 @@
 import (
 	"io"
 	"io/ioutil"
-	"os"
 	"strings"
 )
 
@@ -65,7 +64,7 @@
 	return uid
 }
 
-func (uid *UserId) parse(r io.Reader) (err os.Error) {
+func (uid *UserId) parse(r io.Reader) (err error) {
 	// RFC 4880, section 5.11
 	b, err := ioutil.ReadAll(r)
 	if err != nil {
@@ -78,7 +77,7 @@
 
 // Serialize marshals uid to w in the form of an OpenPGP packet, including
 // header.
-func (uid *UserId) Serialize(w io.Writer) os.Error {
+func (uid *UserId) Serialize(w io.Writer) error {
 	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
 	if err != nil {
 		return err
diff --git a/src/pkg/crypto/openpgp/read.go b/src/pkg/crypto/openpgp/read.go
index d95f613..76fb1ea 100644
--- a/src/pkg/crypto/openpgp/read.go
+++ b/src/pkg/crypto/openpgp/read.go
@@ -8,12 +8,11 @@
 import (
 	"crypto"
 	"crypto/openpgp/armor"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/packet"
 	_ "crypto/sha256"
 	"hash"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -21,14 +20,14 @@
 var SignatureType = "PGP SIGNATURE"
 
 // readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err os.Error) {
+func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
 	block, err := armor.Decode(r)
 	if err != nil {
 		return
 	}
 
 	if block.Type != expectedType {
-		return nil, error.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
+		return nil, error_.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
 	}
 
 	return block.Body, nil
@@ -56,7 +55,7 @@
 	// been consumed. Once EOF has been seen, the following fields are
 	// valid. (An authentication code failure is reported as a
 	// SignatureError error when reading from UnverifiedBody.)
-	SignatureError os.Error          // nil if the signature is good.
+	SignatureError error             // nil if the signature is good.
 	Signature      *packet.Signature // the signature packet itself.
 
 	decrypted io.ReadCloser
@@ -69,7 +68,7 @@
 // passphrase to try. If the decrypted private key or given passphrase isn't
 // correct, the function will be called again, forever. Any error returned will
 // be passed up.
-type PromptFunction func(keys []Key, symmetric bool) ([]byte, os.Error)
+type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
 
 // A keyEnvelopePair is used to store a private key with the envelope that
 // contains a symmetric key, encrypted with that key.
@@ -81,7 +80,7 @@
 // ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
 // The given KeyRing should contain both public keys (for signature
 // verification) and, possibly encrypted, private keys for decrypting.
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction) (md *MessageDetails, err os.Error) {
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction) (md *MessageDetails, err error) {
 	var p packet.Packet
 
 	var symKeys []*packet.SymmetricKeyEncrypted
@@ -131,7 +130,7 @@
 		case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
 			// This message isn't encrypted.
 			if len(symKeys) != 0 || len(pubKeys) != 0 {
-				return nil, error.StructuralError("key material not followed by encrypted message")
+				return nil, error_.StructuralError("key material not followed by encrypted message")
 			}
 			packets.Unread(p)
 			return readSignedMessage(packets, nil, keyring)
@@ -162,7 +161,7 @@
 					continue
 				}
 				decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
-				if err != nil && err != error.KeyIncorrectError {
+				if err != nil && err != error_.KeyIncorrectError {
 					return nil, err
 				}
 				if decrypted != nil {
@@ -180,11 +179,11 @@
 		}
 
 		if len(candidates) == 0 && len(symKeys) == 0 {
-			return nil, error.KeyIncorrectError
+			return nil, error_.KeyIncorrectError
 		}
 
 		if prompt == nil {
-			return nil, error.KeyIncorrectError
+			return nil, error_.KeyIncorrectError
 		}
 
 		passphrase, err := prompt(candidates, len(symKeys) != 0)
@@ -198,7 +197,7 @@
 				err = s.Decrypt(passphrase)
 				if err == nil && !s.Encrypted {
 					decrypted, err = se.Decrypt(s.CipherFunc, s.Key)
-					if err != nil && err != error.KeyIncorrectError {
+					if err != nil && err != error_.KeyIncorrectError {
 						return nil, err
 					}
 					if decrypted != nil {
@@ -218,7 +217,7 @@
 // readSignedMessage reads a possibly signed message. If mdin is non-zero then
 // that structure is updated and returned. Otherwise a fresh MessageDetails is
 // used.
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err os.Error) {
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
 	if mdin == nil {
 		mdin = new(MessageDetails)
 	}
@@ -238,7 +237,7 @@
 			packets.Push(p.Body)
 		case *packet.OnePassSignature:
 			if !p.IsLast {
-				return nil, error.UnsupportedError("nested signatures")
+				return nil, error_.UnsupportedError("nested signatures")
 			}
 
 			h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
@@ -279,10 +278,10 @@
 // should be preprocessed (i.e. to normalize line endings). Thus this function
 // returns two hashes. The second should be used to hash the message itself and
 // performs any needed preprocessing.
-func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, os.Error) {
+func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
 	h := hashId.New()
 	if h == nil {
-		return nil, nil, error.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
+		return nil, nil, error_.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
 	}
 
 	switch sigType {
@@ -292,7 +291,7 @@
 		return h, NewCanonicalTextHash(h), nil
 	}
 
-	return nil, nil, error.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
+	return nil, nil, error_.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
 }
 
 // checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
@@ -302,9 +301,9 @@
 	md *MessageDetails
 }
 
-func (cr checkReader) Read(buf []byte) (n int, err os.Error) {
+func (cr checkReader) Read(buf []byte) (n int, err error) {
 	n, err = cr.md.LiteralData.Body.Read(buf)
-	if err == os.EOF {
+	if err == io.EOF {
 		mdcErr := cr.md.decrypted.Close()
 		if mdcErr != nil {
 			err = mdcErr
@@ -322,10 +321,10 @@
 	md             *MessageDetails
 }
 
-func (scr *signatureCheckReader) Read(buf []byte) (n int, err os.Error) {
+func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
 	n, err = scr.md.LiteralData.Body.Read(buf)
 	scr.wrappedHash.Write(buf[:n])
-	if err == os.EOF {
+	if err == io.EOF {
 		var p packet.Packet
 		p, scr.md.SignatureError = scr.packets.Next()
 		if scr.md.SignatureError != nil {
@@ -334,7 +333,7 @@
 
 		var ok bool
 		if scr.md.Signature, ok = p.(*packet.Signature); !ok {
-			scr.md.SignatureError = error.StructuralError("LiteralData not followed by Signature")
+			scr.md.SignatureError = error_.StructuralError("LiteralData not followed by Signature")
 			return
 		}
 
@@ -356,7 +355,7 @@
 // CheckDetachedSignature takes a signed file and a detached signature and
 // returns the signer if the signature is valid. If the signer isn't known,
 // UnknownIssuerError is returned.
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err os.Error) {
+func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
 	p, err := packet.Read(signature)
 	if err != nil {
 		return
@@ -364,16 +363,16 @@
 
 	sig, ok := p.(*packet.Signature)
 	if !ok {
-		return nil, error.StructuralError("non signature packet found")
+		return nil, error_.StructuralError("non signature packet found")
 	}
 
 	if sig.IssuerKeyId == nil {
-		return nil, error.StructuralError("signature doesn't have an issuer")
+		return nil, error_.StructuralError("signature doesn't have an issuer")
 	}
 
 	keys := keyring.KeysById(*sig.IssuerKeyId)
 	if len(keys) == 0 {
-		return nil, error.UnknownIssuerError
+		return nil, error_.UnknownIssuerError
 	}
 
 	h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
@@ -382,7 +381,7 @@
 	}
 
 	_, err = io.Copy(wrappedHash, signed)
-	if err != nil && err != os.EOF {
+	if err != nil && err != io.EOF {
 		return
 	}
 
@@ -400,12 +399,12 @@
 		return
 	}
 
-	return nil, error.UnknownIssuerError
+	return nil, error_.UnknownIssuerError
 }
 
 // CheckArmoredDetachedSignature performs the same actions as
 // CheckDetachedSignature but expects the signature to be armored.
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err os.Error) {
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
 	body, err := readArmored(signature, SignatureType)
 	if err != nil {
 		return
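
The end-of-file sentinel moves from os.EOF to io.EOF; checkReader and signatureCheckReader above compare against it to detect the normal end of the literal data. A self-contained sketch of the same read loop, using strings.Reader as a stand-in for the packet body:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("signed message body")
	buf := make([]byte, 8)
	for {
		n, err := body.Read(buf)
		fmt.Printf("read %q\n", buf[:n])
		if err == io.EOF {
			break // normal end of data
		}
		if err != nil {
			fmt.Println("read failed:", err)
			return
		}
	}
}
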
diff --git a/src/pkg/crypto/openpgp/read_test.go b/src/pkg/crypto/openpgp/read_test.go
index 4dc290e..e8a6bf5 100644
--- a/src/pkg/crypto/openpgp/read_test.go
+++ b/src/pkg/crypto/openpgp/read_test.go
@@ -6,11 +6,10 @@
 
 import (
 	"bytes"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"encoding/hex"
 	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -149,21 +148,21 @@
 	for i, test := range signedEncryptedMessageTests {
 		expected := "Signed and encrypted message\n"
 		kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex))
-		prompt := func(keys []Key, symmetric bool) ([]byte, os.Error) {
+		prompt := func(keys []Key, symmetric bool) ([]byte, error) {
 			if symmetric {
 				t.Errorf("prompt: message was marked as symmetrically encrypted")
-				return nil, error.KeyIncorrectError
+				return nil, error_.KeyIncorrectError
 			}
 
 			if len(keys) == 0 {
 				t.Error("prompt: no keys requested")
-				return nil, error.KeyIncorrectError
+				return nil, error_.KeyIncorrectError
 			}
 
 			err := keys[0].PrivateKey.Decrypt([]byte("passphrase"))
 			if err != nil {
 				t.Errorf("prompt: error decrypting key: %s", err)
-				return nil, error.KeyIncorrectError
+				return nil, error_.KeyIncorrectError
 			}
 
 			return nil, nil
@@ -215,7 +214,7 @@
 func TestSymmetricallyEncrypted(t *testing.T) {
 	expected := "Symmetrically encrypted.\n"
 
-	prompt := func(keys []Key, symmetric bool) ([]byte, os.Error) {
+	prompt := func(keys []Key, symmetric bool) ([]byte, error) {
 		if len(keys) != 0 {
 			t.Errorf("prompt: len(keys) = %d (want 0)", len(keys))
 		}
@@ -287,7 +286,7 @@
 
 func TestNoArmoredData(t *testing.T) {
 	_, err := ReadArmoredKeyRing(bytes.NewBufferString("foo"))
-	if _, ok := err.(error.InvalidArgumentError); !ok {
+	if _, ok := err.(error_.InvalidArgumentError); !ok {
 		t.Errorf("error was not an InvalidArgumentError: %s", err)
 	}
 }
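
The tests keep distinguishing failures by their concrete type (error_.SignatureError, error_.InvalidArgumentError) using type assertions. A small sketch of the pattern with a stand-in error type, not the actual openpgp/error types:

package main

import "fmt"

// SignatureError is a stand-in for the openpgp error types; it satisfies the
// built-in error interface through its Error method.
type SignatureError string

func (s SignatureError) Error() string { return "signature error: " + string(s) }

func verify(ok bool) error {
	if !ok {
		return SignatureError("hash mismatch")
	}
	return nil
}

func main() {
	err := verify(false)
	if _, ok := err.(SignatureError); ok {
		fmt.Println("got the expected error type:", err)
	} else {
		fmt.Println("unexpected error:", err)
	}
}
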
diff --git a/src/pkg/crypto/openpgp/s2k/s2k.go b/src/pkg/crypto/openpgp/s2k/s2k.go
index 013b15c..2a753db 100644
--- a/src/pkg/crypto/openpgp/s2k/s2k.go
+++ b/src/pkg/crypto/openpgp/s2k/s2k.go
@@ -8,10 +8,9 @@
 
 import (
 	"crypto"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"hash"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -76,7 +75,7 @@
 
 // Parse reads a binary specification for a string-to-key transformation from r
 // and returns a function which performs that transform.
-func Parse(r io.Reader) (f func(out, in []byte), err os.Error) {
+func Parse(r io.Reader) (f func(out, in []byte), err error) {
 	var buf [9]byte
 
 	_, err = io.ReadFull(r, buf[:2])
@@ -86,11 +85,11 @@
 
 	hash, ok := HashIdToHash(buf[1])
 	if !ok {
-		return nil, error.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
+		return nil, error_.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
 	}
 	h := hash.New()
 	if h == nil {
-		return nil, error.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
+		return nil, error_.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
 	}
 
 	switch buf[0] {
@@ -120,12 +119,12 @@
 		return f, nil
 	}
 
-	return nil, error.UnsupportedError("S2K function")
+	return nil, error_.UnsupportedError("S2K function")
 }
 
 // Serialize salts and stretches the given passphrase and writes the resulting
 // key into key. It also serializes an S2K descriptor to w.
-func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte) os.Error {
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte) error {
 	var buf [11]byte
 	buf[0] = 3 /* iterated and salted */
 	buf[1], _ = HashToHashId(crypto.SHA1)
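
Parse keeps its shape of returning a transform closure together with an error; only the error type changes. A sketch of that shape with invented modes (not the real S2K algorithm):

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseTransform returns either a transform function or an error, mirroring
// the func-plus-error signature of s2k.Parse; the modes here are made up.
func parseTransform(mode byte) (func(out, in []byte), error) {
	switch mode {
	case 0:
		return func(out, in []byte) { copy(out, in) }, nil
	default:
		return nil, errors.New("unsupported mode " + strconv.Itoa(int(mode)))
	}
}

func main() {
	f, err := parseTransform(0)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	out := make([]byte, 5)
	f(out, []byte("hello"))
	fmt.Printf("%s\n", out)
}
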
diff --git a/src/pkg/crypto/openpgp/write.go b/src/pkg/crypto/openpgp/write.go
index 9884472..6f3450c 100644
--- a/src/pkg/crypto/openpgp/write.go
+++ b/src/pkg/crypto/openpgp/write.go
@@ -7,45 +7,44 @@
 import (
 	"crypto"
 	"crypto/openpgp/armor"
-	"crypto/openpgp/error"
+	error_ "crypto/openpgp/error"
 	"crypto/openpgp/packet"
 	"crypto/openpgp/s2k"
 	"crypto/rand"
 	_ "crypto/sha256"
 	"hash"
 	"io"
-	"os"
 	"strconv"
 	"time"
 )
 
 // DetachSign signs message with the private key from signer (which must
 // already have been decrypted) and writes the signature to w.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader) os.Error {
+func DetachSign(w io.Writer, signer *Entity, message io.Reader) error {
 	return detachSign(w, signer, message, packet.SigTypeBinary)
 }
 
 // ArmoredDetachSign signs message with the private key from signer (which
 // must already have been decrypted) and writes an armored signature to w.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader) (err os.Error) {
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader) (err error) {
 	return armoredDetachSign(w, signer, message, packet.SigTypeBinary)
 }
 
 // DetachSignText signs message (after canonicalising the line endings) with
 // the private key from signer (which must already have been decrypted) and
 // writes the signature to w.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader) os.Error {
+func DetachSignText(w io.Writer, signer *Entity, message io.Reader) error {
 	return detachSign(w, signer, message, packet.SigTypeText)
 }
 
 // ArmoredDetachSignText signs message (after canonicalising the line endings)
 // with the private key from signer (which must already have been decrypted)
 // and writes an armored signature to w.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader) os.Error {
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader) error {
 	return armoredDetachSign(w, signer, message, packet.SigTypeText)
 }
 
-func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType) (err os.Error) {
+func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType) (err error) {
 	out, err := armor.Encode(w, SignatureType, nil)
 	if err != nil {
 		return
@@ -57,12 +56,12 @@
 	return out.Close()
 }
 
-func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType) (err os.Error) {
+func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType) (err error) {
 	if signer.PrivateKey == nil {
-		return error.InvalidArgumentError("signing key doesn't have a private key")
+		return error_.InvalidArgumentError("signing key doesn't have a private key")
 	}
 	if signer.PrivateKey.Encrypted {
-		return error.InvalidArgumentError("signing key is encrypted")
+		return error_.InvalidArgumentError("signing key is encrypted")
 	}
 
 	sig := new(packet.Signature)
@@ -103,7 +102,7 @@
 // SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
 // The resulting WriteCloser must be closed after the contents of the file have
 // been written.
-func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints) (plaintext io.WriteCloser, err os.Error) {
+func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints) (plaintext io.WriteCloser, err error) {
 	if hints == nil {
 		hints = &FileHints{}
 	}
@@ -148,12 +147,12 @@
 // it. hints contains optional information, that is also encrypted, that aids
 // the recipients in processing the message. The resulting WriteCloser must
 // be closed after the contents of the file have been written.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints) (plaintext io.WriteCloser, err os.Error) {
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints) (plaintext io.WriteCloser, err error) {
 	var signer *packet.PrivateKey
 	if signed != nil {
 		signer = signed.signingKey().PrivateKey
 		if signer == nil || signer.Encrypted {
-			return nil, error.InvalidArgumentError("signing key must be decrypted")
+			return nil, error_.InvalidArgumentError("signing key must be decrypted")
 		}
 	}
 
@@ -180,7 +179,7 @@
 	for i := range to {
 		encryptKeys[i] = to[i].encryptionKey()
 		if encryptKeys[i].PublicKey == nil {
-			return nil, error.InvalidArgumentError("cannot encrypt a message to key id " + strconv.Uitob64(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
+			return nil, error_.InvalidArgumentError("cannot encrypt a message to key id " + strconv.Uitob64(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
 		}
 
 		sig := to[i].primaryIdentity().SelfSignature
@@ -198,7 +197,7 @@
 	}
 
 	if len(candidateCiphers) == 0 || len(candidateHashes) == 0 {
-		return nil, error.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
+		return nil, error_.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
 	}
 
 	cipher := packet.CipherFunction(candidateCiphers[0])
@@ -266,12 +265,12 @@
 	signer        *packet.PrivateKey
 }
 
-func (s signatureWriter) Write(data []byte) (int, os.Error) {
+func (s signatureWriter) Write(data []byte) (int, error) {
 	s.h.Write(data)
 	return s.literalData.Write(data)
 }
 
-func (s signatureWriter) Close() os.Error {
+func (s signatureWriter) Close() error {
 	sig := &packet.Signature{
 		SigType:      packet.SigTypeBinary,
 		PubKeyAlgo:   s.signer.PubKeyAlgo,
@@ -299,10 +298,10 @@
 	w io.Writer
 }
 
-func (c noOpCloser) Write(data []byte) (n int, err os.Error) {
+func (c noOpCloser) Write(data []byte) (n int, err error) {
 	return c.w.Write(data)
 }
 
-func (c noOpCloser) Close() os.Error {
+func (c noOpCloser) Close() error {
 	return nil
 }
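
noOpCloser above adapts a bare io.Writer into the io.WriteCloser that the encryption pipeline hands back, now with plain error results. A runnable sketch of the same adapter wrapping os.Stdout:

package main

import (
	"fmt"
	"io"
	"os"
)

// noOpCloser gives an io.Writer a do-nothing Close so it satisfies
// io.WriteCloser; this mirrors the helper at the end of write.go.
type noOpCloser struct {
	w io.Writer
}

func (c noOpCloser) Write(data []byte) (n int, err error) { return c.w.Write(data) }
func (c noOpCloser) Close() error                         { return nil }

func main() {
	var wc io.WriteCloser = noOpCloser{w: os.Stdout}
	fmt.Fprintln(wc, "wrapped writer")
	if err := wc.Close(); err != nil {
		fmt.Println("close failed:", err)
	}
}
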
diff --git a/src/pkg/crypto/openpgp/write_test.go b/src/pkg/crypto/openpgp/write_test.go
index c542dfa..3cadf4c 100644
--- a/src/pkg/crypto/openpgp/write_test.go
+++ b/src/pkg/crypto/openpgp/write_test.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"crypto/rand"
-	"os"
 	"io"
 	"io/ioutil"
 	"testing"
@@ -106,7 +105,7 @@
 		t.Errorf("error closing plaintext writer: %s", err)
 	}
 
-	md, err := ReadMessage(buf, nil, func(keys []Key, symmetric bool) ([]byte, os.Error) {
+	md, err := ReadMessage(buf, nil, func(keys []Key, symmetric bool) ([]byte, error) {
 		return []byte("testing"), nil
 	})
 	if err != nil {
diff --git a/src/pkg/crypto/rand/rand.go b/src/pkg/crypto/rand/rand.go
index 42d9da0..5975903 100644
--- a/src/pkg/crypto/rand/rand.go
+++ b/src/pkg/crypto/rand/rand.go
@@ -6,10 +6,7 @@
 // pseudorandom number generator.
 package rand
 
-import (
-	"io"
-	"os"
-)
+import "io"
 
 // Reader is a global, shared instance of a cryptographically
 // strong pseudo-random generator.
@@ -18,4 +15,4 @@
 var Reader io.Reader
 
 // Read is a helper function that calls Reader.Read.
-func Read(b []byte) (n int, err os.Error) { return Reader.Read(b) }
+func Read(b []byte) (n int, err error) { return Reader.Read(b) }
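
Read now has the canonical io.Reader signature (n int, err error). A minimal reader with that signature, driven through io.ReadFull the way callers drive crypto/rand (the all-zero source is illustrative only and must never stand in for real randomness):

package main

import (
	"fmt"
	"io"
)

// zeroReader fills the buffer with zero bytes and never fails; it exists only
// to show the Read(b []byte) (n int, err error) shape used above.
type zeroReader struct{}

func (zeroReader) Read(b []byte) (n int, err error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

func main() {
	buf := make([]byte, 8)
	n, err := io.ReadFull(zeroReader{}, buf)
	fmt.Println(n, err, buf)
}
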
diff --git a/src/pkg/crypto/rand/rand_unix.go b/src/pkg/crypto/rand/rand_unix.go
index 76a7365..09442ad 100644
--- a/src/pkg/crypto/rand/rand_unix.go
+++ b/src/pkg/crypto/rand/rand_unix.go
@@ -30,7 +30,7 @@
 	mu   sync.Mutex
 }
 
-func (r *devReader) Read(b []byte) (n int, err os.Error) {
+func (r *devReader) Read(b []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if r.f == nil {
@@ -71,7 +71,7 @@
 	time, seed, dst, key [aes.BlockSize]byte
 }
 
-func (r *reader) Read(b []byte) (n int, err os.Error) {
+func (r *reader) Read(b []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	n = len(b)
diff --git a/src/pkg/crypto/rand/rand_windows.go b/src/pkg/crypto/rand/rand_windows.go
index 0eab6b2..590571d 100644
--- a/src/pkg/crypto/rand/rand_windows.go
+++ b/src/pkg/crypto/rand/rand_windows.go
@@ -23,7 +23,7 @@
 	mu   sync.Mutex
 }
 
-func (r *rngReader) Read(b []byte) (n int, err os.Error) {
+func (r *rngReader) Read(b []byte) (n int, err error) {
 	r.mu.Lock()
 	if r.prov == 0 {
 		const provType = syscall.PROV_RSA_FULL
diff --git a/src/pkg/crypto/rand/util.go b/src/pkg/crypto/rand/util.go
index 7702847..322da4a 100644
--- a/src/pkg/crypto/rand/util.go
+++ b/src/pkg/crypto/rand/util.go
@@ -12,7 +12,7 @@
 
 // Prime returns a number, p, of the given size, such that p is prime
 // with high probability.
-func Prime(rand io.Reader, bits int) (p *big.Int, err os.Error) {
+func Prime(rand io.Reader, bits int) (p *big.Int, err error) {
 	if bits < 1 {
 		err = os.EINVAL
 	}
@@ -48,7 +48,7 @@
 }
 
 // Int returns a uniform random value in [0, max).
-func Int(rand io.Reader, max *big.Int) (n *big.Int, err os.Error) {
+func Int(rand io.Reader, max *big.Int) (n *big.Int, err error) {
 	k := (max.BitLen() + 7) / 8
 
 	// b is the number of bits in the most significant byte of max.
diff --git a/src/pkg/crypto/rc4/rc4.go b/src/pkg/crypto/rc4/rc4.go
index 7ee47109..1bb278f 100644
--- a/src/pkg/crypto/rc4/rc4.go
+++ b/src/pkg/crypto/rc4/rc4.go
@@ -9,10 +9,7 @@
 // BUG(agl): RC4 is in common use but has design weaknesses that make
 // it a poor choice for new protocols.
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // A Cipher is an instance of RC4 using a particular key.
 type Cipher struct {
@@ -22,13 +19,13 @@
 
 type KeySizeError int
 
-func (k KeySizeError) String() string {
+func (k KeySizeError) Error() string {
 	return "crypto/rc4: invalid key size " + strconv.Itoa(int(k))
 }
 
 // NewCipher creates and returns a new Cipher.  The key argument should be the
 // RC4 key, at least 1 byte and at most 256 bytes.
-func NewCipher(key []byte) (*Cipher, os.Error) {
+func NewCipher(key []byte) (*Cipher, error) {
 	k := len(key)
 	if k < 1 || k > 256 {
 		return nil, KeySizeError(k)
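
With the built-in error interface, error types such as KeySizeError implement Error() string instead of String() string, and fmt picks the message up automatically. A self-contained sketch of the pattern (simplified, not the actual crypto/rc4 code):

package main

import (
	"fmt"
	"strconv"
)

// KeySizeError satisfies the built-in error interface via its Error method.
type KeySizeError int

func (k KeySizeError) Error() string {
	return "invalid key size " + strconv.Itoa(int(k))
}

func newCipher(key []byte) error {
	if len(key) < 1 || len(key) > 256 {
		return KeySizeError(len(key))
	}
	return nil
}

func main() {
	if err := newCipher(nil); err != nil {
		fmt.Println(err) // prints: invalid key size 0
	}
}
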
diff --git a/src/pkg/crypto/ripemd160/ripemd160.go b/src/pkg/crypto/ripemd160/ripemd160.go
index 5aaca59..6ccfe87 100644
--- a/src/pkg/crypto/ripemd160/ripemd160.go
+++ b/src/pkg/crypto/ripemd160/ripemd160.go
@@ -12,7 +12,6 @@
 import (
 	"crypto"
 	"hash"
-	"os"
 )
 
 func init() {
@@ -56,7 +55,7 @@
 
 func (d *digest) Size() int { return Size }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	nn = len(p)
 	d.tc += uint64(nn)
 	if d.nx > 0 {
diff --git a/src/pkg/crypto/rsa/pkcs1v15.go b/src/pkg/crypto/rsa/pkcs1v15.go
index 6006231..901539d 100644
--- a/src/pkg/crypto/rsa/pkcs1v15.go
+++ b/src/pkg/crypto/rsa/pkcs1v15.go
@@ -8,8 +8,8 @@
 	"big"
 	"crypto"
 	"crypto/subtle"
+	"errors"
 	"io"
-	"os"
 )
 
 // This file implements encryption and decryption using PKCS#1 v1.5 padding.
@@ -18,7 +18,7 @@
 // The message must be no longer than the length of the public modulus minus 11 bytes.
 // WARNING: use of this function to encrypt plaintexts other than session keys
 // is dangerous. Use RSA OAEP in new protocols.
-func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) (out []byte, err os.Error) {
+func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) (out []byte, err error) {
 	k := (pub.N.BitLen() + 7) / 8
 	if len(msg) > k-11 {
 		err = MessageTooLongError{}
@@ -44,7 +44,7 @@
 
 // DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5.
 // If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
-func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (out []byte, err os.Error) {
+func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (out []byte, err error) {
 	valid, out, err := decryptPKCS1v15(rand, priv, ciphertext)
 	if err == nil && valid == 0 {
 		err = DecryptionError{}
@@ -66,7 +66,7 @@
 // See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA
 // Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology
 // (Crypto '98),
-func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err os.Error) {
+func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) {
 	k := (priv.N.BitLen() + 7) / 8
 	if k-(len(key)+3+8) < 0 {
 		err = DecryptionError{}
@@ -83,7 +83,7 @@
 	return
 }
 
-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, msg []byte, err os.Error) {
+func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, msg []byte, err error) {
 	k := (priv.N.BitLen() + 7) / 8
 	if k < 11 {
 		err = DecryptionError{}
@@ -119,7 +119,7 @@
 }
 
 // nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err os.Error) {
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
 	_, err = io.ReadFull(rand, s)
 	if err != nil {
 		return
@@ -161,7 +161,7 @@
 // SignPKCS1v15 calculates the signature of hashed using RSASSA-PKCS1-V1_5-SIGN from RSA PKCS#1 v1.5.
 // Note that hashed must be the result of hashing the input message using the
 // given hash function.
-func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) (s []byte, err os.Error) {
+func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) (s []byte, err error) {
 	hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
 	if err != nil {
 		return
@@ -194,7 +194,7 @@
 // hashed is the result of hashing the input message using the given hash
 // function and sig is the signature. A valid signature is indicated by
 // returning a nil error.
-func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) (err os.Error) {
+func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) (err error) {
 	hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
 	if err != nil {
 		return
@@ -229,14 +229,14 @@
 	return nil
 }
 
-func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err os.Error) {
+func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) {
 	hashLen = hash.Size()
 	if inLen != hashLen {
-		return 0, nil, os.NewError("input must be hashed message")
+		return 0, nil, errors.New("input must be hashed message")
 	}
 	prefix, ok := hashPrefixes[hash]
 	if !ok {
-		return 0, nil, os.NewError("unsupported hash function")
+		return 0, nil, errors.New("unsupported hash function")
 	}
 	return
 }
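
One-off messages move from os.NewError to errors.New. Package-level sentinels created this way can still be compared with ==, as callers of errServerKeyExchange do later in this CL. A short sketch with invented identifier names:

package main

import (
	"errors"
	"fmt"
)

// errUnsupportedHash is a package-level sentinel built with errors.New.
var errUnsupportedHash = errors.New("unsupported hash function")

func hashInfo(known bool) ([]byte, error) {
	if !known {
		return nil, errUnsupportedHash
	}
	return []byte{0x30}, nil
}

func main() {
	if _, err := hashInfo(false); err == errUnsupportedHash {
		fmt.Println("got expected error:", err)
	}
}
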
diff --git a/src/pkg/crypto/rsa/rsa.go b/src/pkg/crypto/rsa/rsa.go
index 3df88e0..d1b9577 100644
--- a/src/pkg/crypto/rsa/rsa.go
+++ b/src/pkg/crypto/rsa/rsa.go
@@ -11,9 +11,9 @@
 	"big"
 	"crypto/rand"
 	"crypto/subtle"
+	"errors"
 	"hash"
 	"io"
-	"os"
 )
 
 var bigZero = big.NewInt(0)
@@ -57,14 +57,14 @@
 // Validate performs basic sanity checks on the key.
 // It returns nil if the key is valid, or else an os.Error describing a problem.
 
-func (priv *PrivateKey) Validate() os.Error {
+func (priv *PrivateKey) Validate() error {
 	// Check that the prime factors are actually prime. Note that this is
 	// just a sanity check. Since the random witnesses chosen by
 	// ProbablyPrime are deterministic, given the candidate number, it's
 	// easy for an attack to generate composites that pass this test.
 	for _, prime := range priv.Primes {
 		if !big.ProbablyPrime(prime, 20) {
-			return os.NewError("prime factor is composite")
+			return errors.New("prime factor is composite")
 		}
 	}
 
@@ -74,7 +74,7 @@
 		modulus.Mul(modulus, prime)
 	}
 	if modulus.Cmp(priv.N) != 0 {
-		return os.NewError("invalid modulus")
+		return errors.New("invalid modulus")
 	}
 	// Check that e and totient(Πprimes) are coprime.
 	totient := new(big.Int).Set(bigOne)
@@ -88,19 +88,19 @@
 	y := new(big.Int)
 	big.GcdInt(gcd, x, y, totient, e)
 	if gcd.Cmp(bigOne) != 0 {
-		return os.NewError("invalid public exponent E")
+		return errors.New("invalid public exponent E")
 	}
 	// Check that de ≡ 1 (mod totient(Πprimes))
 	de := new(big.Int).Mul(priv.D, e)
 	de.Mod(de, totient)
 	if de.Cmp(bigOne) != 0 {
-		return os.NewError("invalid private exponent D")
+		return errors.New("invalid private exponent D")
 	}
 	return nil
 }
 
 // GenerateKey generates an RSA keypair of the given bit size.
-func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err os.Error) {
+func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err error) {
 	return GenerateMultiPrimeKey(random, 2, bits)
 }
 
@@ -114,12 +114,12 @@
 //
 // [1] US patent 4405829 (1972, expired)
 // [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
-func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err os.Error) {
+func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err error) {
 	priv = new(PrivateKey)
 	priv.E = 65537
 
 	if nprimes < 2 {
-		return nil, os.NewError("rsa.GenerateMultiPrimeKey: nprimes must be >= 2")
+		return nil, errors.New("rsa.GenerateMultiPrimeKey: nprimes must be >= 2")
 	}
 
 	primes := make([]*big.Int, nprimes)
@@ -210,7 +210,7 @@
 // is too large for the size of the public key.
 type MessageTooLongError struct{}
 
-func (MessageTooLongError) String() string {
+func (MessageTooLongError) Error() string {
 	return "message too long for RSA public key size"
 }
 
@@ -223,7 +223,7 @@
 // EncryptOAEP encrypts the given message with RSA-OAEP.
 // The message must be no longer than the length of the public modulus less
 // twice the hash length plus 2.
-func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err os.Error) {
+func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err error) {
 	hash.Reset()
 	k := (pub.N.BitLen() + 7) / 8
 	if len(msg) > k-2*hash.Size()-2 {
@@ -270,13 +270,13 @@
 // It is deliberately vague to avoid adaptive attacks.
 type DecryptionError struct{}
 
-func (DecryptionError) String() string { return "RSA decryption error" }
+func (DecryptionError) Error() string { return "RSA decryption error" }
 
 // A VerificationError represents a failure to verify a signature.
 // It is deliberately vague to avoid adaptive attacks.
 type VerificationError struct{}
 
-func (VerificationError) String() string { return "RSA verification error" }
+func (VerificationError) Error() string { return "RSA verification error" }
 
 // modInverse returns ia, the inverse of a in the multiplicative group of prime
 // order n. It requires that a be a member of the group (i.e. less than n).
@@ -335,7 +335,7 @@
 
 // decrypt performs an RSA decryption, resulting in a plaintext integer. If a
 // random source is given, RSA blinding is used.
-func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err os.Error) {
+func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
 	// TODO(agl): can we get away with reusing blinds?
 	if c.Cmp(priv.N) > 0 {
 		err = DecryptionError{}
@@ -413,7 +413,7 @@
 
 // DecryptOAEP decrypts ciphertext using RSA-OAEP.
 // If rand != nil, DecryptOAEP uses RSA blinding to avoid timing side-channel attacks.
-func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err os.Error) {
+func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) {
 	k := (priv.N.BitLen() + 7) / 8
 	if len(ciphertext) > k ||
 		k < hash.Size()*2+2 {
diff --git a/src/pkg/crypto/sha1/sha1.go b/src/pkg/crypto/sha1/sha1.go
index 788d1ff..4cdf5b2 100644
--- a/src/pkg/crypto/sha1/sha1.go
+++ b/src/pkg/crypto/sha1/sha1.go
@@ -8,7 +8,6 @@
 import (
 	"crypto"
 	"hash"
-	"os"
 )
 
 func init() {
@@ -54,7 +53,7 @@
 
 func (d *digest) Size() int { return Size }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	nn = len(p)
 	d.len += uint64(nn)
 	if d.nx > 0 {
diff --git a/src/pkg/crypto/sha256/sha256.go b/src/pkg/crypto/sha256/sha256.go
index a2c058d..14b8cfc 100644
--- a/src/pkg/crypto/sha256/sha256.go
+++ b/src/pkg/crypto/sha256/sha256.go
@@ -9,7 +9,6 @@
 import (
 	"crypto"
 	"hash"
-	"os"
 )
 
 func init() {
@@ -98,7 +97,7 @@
 	return Size224
 }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	nn = len(p)
 	d.len += uint64(nn)
 	if d.nx > 0 {
diff --git a/src/pkg/crypto/sha512/sha512.go b/src/pkg/crypto/sha512/sha512.go
index 78f5fe2..1bd2798 100644
--- a/src/pkg/crypto/sha512/sha512.go
+++ b/src/pkg/crypto/sha512/sha512.go
@@ -9,7 +9,6 @@
 import (
 	"crypto"
 	"hash"
-	"os"
 )
 
 func init() {
@@ -98,7 +97,7 @@
 	return Size384
 }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	nn = len(p)
 	d.len += uint64(nn)
 	if d.nx > 0 {
diff --git a/src/pkg/crypto/tls/cipher_suites.go b/src/pkg/crypto/tls/cipher_suites.go
index 0c62251..1134f36 100644
--- a/src/pkg/crypto/tls/cipher_suites.go
+++ b/src/pkg/crypto/tls/cipher_suites.go
@@ -13,7 +13,6 @@
 	"crypto/sha1"
 	"crypto/x509"
 	"hash"
-	"os"
 )
 
 // a keyAgreement implements the client and server side of a TLS key agreement
@@ -24,15 +23,15 @@
 	// In the case that the key agreement protocol doesn't use a
 	// ServerKeyExchange message, generateServerKeyExchange can return nil,
 	// nil.
-	generateServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, os.Error)
-	processClientKeyExchange(*Config, *clientKeyExchangeMsg, uint16) ([]byte, os.Error)
+	generateServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)
+	processClientKeyExchange(*Config, *clientKeyExchangeMsg, uint16) ([]byte, error)
 
 	// On the client side, the next two methods are called in order.
 
 	// This method may not be called if the server doesn't send a
 	// ServerKeyExchange message.
-	processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) os.Error
-	generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, os.Error)
+	processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error
+	generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)
 }
 
 // A cipherSuite is a specific combination of key agreement, cipher and MAC
diff --git a/src/pkg/crypto/tls/conn.go b/src/pkg/crypto/tls/conn.go
index 9bca7d9..c052337 100644
--- a/src/pkg/crypto/tls/conn.go
+++ b/src/pkg/crypto/tls/conn.go
@@ -11,9 +11,9 @@
 	"crypto/cipher"
 	"crypto/subtle"
 	"crypto/x509"
+	"errors"
 	"io"
 	"net"
-	"os"
 	"sync"
 )
 
@@ -44,7 +44,7 @@
 
 	// first permanent error
 	errMutex sync.Mutex
-	err      os.Error
+	err      error
 
 	// input/output
 	in, out  halfConn     // in.Mutex < out.Mutex
@@ -55,7 +55,7 @@
 	tmp [16]byte
 }
 
-func (c *Conn) setError(err os.Error) os.Error {
+func (c *Conn) setError(err error) error {
 	c.errMutex.Lock()
 	defer c.errMutex.Unlock()
 
@@ -65,7 +65,7 @@
 	return err
 }
 
-func (c *Conn) error() os.Error {
+func (c *Conn) error() error {
 	c.errMutex.Lock()
 	defer c.errMutex.Unlock()
 
@@ -88,21 +88,21 @@
 
 // SetTimeout sets the read deadline associated with the connection.
 // There is no write deadline.
-func (c *Conn) SetTimeout(nsec int64) os.Error {
+func (c *Conn) SetTimeout(nsec int64) error {
 	return c.conn.SetTimeout(nsec)
 }
 
 // SetReadTimeout sets the time (in nanoseconds) that
 // Read will wait for data before returning os.EAGAIN.
 // Setting nsec == 0 (the default) disables the deadline.
-func (c *Conn) SetReadTimeout(nsec int64) os.Error {
+func (c *Conn) SetReadTimeout(nsec int64) error {
 	return c.conn.SetReadTimeout(nsec)
 }
 
 // SetWriteTimeout exists to satisfy the net.Conn interface
 // but is not implemented by TLS.  It always returns an error.
-func (c *Conn) SetWriteTimeout(nsec int64) os.Error {
-	return os.NewError("TLS does not support SetWriteTimeout")
+func (c *Conn) SetWriteTimeout(nsec int64) error {
+	return errors.New("TLS does not support SetWriteTimeout")
 }
 
 // A halfConn represents one direction of the record layer
@@ -129,7 +129,7 @@
 
 // changeCipherSpec changes the encryption and MAC states
 // to the ones previously passed to prepareCipherSpec.
-func (hc *halfConn) changeCipherSpec() os.Error {
+func (hc *halfConn) changeCipherSpec() error {
 	if hc.nextCipher == nil {
 		return alertInternalError
 	}
@@ -378,7 +378,7 @@
 
 // readFromUntil reads from r into b until b contains at least n bytes
 // or else returns an error.
-func (b *block) readFromUntil(r io.Reader, n int) os.Error {
+func (b *block) readFromUntil(r io.Reader, n int) error {
 	// quick case
 	if len(b.data) >= n {
 		return nil
@@ -399,7 +399,7 @@
 	return nil
 }
 
-func (b *block) Read(p []byte) (n int, err os.Error) {
+func (b *block) Read(p []byte) (n int, err error) {
 	n = copy(p, b.data[b.off:])
 	b.off += n
 	return
@@ -443,7 +443,7 @@
 // readRecord reads the next TLS record from the connection
 // and updates the record layer state.
 // c.in.Mutex <= L; c.input == nil.
-func (c *Conn) readRecord(want recordType) os.Error {
+func (c *Conn) readRecord(want recordType) error {
 	// Caller must be in sync with connection:
 	// handshake data if handshake not yet completed,
 	// else application data.  (We don't support renegotiation.)
@@ -502,7 +502,7 @@
 		}
 	}
 	if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		if e, ok := err.(net.Error); !ok || !e.Temporary() {
@@ -534,7 +534,7 @@
 			break
 		}
 		if alert(data[1]) == alertCloseNotify {
-			c.setError(os.EOF)
+			c.setError(io.EOF)
 			break
 		}
 		switch data[0] {
@@ -543,7 +543,7 @@
 			c.in.freeBlock(b)
 			goto Again
 		case alertLevelError:
-			c.setError(&net.OpError{Op: "remote error", Error: alert(data[1])})
+			c.setError(&net.OpError{Op: "remote error", Err: alert(data[1])})
 		default:
 			c.sendAlert(alertUnexpectedMessage)
 		}
@@ -582,7 +582,7 @@
 
 // sendAlert sends a TLS alert message.
 // c.out.Mutex <= L.
-func (c *Conn) sendAlertLocked(err alert) os.Error {
+func (c *Conn) sendAlertLocked(err alert) error {
 	c.tmp[0] = alertLevelError
 	if err == alertNoRenegotiation {
 		c.tmp[0] = alertLevelWarning
@@ -591,14 +591,14 @@
 	c.writeRecord(recordTypeAlert, c.tmp[0:2])
 	// closeNotify is a special case in that it isn't an error:
 	if err != alertCloseNotify {
-		return c.setError(&net.OpError{Op: "local error", Error: err})
+		return c.setError(&net.OpError{Op: "local error", Err: err})
 	}
 	return nil
 }
 
 // sendAlert sends a TLS alert message.
 // L < c.out.Mutex.
-func (c *Conn) sendAlert(err alert) os.Error {
+func (c *Conn) sendAlert(err alert) error {
 	c.out.Lock()
 	defer c.out.Unlock()
 	return c.sendAlertLocked(err)
@@ -607,7 +607,7 @@
 // writeRecord writes a TLS record with the given type and payload
 // to the connection and updates the record layer state.
 // c.out.Mutex <= L.
-func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err os.Error) {
+func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) {
 	b := c.out.newBlock()
 	for len(data) > 0 {
 		m := len(data)
@@ -643,7 +643,7 @@
 			c.tmp[0] = alertLevelError
 			c.tmp[1] = byte(err.(alert))
 			c.writeRecord(recordTypeAlert, c.tmp[0:2])
-			c.err = &net.OpError{Op: "local error", Error: err}
+			c.err = &net.OpError{Op: "local error", Err: err}
 			return n, c.err
 		}
 	}
@@ -653,7 +653,7 @@
 // readHandshake reads the next handshake message from
 // the record layer.
 // c.in.Mutex < L; c.out.Mutex < L.
-func (c *Conn) readHandshake() (interface{}, os.Error) {
+func (c *Conn) readHandshake() (interface{}, error) {
 	for c.hand.Len() < 4 {
 		if c.err != nil {
 			return nil, c.err
@@ -720,7 +720,7 @@
 }
 
 // Write writes data to the connection.
-func (c *Conn) Write(b []byte) (n int, err os.Error) {
+func (c *Conn) Write(b []byte) (n int, err error) {
 	if err = c.Handshake(); err != nil {
 		return
 	}
@@ -739,7 +739,7 @@
 
 // Read can be made to time out and return err == os.EAGAIN
 // after a fixed time limit; see SetTimeout and SetReadTimeout.
-func (c *Conn) Read(b []byte) (n int, err os.Error) {
+func (c *Conn) Read(b []byte) (n int, err error) {
 	if err = c.Handshake(); err != nil {
 		return
 	}
@@ -765,8 +765,8 @@
 }
 
 // Close closes the connection.
-func (c *Conn) Close() os.Error {
-	var alertErr os.Error
+func (c *Conn) Close() error {
+	var alertErr error
 
 	c.handshakeMutex.Lock()
 	defer c.handshakeMutex.Unlock()
@@ -784,7 +784,7 @@
 // protocol if it has not yet been run.
 // Most uses of this package need not call Handshake
 // explicitly: the first Read or Write will call it automatically.
-func (c *Conn) Handshake() os.Error {
+func (c *Conn) Handshake() error {
 	c.handshakeMutex.Lock()
 	defer c.handshakeMutex.Unlock()
 	if err := c.error(); err != nil {
@@ -830,14 +830,14 @@
 // VerifyHostname checks that the peer certificate chain is valid for
 // connecting to host.  If so, it returns nil; if not, it returns an os.Error
 // describing the problem.
-func (c *Conn) VerifyHostname(host string) os.Error {
+func (c *Conn) VerifyHostname(host string) error {
 	c.handshakeMutex.Lock()
 	defer c.handshakeMutex.Unlock()
 	if !c.isClient {
-		return os.NewError("VerifyHostname called on TLS server connection")
+		return errors.New("VerifyHostname called on TLS server connection")
 	}
 	if !c.handshakeComplete {
-		return os.NewError("TLS handshake has not yet been performed")
+		return errors.New("TLS handshake has not yet been performed")
 	}
 	return c.peerCertificates[0].VerifyHostname(host)
 }
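
net.OpError's wrapped-error field is now Err rather than Error, so alerts are recorded as &net.OpError{Op: ..., Err: alert(...)}. A sketch of wrapping and then recovering the cause with a type assertion (values invented):

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	cause := errors.New("handshake failure")
	var err error = &net.OpError{Op: "remote error", Err: cause}

	// Callers inspect the concrete type to reach the underlying cause.
	if e, ok := err.(*net.OpError); ok && e.Err == cause {
		fmt.Println("op:", e.Op, "cause:", e.Err)
	}
}
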
diff --git a/src/pkg/crypto/tls/handshake_client.go b/src/pkg/crypto/tls/handshake_client.go
index 575a121..aed991c 100644
--- a/src/pkg/crypto/tls/handshake_client.go
+++ b/src/pkg/crypto/tls/handshake_client.go
@@ -9,11 +9,11 @@
 	"crypto/rsa"
 	"crypto/subtle"
 	"crypto/x509"
+	"errors"
 	"io"
-	"os"
 )
 
-func (c *Conn) clientHandshake() os.Error {
+func (c *Conn) clientHandshake() error {
 	finishedHash := newFinishedHash(versionTLS10)
 
 	if c.config == nil {
@@ -40,7 +40,7 @@
 	_, err := io.ReadFull(c.config.rand(), hello.random[4:])
 	if err != nil {
 		c.sendAlert(alertInternalError)
-		return os.NewError("short read from Rand")
+		return errors.New("short read from Rand")
 	}
 
 	finishedHash.Write(hello.marshal())
@@ -69,7 +69,7 @@
 
 	if !hello.nextProtoNeg && serverHello.nextProtoNeg {
 		c.sendAlert(alertHandshakeFailure)
-		return os.NewError("server advertised unrequested NPN")
+		return errors.New("server advertised unrequested NPN")
 	}
 
 	suite, suiteId := mutualCipherSuite(c.config.cipherSuites(), serverHello.cipherSuite)
@@ -92,7 +92,7 @@
 		cert, err := x509.ParseCertificate(asn1Data)
 		if err != nil {
 			c.sendAlert(alertBadCertificate)
-			return os.NewError("failed to parse certificate from server: " + err.String())
+			return errors.New("failed to parse certificate from server: " + err.Error())
 		}
 		certs[i] = cert
 	}
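
Messages from lower-level failures are now read with err.Error() instead of err.String() before being wrapped in a new error. Both forms below produce the same message; fmt.Errorf is simply an alternative spelling, not what this CL uses:

package main

import (
	"errors"
	"fmt"
)

func parseCert(fail bool) error {
	if fail {
		return errors.New("bad certificate bytes")
	}
	return nil
}

func main() {
	if err := parseCert(true); err != nil {
		// Concatenation with err.Error(), as in handshake_client.go above.
		wrapped := errors.New("failed to parse certificate from server: " + err.Error())
		fmt.Println(wrapped)
		// Equivalent message built with fmt.Errorf.
		fmt.Println(fmt.Errorf("failed to parse certificate from server: %v", err))
	}
}
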
diff --git a/src/pkg/crypto/tls/handshake_server.go b/src/pkg/crypto/tls/handshake_server.go
index ed9a2e6..d5af084 100644
--- a/src/pkg/crypto/tls/handshake_server.go
+++ b/src/pkg/crypto/tls/handshake_server.go
@@ -9,11 +9,11 @@
 	"crypto/rsa"
 	"crypto/subtle"
 	"crypto/x509"
+	"errors"
 	"io"
-	"os"
 )
 
-func (c *Conn) serverHandshake() os.Error {
+func (c *Conn) serverHandshake() error {
 	config := c.config
 	msg, err := c.readHandshake()
 	if err != nil {
@@ -177,7 +177,7 @@
 			cert, err := x509.ParseCertificate(asn1Data)
 			if err != nil {
 				c.sendAlert(alertBadCertificate)
-				return os.NewError("could not parse client's certificate: " + err.String())
+				return errors.New("could not parse client's certificate: " + err.Error())
 			}
 			certs[i] = cert
 		}
@@ -186,7 +186,7 @@
 		for i := 1; i < len(certs); i++ {
 			if err := certs[i-1].CheckSignatureFrom(certs[i]); err != nil {
 				c.sendAlert(alertBadCertificate)
-				return os.NewError("could not validate certificate signature: " + err.String())
+				return errors.New("could not validate certificate signature: " + err.Error())
 			}
 		}
 
@@ -233,7 +233,7 @@
 		err = rsa.VerifyPKCS1v15(pub, crypto.MD5SHA1, digest, certVerify.signature)
 		if err != nil {
 			c.sendAlert(alertBadCertificate)
-			return os.NewError("could not validate signature of connection nonces: " + err.String())
+			return errors.New("could not validate signature of connection nonces: " + err.Error())
 		}
 
 		finishedHash.Write(certVerify.marshal())
diff --git a/src/pkg/crypto/tls/handshake_server_test.go b/src/pkg/crypto/tls/handshake_server_test.go
index 1939f3d..f2b0a14 100644
--- a/src/pkg/crypto/tls/handshake_server_test.go
+++ b/src/pkg/crypto/tls/handshake_server_test.go
@@ -12,7 +12,6 @@
 	"flag"
 	"io"
 	"net"
-	"os"
 	"strconv"
 	"strings"
 	"testing"
@@ -20,7 +19,7 @@
 
 type zeroSource struct{}
 
-func (zeroSource) Read(b []byte) (n int, err os.Error) {
+func (zeroSource) Read(b []byte) (n int, err error) {
 	for i := range b {
 		b[i] = 0
 	}
@@ -41,7 +40,7 @@
 	testConfig.InsecureSkipVerify = true
 }
 
-func testClientHelloFailure(t *testing.T, m handshakeMessage, expected os.Error) {
+func testClientHelloFailure(t *testing.T, m handshakeMessage, expected error) {
 	// Create in-memory network connection,
 	// send message to server.  Should return
 	// expected error.
@@ -56,7 +55,7 @@
 	}()
 	err := Server(s, testConfig).Handshake()
 	s.Close()
-	if e, ok := err.(*net.OpError); !ok || e.Error != expected {
+	if e, ok := err.(*net.OpError); !ok || e.Err != expected {
 		t.Errorf("Got error: %s; expected: %s", err, expected)
 	}
 }
@@ -93,7 +92,7 @@
 
 	err := Server(s, testConfig).Handshake()
 	s.Close()
-	if e, ok := err.(*net.OpError); !ok || e.Error != os.Error(alertUnknownCA) {
+	if e, ok := err.(*net.OpError); !ok || e.Err != error(alertUnknownCA) {
 		t.Errorf("Got error: %s; expected: %s", err, alertUnknownCA)
 	}
 }
@@ -104,8 +103,8 @@
 
 	err := Server(s, testConfig).Handshake()
 	s.Close()
-	if err != os.EOF {
-		t.Errorf("Got error: %s; expected: %s", err, os.EOF)
+	if err != io.EOF {
+		t.Errorf("Got error: %s; expected: %s", err, io.EOF)
 	}
 }
 
diff --git a/src/pkg/crypto/tls/key_agreement.go b/src/pkg/crypto/tls/key_agreement.go
index e347528..ba34606 100644
--- a/src/pkg/crypto/tls/key_agreement.go
+++ b/src/pkg/crypto/tls/key_agreement.go
@@ -12,19 +12,19 @@
 	"crypto/rsa"
 	"crypto/sha1"
 	"crypto/x509"
+	"errors"
 	"io"
-	"os"
 )
 
 // rsaKeyAgreement implements the standard TLS key agreement where the client
 // encrypts the pre-master secret to the server's public key.
 type rsaKeyAgreement struct{}
 
-func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, os.Error) {
+func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
 	return nil, nil
 }
 
-func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, ckx *clientKeyExchangeMsg, version uint16) ([]byte, os.Error) {
+func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
 	preMasterSecret := make([]byte, 48)
 	_, err := io.ReadFull(config.rand(), preMasterSecret[2:])
 	if err != nil {
@@ -32,14 +32,14 @@
 	}
 
 	if len(ckx.ciphertext) < 2 {
-		return nil, os.NewError("bad ClientKeyExchange")
+		return nil, errors.New("bad ClientKeyExchange")
 	}
 
 	ciphertext := ckx.ciphertext
 	if version != versionSSL30 {
 		ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1])
 		if ciphertextLen != len(ckx.ciphertext)-2 {
-			return nil, os.NewError("bad ClientKeyExchange")
+			return nil, errors.New("bad ClientKeyExchange")
 		}
 		ciphertext = ckx.ciphertext[2:]
 	}
@@ -57,11 +57,11 @@
 	return preMasterSecret, nil
 }
 
-func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) os.Error {
-	return os.NewError("unexpected ServerKeyExchange")
+func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
+	return errors.New("unexpected ServerKeyExchange")
 }
 
-func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, os.Error) {
+func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
 	preMasterSecret := make([]byte, 48)
 	preMasterSecret[0] = byte(clientHello.vers >> 8)
 	preMasterSecret[1] = byte(clientHello.vers)
@@ -109,7 +109,7 @@
 	x, y       *big.Int
 }
 
-func (ka *ecdheRSAKeyAgreement) generateServerKeyExchange(config *Config, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, os.Error) {
+func (ka *ecdheRSAKeyAgreement) generateServerKeyExchange(config *Config, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) {
 	var curveid uint16
 
 Curve:
@@ -131,7 +131,7 @@
 	}
 
 	var x, y *big.Int
-	var err os.Error
+	var err error
 	ka.privateKey, x, y, err = ka.curve.GenerateKey(config.rand())
 	if err != nil {
 		return nil, err
@@ -149,7 +149,7 @@
 	md5sha1 := md5SHA1Hash(clientHello.random, hello.random, serverECDHParams)
 	sig, err := rsa.SignPKCS1v15(config.rand(), config.Certificates[0].PrivateKey, crypto.MD5SHA1, md5sha1)
 	if err != nil {
-		return nil, os.NewError("failed to sign ECDHE parameters: " + err.String())
+		return nil, errors.New("failed to sign ECDHE parameters: " + err.Error())
 	}
 
 	skx := new(serverKeyExchangeMsg)
@@ -163,13 +163,13 @@
 	return skx, nil
 }
 
-func (ka *ecdheRSAKeyAgreement) processClientKeyExchange(config *Config, ckx *clientKeyExchangeMsg, version uint16) ([]byte, os.Error) {
+func (ka *ecdheRSAKeyAgreement) processClientKeyExchange(config *Config, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) {
 	if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 {
-		return nil, os.NewError("bad ClientKeyExchange")
+		return nil, errors.New("bad ClientKeyExchange")
 	}
 	x, y := ka.curve.Unmarshal(ckx.ciphertext[1:])
 	if x == nil {
-		return nil, os.NewError("bad ClientKeyExchange")
+		return nil, errors.New("bad ClientKeyExchange")
 	}
 	x, _ = ka.curve.ScalarMult(x, y, ka.privateKey)
 	preMasterSecret := make([]byte, (ka.curve.BitSize+7)>>3)
@@ -179,14 +179,14 @@
 	return preMasterSecret, nil
 }
 
-var errServerKeyExchange = os.NewError("invalid ServerKeyExchange")
+var errServerKeyExchange = errors.New("invalid ServerKeyExchange")
 
-func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) os.Error {
+func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error {
 	if len(skx.key) < 4 {
 		return errServerKeyExchange
 	}
 	if skx.key[0] != 3 { // named curve
-		return os.NewError("server selected unsupported curve")
+		return errors.New("server selected unsupported curve")
 	}
 	curveid := uint16(skx.key[1])<<8 | uint16(skx.key[2])
 
@@ -198,7 +198,7 @@
 	case curveP521:
 		ka.curve = elliptic.P521()
 	default:
-		return os.NewError("server selected unsupported curve")
+		return errors.New("server selected unsupported curve")
 	}
 
 	publicLen := int(skx.key[3])
@@ -225,9 +225,9 @@
 	return rsa.VerifyPKCS1v15(cert.PublicKey.(*rsa.PublicKey), crypto.MD5SHA1, md5sha1, sig)
 }
 
-func (ka *ecdheRSAKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, os.Error) {
+func (ka *ecdheRSAKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) {
 	if ka.curve == nil {
-		return nil, nil, os.NewError("missing ServerKeyExchange message")
+		return nil, nil, errors.New("missing ServerKeyExchange message")
 	}
 	priv, mx, my, err := ka.curve.GenerateKey(config.rand())
 	if err != nil {
diff --git a/src/pkg/crypto/tls/prf.go b/src/pkg/crypto/tls/prf.go
index 2d58dc5..d758f21 100644
--- a/src/pkg/crypto/tls/prf.go
+++ b/src/pkg/crypto/tls/prf.go
@@ -9,7 +9,6 @@
 	"crypto/md5"
 	"crypto/sha1"
 	"hash"
-	"os"
 )
 
 // Split a premaster secret in two as specified in RFC 4346, section 5.
@@ -156,7 +155,7 @@
 	version    uint16
 }
 
-func (h finishedHash) Write(msg []byte) (n int, err os.Error) {
+func (h finishedHash) Write(msg []byte) (n int, err error) {
 	h.clientMD5.Write(msg)
 	h.clientSHA1.Write(msg)
 	h.serverMD5.Write(msg)
diff --git a/src/pkg/crypto/tls/tls.go b/src/pkg/crypto/tls/tls.go
index 4f0859f..3ca6240 100644
--- a/src/pkg/crypto/tls/tls.go
+++ b/src/pkg/crypto/tls/tls.go
@@ -10,9 +10,9 @@
 	"crypto/rsa"
 	"crypto/x509"
 	"encoding/pem"
+	"errors"
 	"io/ioutil"
 	"net"
-	"os"
 	"strings"
 )
 
@@ -41,7 +41,7 @@
 
 // Accept waits for and returns the next incoming TLS connection.
 // The returned connection c is a *tls.Conn.
-func (l *Listener) Accept() (c net.Conn, err os.Error) {
+func (l *Listener) Accept() (c net.Conn, err error) {
 	c, err = l.listener.Accept()
 	if err != nil {
 		return
@@ -51,7 +51,7 @@
 }
 
 // Close closes the listener.
-func (l *Listener) Close() os.Error { return l.listener.Close() }
+func (l *Listener) Close() error { return l.listener.Close() }
 
 // Addr returns the listener's network address.
 func (l *Listener) Addr() net.Addr { return l.listener.Addr() }
@@ -71,9 +71,9 @@
 // given network address using net.Listen.
 // The configuration config must be non-nil and must have
 // at least one certificate.
-func Listen(network, laddr string, config *Config) (*Listener, os.Error) {
+func Listen(network, laddr string, config *Config) (*Listener, error) {
 	if config == nil || len(config.Certificates) == 0 {
-		return nil, os.NewError("tls.Listen: no certificates in configuration")
+		return nil, errors.New("tls.Listen: no certificates in configuration")
 	}
 	l, err := net.Listen(network, laddr)
 	if err != nil {
@@ -88,7 +88,7 @@
 // Dial interprets a nil configuration as equivalent to
 // the zero configuration; see the documentation of Config
 // for the defaults.
-func Dial(network, addr string, config *Config) (*Conn, os.Error) {
+func Dial(network, addr string, config *Config) (*Conn, error) {
 	raddr := addr
 	c, err := net.Dial(network, raddr)
 	if err != nil {
@@ -120,7 +120,7 @@
 
 // LoadX509KeyPair reads and parses a public/private key pair from a pair of
 // files. The files must contain PEM encoded data.
-func LoadX509KeyPair(certFile string, keyFile string) (cert Certificate, err os.Error) {
+func LoadX509KeyPair(certFile string, keyFile string) (cert Certificate, err error) {
 	certPEMBlock, err := ioutil.ReadFile(certFile)
 	if err != nil {
 		return
@@ -134,7 +134,7 @@
 
 // X509KeyPair parses a public/private key pair from a pair of
 // PEM encoded data.
-func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err os.Error) {
+func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err error) {
 	var certDERBlock *pem.Block
 	for {
 		certDERBlock, certPEMBlock = pem.Decode(certPEMBlock)
@@ -147,19 +147,19 @@
 	}
 
 	if len(cert.Certificate) == 0 {
-		err = os.NewError("crypto/tls: failed to parse certificate PEM data")
+		err = errors.New("crypto/tls: failed to parse certificate PEM data")
 		return
 	}
 
 	keyDERBlock, _ := pem.Decode(keyPEMBlock)
 	if keyDERBlock == nil {
-		err = os.NewError("crypto/tls: failed to parse key PEM data")
+		err = errors.New("crypto/tls: failed to parse key PEM data")
 		return
 	}
 
 	key, err := x509.ParsePKCS1PrivateKey(keyDERBlock.Bytes)
 	if err != nil {
-		err = os.NewError("crypto/tls: failed to parse key: " + err.String())
+		err = errors.New("crypto/tls: failed to parse key: " + err.Error())
 		return
 	}
 
@@ -173,7 +173,7 @@
 	}
 
 	if x509Cert.PublicKeyAlgorithm != x509.RSA || x509Cert.PublicKey.(*rsa.PublicKey).N.Cmp(key.PublicKey.N) != 0 {
-		err = os.NewError("crypto/tls: private key does not match public key")
+		err = errors.New("crypto/tls: private key does not match public key")
 		return
 	}
 
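
Listen now reports a plain error when the configuration carries no certificates. A sketch that exercises that path against the current crypto/tls package; the exact error text differs between Go releases:

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// An empty Config has no certificates, so Listen fails immediately.
	_, err := tls.Listen("tcp", "127.0.0.1:0", &tls.Config{})
	if err != nil {
		fmt.Println("listen failed as expected:", err)
	}
}
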
diff --git a/src/pkg/crypto/twofish/twofish.go b/src/pkg/crypto/twofish/twofish.go
index 2e537c6..0616e7b 100644
--- a/src/pkg/crypto/twofish/twofish.go
+++ b/src/pkg/crypto/twofish/twofish.go
@@ -12,10 +12,7 @@
 // LibTomCrypt is free for all purposes under the public domain.
 // It was heavily inspired by the go blowfish package.
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // BlockSize is the constant block size of Twofish.
 const BlockSize = 16
@@ -31,13 +28,13 @@
 
 type KeySizeError int
 
-func (k KeySizeError) String() string {
+func (k KeySizeError) Error() string {
 	return "crypto/twofish: invalid key size " + strconv.Itoa(int(k))
 }
 
 // NewCipher creates and returns a Cipher.
 // The key argument should be the Twofish key, 16, 24 or 32 bytes.
-func NewCipher(key []byte) (*Cipher, os.Error) {
+func NewCipher(key []byte) (*Cipher, error) {
 	keylen := len(key)
 
 	if keylen != 16 && keylen != 24 && keylen != 32 {
diff --git a/src/pkg/crypto/x509/pkcs1.go b/src/pkg/crypto/x509/pkcs1.go
index 42afcb4..0d3ade3 100644
--- a/src/pkg/crypto/x509/pkcs1.go
+++ b/src/pkg/crypto/x509/pkcs1.go
@@ -7,7 +7,7 @@
 import (
 	"asn1"
 	"big"
-	"os"
+	"errors"
 	"crypto/rsa"
 )
 
@@ -36,7 +36,7 @@
 }
 
 // ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
-func ParsePKCS1PrivateKey(der []byte) (key *rsa.PrivateKey, err os.Error) {
+func ParsePKCS1PrivateKey(der []byte) (key *rsa.PrivateKey, err error) {
 	var priv pkcs1PrivateKey
 	rest, err := asn1.Unmarshal(der, &priv)
 	if len(rest) > 0 {
@@ -48,11 +48,11 @@
 	}
 
 	if priv.Version > 1 {
-		return nil, os.NewError("x509: unsupported private key version")
+		return nil, errors.New("x509: unsupported private key version")
 	}
 
 	if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
-		return nil, os.NewError("private key contains zero or negative value")
+		return nil, errors.New("private key contains zero or negative value")
 	}
 
 	key = new(rsa.PrivateKey)
@@ -67,7 +67,7 @@
 	key.Primes[1] = priv.Q
 	for i, a := range priv.AdditionalPrimes {
 		if a.Prime.Sign() <= 0 {
-			return nil, os.NewError("private key contains zero or negative prime")
+			return nil, errors.New("private key contains zero or negative prime")
 		}
 		key.Primes[i+2] = a.Prime
 		// We ignore the other two values because rsa will calculate
diff --git a/src/pkg/crypto/x509/verify.go b/src/pkg/crypto/x509/verify.go
index 4c0fecc..49056cf 100644
--- a/src/pkg/crypto/x509/verify.go
+++ b/src/pkg/crypto/x509/verify.go
@@ -5,7 +5,6 @@
 package x509
 
 import (
-	"os"
 	"strings"
 	"time"
 )
@@ -32,7 +31,7 @@
 	Reason InvalidReason
 }
 
-func (e CertificateInvalidError) String() string {
+func (e CertificateInvalidError) Error() string {
 	switch e.Reason {
 	case NotAuthorizedToSign:
 		return "x509: certificate is not authorized to sign other other certificates"
@@ -51,7 +50,7 @@
 	Host        string
 }
 
-func (h HostnameError) String() string {
+func (h HostnameError) Error() string {
 	var valid string
 	c := h.Certificate
 	if len(c.DNSNames) > 0 {
@@ -67,7 +66,7 @@
 	cert *Certificate
 }
 
-func (e UnknownAuthorityError) String() string {
+func (e UnknownAuthorityError) Error() string {
 	return "x509: certificate signed by unknown authority"
 }
 
@@ -87,7 +86,7 @@
 )
 
 // isValid performs validity checks on the c.
-func (c *Certificate) isValid(certType int, opts *VerifyOptions) os.Error {
+func (c *Certificate) isValid(certType int, opts *VerifyOptions) error {
 	if opts.CurrentTime < c.NotBefore.Seconds() ||
 		opts.CurrentTime > c.NotAfter.Seconds() {
 		return CertificateInvalidError{c, Expired}
@@ -136,7 +135,7 @@
 // the chain is c and the last element is from opts.Roots.
 //
 // WARNING: this doesn't do any revocation checking.
-func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err os.Error) {
+func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
 	if opts.CurrentTime == 0 {
 		opts.CurrentTime = time.Seconds()
 	}
@@ -160,7 +159,7 @@
 	return n
 }
 
-func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err os.Error) {
+func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) {
 	for _, rootNum := range opts.Roots.findVerifiedParents(c) {
 		root := opts.Roots.certs[rootNum]
 		err = root.isValid(rootCertificate, opts)
@@ -228,7 +227,7 @@
 
 // VerifyHostname returns nil if c is a valid certificate for the named host.
 // Otherwise it returns an os.Error describing the mismatch.
-func (c *Certificate) VerifyHostname(h string) os.Error {
+func (c *Certificate) VerifyHostname(h string) error {
 	if len(c.DNSNames) > 0 {
 		for _, match := range c.DNSNames {
 			if matchHostnames(match, h) {
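Since HostnameError, CertificateInvalidError, and UnknownAuthorityError now implement error directly, callers can still distinguish them by type, as the tests below do. A small illustrative helper (not part of the package):

package main

import (
	"crypto/x509"
	"fmt"
)

// classify is an illustrative helper that maps the x509 error types
// converted above to short descriptions.
func classify(err error) string {
	switch err.(type) {
	case x509.HostnameError:
		return "hostname mismatch"
	case x509.CertificateInvalidError:
		return "certificate invalid"
	case x509.UnknownAuthorityError:
		return "unknown authority"
	}
	return "other error"
}

func main() {
	fmt.Println(classify(x509.UnknownAuthorityError{}))
}
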
diff --git a/src/pkg/crypto/x509/verify_test.go b/src/pkg/crypto/x509/verify_test.go
index eaa8169..2194d15 100644
--- a/src/pkg/crypto/x509/verify_test.go
+++ b/src/pkg/crypto/x509/verify_test.go
@@ -7,7 +7,7 @@
 import (
 	"crypto/x509/pkix"
 	"encoding/pem"
-	"os"
+	"errors"
 	"strings"
 	"testing"
 )
@@ -19,7 +19,7 @@
 	currentTime   int64
 	dnsName       string
 
-	errorCallback  func(*testing.T, int, os.Error) bool
+	errorCallback  func(*testing.T, int, error) bool
 	expectedChains [][]string
 }
 
@@ -95,7 +95,7 @@
 	},
 }
 
-func expectHostnameError(t *testing.T, i int, err os.Error) (ok bool) {
+func expectHostnameError(t *testing.T, i int, err error) (ok bool) {
 	if _, ok := err.(HostnameError); !ok {
 		t.Errorf("#%d: error was not a HostnameError: %s", i, err)
 		return false
@@ -103,7 +103,7 @@
 	return true
 }
 
-func expectExpired(t *testing.T, i int, err os.Error) (ok bool) {
+func expectExpired(t *testing.T, i int, err error) (ok bool) {
 	if inval, ok := err.(CertificateInvalidError); !ok || inval.Reason != Expired {
 		t.Errorf("#%d: error was not Expired: %s", i, err)
 		return false
@@ -111,7 +111,7 @@
 	return true
 }
 
-func expectAuthorityUnknown(t *testing.T, i int, err os.Error) (ok bool) {
+func expectAuthorityUnknown(t *testing.T, i int, err error) (ok bool) {
 	if _, ok := err.(UnknownAuthorityError); !ok {
 		t.Errorf("#%d: error was not UnknownAuthorityError: %s", i, err)
 		return false
@@ -119,10 +119,10 @@
 	return true
 }
 
-func certificateFromPEM(pemBytes string) (*Certificate, os.Error) {
+func certificateFromPEM(pemBytes string) (*Certificate, error) {
 	block, _ := pem.Decode([]byte(pemBytes))
 	if block == nil {
-		return nil, os.NewError("failed to decode PEM")
+		return nil, errors.New("failed to decode PEM")
 	}
 	return ParseCertificate(block.Bytes)
 }
diff --git a/src/pkg/crypto/x509/x509.go b/src/pkg/crypto/x509/x509.go
index 73b32e7..da8b283 100644
--- a/src/pkg/crypto/x509/x509.go
+++ b/src/pkg/crypto/x509/x509.go
@@ -15,8 +15,8 @@
 	"crypto/sha1"
 	"crypto/x509/pkix"
 	"encoding/pem"
+	"errors"
 	"io"
-	"os"
 	"time"
 )
 
@@ -29,20 +29,20 @@
 
 // ParsePKIXPublicKey parses a DER encoded public key. These values are
 // typically found in PEM blocks with "BEGIN PUBLIC KEY".
-func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err os.Error) {
+func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
 	var pki publicKeyInfo
 	if _, err = asn1.Unmarshal(derBytes, &pki); err != nil {
 		return
 	}
 	algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
 	if algo == UnknownPublicKeyAlgorithm {
-		return nil, os.NewError("ParsePKIXPublicKey: unknown public key algorithm")
+		return nil, errors.New("ParsePKIXPublicKey: unknown public key algorithm")
 	}
 	return parsePublicKey(algo, &pki)
 }
 
 // MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
-func MarshalPKIXPublicKey(pub interface{}) ([]byte, os.Error) {
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
 	var pubBytes []byte
 
 	switch pub := pub.(type) {
@@ -52,7 +52,7 @@
 			E: pub.E,
 		})
 	default:
-		return nil, os.NewError("MarshalPKIXPublicKey: unknown public key type")
+		return nil, errors.New("MarshalPKIXPublicKey: unknown public key type")
 	}
 
 	pkix := pkixPublicKey{
@@ -331,7 +331,7 @@
 // that involves algorithms that are not currently implemented.
 type UnsupportedAlgorithmError struct{}
 
-func (UnsupportedAlgorithmError) String() string {
+func (UnsupportedAlgorithmError) Error() string {
 	return "cannot verify signature: algorithm unimplemented"
 }
 
@@ -340,7 +340,7 @@
 // certificate signing key.
 type ConstraintViolationError struct{}
 
-func (ConstraintViolationError) String() string {
+func (ConstraintViolationError) Error() string {
 	return "invalid signature: parent certificate cannot sign this kind of certificate"
 }
 
@@ -350,7 +350,7 @@
 
 // CheckSignatureFrom verifies that the signature on c is a valid signature
 // from parent.
-func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err os.Error) {
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err error) {
 	// RFC 5280, 4.2.1.9:
 	// "If the basic constraints extension is not present in a version 3
 	// certificate, or the extension is present but the cA boolean is not
@@ -376,7 +376,7 @@
 
 // CheckSignature verifies that signature is a valid signature over signed from
 // c's public key.
-func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err os.Error) {
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err error) {
 	var hashType crypto.Hash
 
 	switch algo {
@@ -409,10 +409,10 @@
 			return err
 		}
 		if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
-			return os.NewError("DSA signature contained zero or negative values")
+			return errors.New("DSA signature contained zero or negative values")
 		}
 		if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) {
-			return os.NewError("DSA verification failure")
+			return errors.New("DSA verification failure")
 		}
 		return
 	}
@@ -420,14 +420,14 @@
 }
 
 // CheckCRLSignature checks that the signature in crl is from c.
-func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) (err os.Error) {
+func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) (err error) {
 	algo := getSignatureAlgorithmFromOID(crl.SignatureAlgorithm.Algorithm)
 	return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
 }
 
 type UnhandledCriticalExtension struct{}
 
-func (h UnhandledCriticalExtension) String() string {
+func (h UnhandledCriticalExtension) Error() string {
 	return "unhandled critical extension"
 }
 
@@ -454,7 +454,7 @@
 	Max  int    `asn1:"optional,tag:1"`
 }
 
-func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, os.Error) {
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
 	asn1Data := keyData.PublicKey.RightAlign()
 	switch algo {
 	case RSA:
@@ -482,7 +482,7 @@
 			return nil, err
 		}
 		if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
-			return nil, os.NewError("zero or negative DSA parameter")
+			return nil, errors.New("zero or negative DSA parameter")
 		}
 		pub := &dsa.PublicKey{
 			Parameters: dsa.Parameters{
@@ -499,7 +499,7 @@
 	panic("unreachable")
 }
 
-func parseCertificate(in *certificate) (*Certificate, os.Error) {
+func parseCertificate(in *certificate) (*Certificate, error) {
 	out := new(Certificate)
 	out.Raw = in.Raw
 	out.RawTBSCertificate = in.TBSCertificate.Raw
@@ -513,14 +513,14 @@
 
 	out.PublicKeyAlgorithm =
 		getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
-	var err os.Error
+	var err error
 	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
 	if err != nil {
 		return nil, err
 	}
 
 	if in.TBSCertificate.SerialNumber.Sign() < 0 {
-		return nil, os.NewError("negative serial number")
+		return nil, errors.New("negative serial number")
 	}
 
 	out.Version = in.TBSCertificate.Version + 1
@@ -737,7 +737,7 @@
 }
 
 // ParseCertificate parses a single certificate from the given ASN.1 DER data.
-func ParseCertificate(asn1Data []byte) (*Certificate, os.Error) {
+func ParseCertificate(asn1Data []byte) (*Certificate, error) {
 	var cert certificate
 	rest, err := asn1.Unmarshal(asn1Data, &cert)
 	if err != nil {
@@ -752,12 +752,12 @@
 
 // ParseCertificates parses one or more certificates from the given ASN.1 DER
 // data. The certificates must be concatenated with no intermediate padding.
-func ParseCertificates(asn1Data []byte) ([]*Certificate, os.Error) {
+func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
 	var v []*certificate
 
 	for len(asn1Data) > 0 {
 		cert := new(certificate)
-		var err os.Error
+		var err error
 		asn1Data, err = asn1.Unmarshal(asn1Data, cert)
 		if err != nil {
 			return nil, err
@@ -794,7 +794,7 @@
 	oidExtensionNameConstraints     = []int{2, 5, 29, 30}
 )
 
-func buildExtensions(template *Certificate) (ret []pkix.Extension, err os.Error) {
+func buildExtensions(template *Certificate) (ret []pkix.Extension, err error) {
 	ret = make([]pkix.Extension, 7 /* maximum number of elements. */ )
 	n := 0
 
@@ -910,7 +910,7 @@
 // signee and priv is the private key of the signer.
 //
 // The returned slice is the certificate in DER encoding.
-func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.PublicKey, priv *rsa.PrivateKey) (cert []byte, err os.Error) {
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.PublicKey, priv *rsa.PrivateKey) (cert []byte, err error) {
 	asn1PublicKey, err := asn1.Marshal(rsaPublicKey{
 		N: pub.N,
 		E: pub.E,
@@ -984,7 +984,7 @@
 // encoded CRLs will appear where they should be DER encoded, so this function
 // will transparently handle PEM encoding as long as there isn't any leading
 // garbage.
-func ParseCRL(crlBytes []byte) (certList *pkix.CertificateList, err os.Error) {
+func ParseCRL(crlBytes []byte) (certList *pkix.CertificateList, err error) {
 	if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
 		block, _ := pem.Decode(crlBytes)
 		if block != nil && block.Type == pemType {
@@ -995,7 +995,7 @@
 }
 
 // ParseDERCRL parses a DER encoded CRL from the given bytes.
-func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err os.Error) {
+func ParseDERCRL(derBytes []byte) (certList *pkix.CertificateList, err error) {
 	certList = new(pkix.CertificateList)
 	_, err = asn1.Unmarshal(derBytes, certList)
 	if err != nil {
@@ -1006,7 +1006,7 @@
 
 // CreateCRL returns a DER encoded CRL, signed by this Certificate, that
 // contains the given list of revoked certificates.
-func (c *Certificate) CreateCRL(rand io.Reader, priv *rsa.PrivateKey, revokedCerts []pkix.RevokedCertificate, now, expiry *time.Time) (crlBytes []byte, err os.Error) {
+func (c *Certificate) CreateCRL(rand io.Reader, priv *rsa.PrivateKey, revokedCerts []pkix.RevokedCertificate, now, expiry *time.Time) (crlBytes []byte, err error) {
 	tbsCertList := pkix.TBSCertificateList{
 		Version: 2,
 		Signature: pkix.AlgorithmIdentifier{
diff --git a/src/pkg/crypto/xtea/cipher.go b/src/pkg/crypto/xtea/cipher.go
index b3fba3c..64d933c 100644
--- a/src/pkg/crypto/xtea/cipher.go
+++ b/src/pkg/crypto/xtea/cipher.go
@@ -8,10 +8,7 @@
 
 // For details, see http://www.cix.co.uk/~klockstone/xtea.pdf
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // The XTEA block size in bytes.
 const BlockSize = 8
@@ -24,14 +21,14 @@
 
 type KeySizeError int
 
-func (k KeySizeError) String() string {
+func (k KeySizeError) Error() string {
 	return "crypto/xtea: invalid key size " + strconv.Itoa(int(k))
 }
 
 // NewCipher creates and returns a new Cipher.
 // The key argument should be the XTEA key.
 // XTEA only supports 128 bit (16 byte) keys.
-func NewCipher(key []byte) (*Cipher, os.Error) {
+func NewCipher(key []byte) (*Cipher, error) {
 	k := len(key)
 	switch k {
 	default:
diff --git a/src/pkg/csv/reader.go b/src/pkg/csv/reader.go
index a06b9789..ae0f567 100644
--- a/src/pkg/csv/reader.go
+++ b/src/pkg/csv/reader.go
@@ -52,30 +52,30 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
-	"os"
 	"unicode"
 )
 
 // A ParseError is returned for parsing errors.
 // The first line is 1.  The first column is 0.
 type ParseError struct {
-	Line   int      // Line where the error occurred
-	Column int      // Column (rune index) where the error occurred
-	Error  os.Error // The actual error
+	Line   int   // Line where the error occurred
+	Column int   // Column (rune index) where the error occurred
+	Err    error // The actual error
 }
 
-func (e *ParseError) String() string {
-	return fmt.Sprintf("line %d, column %d: %s", e.Line, e.Column, e.Error)
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("line %d, column %d: %s", e.Line, e.Column, e.Err)
 }
 
 // These are the errors that can be returned in ParseError.Error
 var (
-	ErrTrailingComma = os.NewError("extra delimiter at end of line")
-	ErrBareQuote     = os.NewError("bare \" in non-quoted-field")
-	ErrQuote         = os.NewError("extraneous \" in field")
-	ErrFieldCount    = os.NewError("wrong number of fields in line")
+	ErrTrailingComma = errors.New("extra delimiter at end of line")
+	ErrBareQuote     = errors.New("bare \" in non-quoted-field")
+	ErrQuote         = errors.New("extraneous \" in field")
+	ErrFieldCount    = errors.New("wrong number of fields in line")
 )
 
 // A Reader reads records from a CSV-encoded file.
@@ -122,17 +122,17 @@
 }
 
 // error creates a new ParseError based on err.
-func (r *Reader) error(err os.Error) os.Error {
+func (r *Reader) error(err error) error {
 	return &ParseError{
 		Line:   r.line,
 		Column: r.column,
-		Error:  err,
+		Err:    err,
 	}
 }
 
 // Read reads one record from r.  The record is a slice of strings with each
 // string representing one field.
-func (r *Reader) Read() (record []string, err os.Error) {
+func (r *Reader) Read() (record []string, err error) {
 	for {
 		record, err = r.parseRecord()
 		if record != nil {
@@ -156,10 +156,10 @@
 
 // ReadAll reads all the remaining records from r.
 // Each record is a slice of fields.
-func (r *Reader) ReadAll() (records [][]string, err os.Error) {
+func (r *Reader) ReadAll() (records [][]string, err error) {
 	for {
 		record, err := r.Read()
-		if err == os.EOF {
+		if err == io.EOF {
 			return records, nil
 		}
 		if err != nil {
@@ -173,7 +173,7 @@
 // readRune reads one rune from r, folding \r\n to \n and keeping track
 // of how far into the line we have read.  r.column will point to the start
 // of this rune, not the end of this rune.
-func (r *Reader) readRune() (rune, os.Error) {
+func (r *Reader) readRune() (rune, error) {
 	r1, _, err := r.r.ReadRune()
 
 	// Handle \r\n here.  We make the simplifying assumption that
@@ -199,7 +199,7 @@
 }
 
 // skip reads runes up to and including the rune delim or until error.
-func (r *Reader) skip(delim rune) os.Error {
+func (r *Reader) skip(delim rune) error {
 	for {
 		r1, err := r.readRune()
 		if err != nil {
@@ -213,7 +213,7 @@
 }
 
 // parseRecord reads and parses a single csv record from r.
-func (r *Reader) parseRecord() (fields []string, err os.Error) {
+func (r *Reader) parseRecord() (fields []string, err error) {
 	// Each record starts on a new line.  We increment our line
 	// number (lines start at 1, not 0) and set column to -1
 	// so as we increment in readRune it points to the character we read.
@@ -240,7 +240,7 @@
 		if haveField {
 			fields = append(fields, r.field.String())
 		}
-		if delim == '\n' || err == os.EOF {
+		if delim == '\n' || err == io.EOF {
 			return fields, err
 		} else if err != nil {
 			return nil, err
@@ -252,7 +252,7 @@
 // parseField parses the next field in the record.  The read field is
 // located in r.field.  Delim is the first character not part of the field
 // (r.Comma or '\n').
-func (r *Reader) parseField() (haveField bool, delim rune, err os.Error) {
+func (r *Reader) parseField() (haveField bool, delim rune, err error) {
 	r.field.Reset()
 
 	r1, err := r.readRune()
@@ -260,7 +260,7 @@
 		// If we have EOF and are not at the start of a line
 		// then we return the empty field.  We have already
 		// checked for trailing commas if needed.
-		if err == os.EOF && r.column != 0 {
+		if err == io.EOF && r.column != 0 {
 			return true, 0, err
 		}
 		return false, 0, err
@@ -292,7 +292,7 @@
 		for {
 			r1, err = r.readRune()
 			if err != nil {
-				if err == os.EOF {
+				if err == io.EOF {
 					if r.LazyQuotes {
 						return true, 0, err
 					}
@@ -342,7 +342,7 @@
 	}
 
 	if err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			return true, 0, err
 		}
 		return false, 0, err
@@ -362,7 +362,7 @@
 				}
 			}
 		}
-		if err == os.EOF || r1 == '\n' {
+		if err == io.EOF || r1 == '\n' {
 			r.column = c // report the comma
 			return false, 0, r.error(ErrTrailingComma)
 		}
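End-of-input is now reported as io.EOF rather than os.EOF, so read loops terminate on that sentinel exactly as ReadAll above does. A minimal caller sketch (the import path is the one used in this tree; the package later moved under encoding/):

package main

import (
	"bytes"
	"csv" // "encoding/csv" in later trees
	"fmt"
	"io"
)

func main() {
	r := csv.NewReader(bytes.NewBufferString("a,b\nc,d\n"))
	for {
		record, err := r.Read()
		if err == io.EOF {
			break // normal end of input, not a failure
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Println(record)
	}
}
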
diff --git a/src/pkg/csv/reader_test.go b/src/pkg/csv/reader_test.go
index 1b23605..5fd84a7 100644
--- a/src/pkg/csv/reader_test.go
+++ b/src/pkg/csv/reader_test.go
@@ -267,7 +267,7 @@
 		out, err := r.ReadAll()
 		perr, _ := err.(*ParseError)
 		if tt.Error != "" {
-			if err == nil || !strings.Contains(err.String(), tt.Error) {
+			if err == nil || !strings.Contains(err.Error(), tt.Error) {
 				t.Errorf("%s: error %v, want error %q", tt.Name, err, tt.Error)
 			} else if tt.Line != 0 && (tt.Line != perr.Line || tt.Column != perr.Column) {
 				t.Errorf("%s: error at %d:%d expected %d:%d", tt.Name, perr.Line, perr.Column, tt.Line, tt.Column)
diff --git a/src/pkg/csv/writer.go b/src/pkg/csv/writer.go
index 98573c2..5ea20e1 100644
--- a/src/pkg/csv/writer.go
+++ b/src/pkg/csv/writer.go
@@ -7,7 +7,6 @@
 import (
 	"bufio"
 	"io"
-	"os"
 	"strings"
 	"unicode"
 	"utf8"
@@ -38,7 +37,7 @@
 
 // Write writes a single CSV record to w along with any necessary quoting.
 // A record is a slice of strings with each string being one field.
-func (w *Writer) Write(record []string) (err os.Error) {
+func (w *Writer) Write(record []string) (err error) {
 	for n, field := range record {
 		if n > 0 {
 			if _, err = w.w.WriteRune(w.Comma); err != nil {
@@ -98,7 +97,7 @@
 }
 
 // WriteAll writes multiple CSV records to w using Write and then calls Flush.
-func (w *Writer) WriteAll(records [][]string) (err os.Error) {
+func (w *Writer) WriteAll(records [][]string) (err error) {
 	for _, record := range records {
 		err = w.Write(record)
 		if err != nil {
diff --git a/src/pkg/debug/dwarf/buf.go b/src/pkg/debug/dwarf/buf.go
index 2d29ceb..6b4af7d 100644
--- a/src/pkg/debug/dwarf/buf.go
+++ b/src/pkg/debug/dwarf/buf.go
@@ -8,7 +8,6 @@
 
 import (
 	"encoding/binary"
-	"os"
 	"strconv"
 )
 
@@ -20,7 +19,7 @@
 	off      Offset
 	data     []byte
 	addrsize int
-	err      os.Error
+	err      error
 }
 
 func makeBuf(d *Data, name string, off Offset, data []byte, addrsize int) buf {
@@ -146,9 +145,9 @@
 type DecodeError struct {
 	Name   string
 	Offset Offset
-	Error  string
+	Err    string
 }
 
-func (e DecodeError) String() string {
-	return "decoding dwarf section " + e.Name + " at offset 0x" + strconv.Itob64(int64(e.Offset), 16) + ": " + e.Error
+func (e DecodeError) Error() string {
+	return "decoding dwarf section " + e.Name + " at offset 0x" + strconv.Itob64(int64(e.Offset), 16) + ": " + e.Err
 }
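DecodeError (like csv's ParseError earlier) renames its Error field to Err: a type cannot declare both a field and a method named Error, and the method is what the new interface requires. A standalone sketch of the resulting shape:

package main

import "fmt"

// decodeError mirrors the DecodeError shape above: the method must be
// named Error to satisfy the error interface, so the detail field
// becomes Err.
type decodeError struct {
	Name string
	Err  string
}

func (e decodeError) Error() string {
	return "decoding section " + e.Name + ": " + e.Err
}

func main() {
	var err error = decodeError{Name: "info", Err: "underflow"}
	fmt.Println(err)
}
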
diff --git a/src/pkg/debug/dwarf/entry.go b/src/pkg/debug/dwarf/entry.go
index 549e5c2..2885d8f 100644
--- a/src/pkg/debug/dwarf/entry.go
+++ b/src/pkg/debug/dwarf/entry.go
@@ -10,7 +10,7 @@
 
 package dwarf
 
-import "os"
+import "errors"
 
 // a single entry's description: a sequence of attributes
 type abbrev struct {
@@ -29,7 +29,7 @@
 
 // ParseAbbrev returns the abbreviation table that starts at byte off
 // in the .debug_abbrev section.
-func (d *Data) parseAbbrev(off uint32) (abbrevTable, os.Error) {
+func (d *Data) parseAbbrev(off uint32) (abbrevTable, error) {
 	if m, ok := d.abbrevCache[off]; ok {
 		return m, nil
 	}
@@ -232,7 +232,7 @@
 type Reader struct {
 	b            buf
 	d            *Data
-	err          os.Error
+	err          error
 	unit         int
 	lastChildren bool   // .Children of last entry returned by Next
 	lastSibling  Offset // .Val(AttrSibling) of last entry returned by Next
@@ -273,7 +273,7 @@
 			return
 		}
 	}
-	r.err = os.NewError("offset out of range")
+	r.err = errors.New("offset out of range")
 }
 
 // maybeNextUnit advances to the next unit if this one is finished.
@@ -289,7 +289,7 @@
 // It returns nil, nil when it reaches the end of the section.
 // It returns an error if the current offset is invalid or the data at the
 // offset cannot be decoded as a valid Entry.
-func (r *Reader) Next() (*Entry, os.Error) {
+func (r *Reader) Next() (*Entry, error) {
 	if r.err != nil {
 		return nil, r.err
 	}
diff --git a/src/pkg/debug/dwarf/open.go b/src/pkg/debug/dwarf/open.go
index d9525f7..9543297 100644
--- a/src/pkg/debug/dwarf/open.go
+++ b/src/pkg/debug/dwarf/open.go
@@ -7,10 +7,7 @@
 // http://dwarfstd.org/doc/dwarf-2.0.0.pdf
 package dwarf
 
-import (
-	"encoding/binary"
-	"os"
-)
+import "encoding/binary"
 
 // Data represents the DWARF debugging information
 // loaded from an executable file (for example, an ELF or Mach-O executable).
@@ -40,7 +37,7 @@
 // The []byte arguments are the data from the corresponding debug section
 // in the object file; for example, for an ELF object, abbrev is the contents of
 // the ".debug_abbrev" section.
-func New(abbrev, aranges, frame, info, line, pubnames, ranges, str []byte) (*Data, os.Error) {
+func New(abbrev, aranges, frame, info, line, pubnames, ranges, str []byte) (*Data, error) {
 	d := &Data{
 		abbrev:      abbrev,
 		aranges:     aranges,
diff --git a/src/pkg/debug/dwarf/type.go b/src/pkg/debug/dwarf/type.go
index 9fa221b..e8ce8d5 100644
--- a/src/pkg/debug/dwarf/type.go
+++ b/src/pkg/debug/dwarf/type.go
@@ -8,10 +8,7 @@
 
 package dwarf
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // A Type conventionally represents a pointer to any of the
 // specific Type structures (CharType, StructType, etc.).
@@ -254,7 +251,7 @@
 
 func (t *TypedefType) Size() int64 { return t.Type.Size() }
 
-func (d *Data) Type(off Offset) (Type, os.Error) {
+func (d *Data) Type(off Offset) (Type, error) {
 	if t, ok := d.typeCache[off]; ok {
 		return t, nil
 	}
diff --git a/src/pkg/debug/dwarf/unit.go b/src/pkg/debug/dwarf/unit.go
index 02cb363..c10d75d 100644
--- a/src/pkg/debug/dwarf/unit.go
+++ b/src/pkg/debug/dwarf/unit.go
@@ -4,10 +4,7 @@
 
 package dwarf
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // DWARF debug info is split into a sequence of compilation units.
 // Each unit has its own abbreviation table and address size.
@@ -20,7 +17,7 @@
 	addrsize int
 }
 
-func (d *Data) parseUnits() ([]unit, os.Error) {
+func (d *Data) parseUnits() ([]unit, error) {
 	// Count units.
 	nunit := 0
 	b := makeBuf(d, "info", 0, d.info, 0)
diff --git a/src/pkg/debug/elf/file.go b/src/pkg/debug/elf/file.go
index a0ddb1f..184ca83 100644
--- a/src/pkg/debug/elf/file.go
+++ b/src/pkg/debug/elf/file.go
@@ -9,6 +9,7 @@
 	"bytes"
 	"debug/dwarf"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -71,7 +72,7 @@
 }
 
 // Data reads and returns the contents of the ELF section.
-func (s *Section) Data() ([]byte, os.Error) {
+func (s *Section) Data() ([]byte, error) {
 	dat := make([]byte, s.sr.Size())
 	n, err := s.sr.ReadAt(dat, 0)
 	return dat[0:n], err
@@ -79,9 +80,9 @@
 
 // stringTable reads and returns the string table given by the
 // specified link value.
-func (f *File) stringTable(link uint32) ([]byte, os.Error) {
+func (f *File) stringTable(link uint32) ([]byte, error) {
 	if link <= 0 || link >= uint32(len(f.Sections)) {
-		return nil, os.NewError("section has invalid string table link")
+		return nil, errors.New("section has invalid string table link")
 	}
 	return f.Sections[link].Data()
 }
@@ -136,7 +137,7 @@
 	val interface{}
 }
 
-func (e *FormatError) String() string {
+func (e *FormatError) Error() string {
 	msg := e.msg
 	if e.val != nil {
 		msg += fmt.Sprintf(" '%v' ", e.val)
@@ -146,7 +147,7 @@
 }
 
 // Open opens the named file using os.Open and prepares it for use as an ELF binary.
-func Open(name string) (*File, os.Error) {
+func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
 		return nil, err
@@ -163,8 +164,8 @@
 // Close closes the File.
 // If the File was created using NewFile directly instead of Open,
 // Close has no effect.
-func (f *File) Close() os.Error {
-	var err os.Error
+func (f *File) Close() error {
+	var err error
 	if f.closer != nil {
 		err = f.closer.Close()
 		f.closer = nil
@@ -185,7 +186,7 @@
 
 // NewFile creates a new File for accessing an ELF binary in an underlying reader.
 // The ELF binary is expected to start at position 0 in the ReaderAt.
-func NewFile(r io.ReaderAt) (*File, os.Error) {
+func NewFile(r io.ReaderAt) (*File, error) {
 	sr := io.NewSectionReader(r, 0, 1<<63-1)
 	// Read and decode ELF identifier
 	var ident [16]uint8
@@ -381,7 +382,7 @@
 
 // getSymbols returns a slice of Symbols from parsing the symbol table
 // with the given type, along with the associated string table.
-func (f *File) getSymbols(typ SectionType) ([]Symbol, []byte, os.Error) {
+func (f *File) getSymbols(typ SectionType) ([]Symbol, []byte, error) {
 	switch f.Class {
 	case ELFCLASS64:
 		return f.getSymbols64(typ)
@@ -390,27 +391,27 @@
 		return f.getSymbols32(typ)
 	}
 
-	return nil, nil, os.NewError("not implemented")
+	return nil, nil, errors.New("not implemented")
 }
 
-func (f *File) getSymbols32(typ SectionType) ([]Symbol, []byte, os.Error) {
+func (f *File) getSymbols32(typ SectionType) ([]Symbol, []byte, error) {
 	symtabSection := f.SectionByType(typ)
 	if symtabSection == nil {
-		return nil, nil, os.NewError("no symbol section")
+		return nil, nil, errors.New("no symbol section")
 	}
 
 	data, err := symtabSection.Data()
 	if err != nil {
-		return nil, nil, os.NewError("cannot load symbol section")
+		return nil, nil, errors.New("cannot load symbol section")
 	}
 	symtab := bytes.NewBuffer(data)
 	if symtab.Len()%Sym32Size != 0 {
-		return nil, nil, os.NewError("length of symbol section is not a multiple of SymSize")
+		return nil, nil, errors.New("length of symbol section is not a multiple of SymSize")
 	}
 
 	strdata, err := f.stringTable(symtabSection.Link)
 	if err != nil {
-		return nil, nil, os.NewError("cannot load string table section")
+		return nil, nil, errors.New("cannot load string table section")
 	}
 
 	// The first entry is all zeros.
@@ -436,24 +437,24 @@
 	return symbols, strdata, nil
 }
 
-func (f *File) getSymbols64(typ SectionType) ([]Symbol, []byte, os.Error) {
+func (f *File) getSymbols64(typ SectionType) ([]Symbol, []byte, error) {
 	symtabSection := f.SectionByType(typ)
 	if symtabSection == nil {
-		return nil, nil, os.NewError("no symbol section")
+		return nil, nil, errors.New("no symbol section")
 	}
 
 	data, err := symtabSection.Data()
 	if err != nil {
-		return nil, nil, os.NewError("cannot load symbol section")
+		return nil, nil, errors.New("cannot load symbol section")
 	}
 	symtab := bytes.NewBuffer(data)
 	if symtab.Len()%Sym64Size != 0 {
-		return nil, nil, os.NewError("length of symbol section is not a multiple of Sym64Size")
+		return nil, nil, errors.New("length of symbol section is not a multiple of Sym64Size")
 	}
 
 	strdata, err := f.stringTable(symtabSection.Link)
 	if err != nil {
-		return nil, nil, os.NewError("cannot load string table section")
+		return nil, nil, errors.New("cannot load string table section")
 	}
 
 	// The first entry is all zeros.
@@ -506,17 +507,17 @@
 
 // applyRelocations applies relocations to dst. rels is a relocations section
 // in RELA format.
-func (f *File) applyRelocations(dst []byte, rels []byte) os.Error {
+func (f *File) applyRelocations(dst []byte, rels []byte) error {
 	if f.Class == ELFCLASS64 && f.Machine == EM_X86_64 {
 		return f.applyRelocationsAMD64(dst, rels)
 	}
 
-	return os.NewError("not implemented")
+	return errors.New("not implemented")
 }
 
-func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) os.Error {
+func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error {
 	if len(rels)%Sym64Size != 0 {
-		return os.NewError("length of relocation section is not a multiple of Sym64Size")
+		return errors.New("length of relocation section is not a multiple of Sym64Size")
 	}
 
 	symbols, _, err := f.getSymbols(SHT_SYMTAB)
@@ -558,7 +559,7 @@
 	return nil
 }
 
-func (f *File) DWARF() (*dwarf.Data, os.Error) {
+func (f *File) DWARF() (*dwarf.Data, error) {
 	// There are many other DWARF sections, but these
 	// are the required ones, and the debug/dwarf package
 	// does not use the others, so don't bother loading them.
@@ -596,7 +597,7 @@
 }
 
 // Symbols returns the symbol table for f.
-func (f *File) Symbols() ([]Symbol, os.Error) {
+func (f *File) Symbols() ([]Symbol, error) {
 	sym, _, err := f.getSymbols(SHT_SYMTAB)
 	return sym, err
 }
@@ -611,7 +612,7 @@
 // referred to by the binary f that are expected to be
 // satisfied by other libraries at dynamic load time.
 // It does not return weak symbols.
-func (f *File) ImportedSymbols() ([]ImportedSymbol, os.Error) {
+func (f *File) ImportedSymbols() ([]ImportedSymbol, error) {
 	sym, str, err := f.getSymbols(SHT_DYNSYM)
 	if err != nil {
 		return nil, err
@@ -721,7 +722,7 @@
 // ImportedLibraries returns the names of all libraries
 // referred to by the binary f that are expected to be
 // linked with the binary at dynamic link time.
-func (f *File) ImportedLibraries() ([]string, os.Error) {
+func (f *File) ImportedLibraries() ([]string, error) {
 	ds := f.SectionByType(SHT_DYNAMIC)
 	if ds == nil {
 		// not dynamic, so no libraries
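With the conversion, Open, Close, Symbols, and DWARF all return plain error values. A hedged usage sketch (the binary path is illustrative):

package main

import (
	"debug/elf"
	"fmt"
)

func main() {
	f, err := elf.Open("/bin/ls") // illustrative path
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	syms, err := f.Symbols()
	if err != nil {
		fmt.Println("symbols:", err)
		return
	}
	fmt.Println("symbol count:", len(syms))
}
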
diff --git a/src/pkg/debug/gosym/symtab.go b/src/pkg/debug/gosym/symtab.go
index dea460d..52d7d55 100644
--- a/src/pkg/debug/gosym/symtab.go
+++ b/src/pkg/debug/gosym/symtab.go
@@ -15,7 +15,6 @@
 import (
 	"encoding/binary"
 	"fmt"
-	"os"
 	"strconv"
 	"strings"
 )
@@ -105,7 +104,7 @@
 	name   []byte
 }
 
-func walksymtab(data []byte, fn func(sym) os.Error) os.Error {
+func walksymtab(data []byte, fn func(sym) error) error {
 	var s sym
 	p := data
 	for len(p) >= 6 {
@@ -149,9 +148,9 @@
 
 // NewTable decodes the Go symbol table in data,
 // returning an in-memory representation.
-func NewTable(symtab []byte, pcln *LineTable) (*Table, os.Error) {
+func NewTable(symtab []byte, pcln *LineTable) (*Table, error) {
 	var n int
-	err := walksymtab(symtab, func(s sym) os.Error {
+	err := walksymtab(symtab, func(s sym) error {
 		n++
 		return nil
 	})
@@ -165,7 +164,7 @@
 	nf := 0
 	nz := 0
 	lasttyp := uint8(0)
-	err = walksymtab(symtab, func(s sym) os.Error {
+	err = walksymtab(symtab, func(s sym) error {
 		n := len(t.Syms)
 		t.Syms = t.Syms[0 : n+1]
 		ts := &t.Syms[n]
@@ -355,7 +354,7 @@
 // LineToPC looks up the first program counter on the given line in
 // the named file.  Returns UnknownFileError or UnknownLineError if
 // there is an error looking up this line.
-func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err os.Error) {
+func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err error) {
 	obj, ok := t.Files[file]
 	if !ok {
 		return 0, nil, UnknownFileError(file)
@@ -466,7 +465,7 @@
 	return tos.path, aline - tos.start - tos.offset + 1
 }
 
-func (o *Obj) alineFromLine(path string, line int) (int, os.Error) {
+func (o *Obj) alineFromLine(path string, line int) (int, error) {
 	if line < 1 {
 		return 0, &UnknownLineError{path, line}
 	}
@@ -516,7 +515,7 @@
 // the symbol table.
 type UnknownFileError string
 
-func (e UnknownFileError) String() string { return "unknown file: " + string(e) }
+func (e UnknownFileError) Error() string { return "unknown file: " + string(e) }
 
 // UnknownLineError represents a failure to map a line to a program
 // counter, either because the line is beyond the bounds of the file
@@ -526,7 +525,7 @@
 	Line int
 }
 
-func (e *UnknownLineError) String() string {
+func (e *UnknownLineError) Error() string {
 	return "no code at " + e.File + ":" + strconv.Itoa(e.Line)
 }
 
@@ -538,7 +537,7 @@
 	val interface{}
 }
 
-func (e *DecodingError) String() string {
+func (e *DecodingError) Error() string {
 	msg := e.msg
 	if e.val != nil {
 		msg += fmt.Sprintf(" '%v'", e.val)
diff --git a/src/pkg/debug/macho/file.go b/src/pkg/debug/macho/file.go
index 721a4c4..c7cb905 100644
--- a/src/pkg/debug/macho/file.go
+++ b/src/pkg/debug/macho/file.go
@@ -12,6 +12,7 @@
 	"bytes"
 	"debug/dwarf"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -71,7 +72,7 @@
 }
 
 // Data reads and returns the contents of the segment.
-func (s *Segment) Data() ([]byte, os.Error) {
+func (s *Segment) Data() ([]byte, error) {
 	dat := make([]byte, s.sr.Size())
 	n, err := s.sr.ReadAt(dat, 0)
 	return dat[0:n], err
@@ -106,7 +107,7 @@
 }
 
 // Data reads and returns the contents of the Mach-O section.
-func (s *Section) Data() ([]byte, os.Error) {
+func (s *Section) Data() ([]byte, error) {
 	dat := make([]byte, s.sr.Size())
 	n, err := s.sr.ReadAt(dat, 0)
 	return dat[0:n], err
@@ -148,7 +149,7 @@
 	val interface{}
 }
 
-func (e *FormatError) String() string {
+func (e *FormatError) Error() string {
 	msg := e.msg
 	if e.val != nil {
 		msg += fmt.Sprintf(" '%v'", e.val)
@@ -158,7 +159,7 @@
 }
 
 // Open opens the named file using os.Open and prepares it for use as a Mach-O binary.
-func Open(name string) (*File, os.Error) {
+func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
 		return nil, err
@@ -175,8 +176,8 @@
 // Close closes the File.
 // If the File was created using NewFile directly instead of Open,
 // Close has no effect.
-func (f *File) Close() os.Error {
-	var err os.Error
+func (f *File) Close() error {
+	var err error
 	if f.closer != nil {
 		err = f.closer.Close()
 		f.closer = nil
@@ -186,7 +187,7 @@
 
 // NewFile creates a new File for accessing a Mach-O binary in an underlying reader.
 // The Mach-O binary is expected to start at position 0 in the ReaderAt.
-func NewFile(r io.ReaderAt) (*File, os.Error) {
+func NewFile(r io.ReaderAt) (*File, error) {
 	f := new(File)
 	sr := io.NewSectionReader(r, 0, 1<<63-1)
 
@@ -391,7 +392,7 @@
 	return f, nil
 }
 
-func (f *File) parseSymtab(symdat, strtab, cmddat []byte, hdr *SymtabCmd, offset int64) (*Symtab, os.Error) {
+func (f *File) parseSymtab(symdat, strtab, cmddat []byte, hdr *SymtabCmd, offset int64) (*Symtab, error) {
 	bo := f.ByteOrder
 	symtab := make([]Symbol, hdr.Nsyms)
 	b := bytes.NewBuffer(symdat)
@@ -463,7 +464,7 @@
 }
 
 // DWARF returns the DWARF debug information for the Mach-O file.
-func (f *File) DWARF() (*dwarf.Data, os.Error) {
+func (f *File) DWARF() (*dwarf.Data, error) {
 	// There are many other DWARF sections, but these
 	// are the required ones, and the debug/dwarf package
 	// does not use the others, so don't bother loading them.
@@ -473,7 +474,7 @@
 		name = "__debug_" + name
 		s := f.Section(name)
 		if s == nil {
-			return nil, os.NewError("missing Mach-O section " + name)
+			return nil, errors.New("missing Mach-O section " + name)
 		}
 		b, err := s.Data()
 		if err != nil && uint64(len(b)) < s.Size {
@@ -489,7 +490,7 @@
 // ImportedSymbols returns the names of all symbols
 // referred to by the binary f that are expected to be
 // satisfied by other libraries at dynamic load time.
-func (f *File) ImportedSymbols() ([]string, os.Error) {
+func (f *File) ImportedSymbols() ([]string, error) {
 	if f.Dysymtab == nil || f.Symtab == nil {
 		return nil, &FormatError{0, "missing symbol table", nil}
 	}
@@ -506,7 +507,7 @@
 // ImportedLibraries returns the paths of all libraries
 // referred to by the binary f that are expected to be
 // linked with the binary at dynamic link time.
-func (f *File) ImportedLibraries() ([]string, os.Error) {
+func (f *File) ImportedLibraries() ([]string, error) {
 	var all []string
 	for _, l := range f.Loads {
 		if lib, ok := l.(*Dylib); ok {
diff --git a/src/pkg/debug/pe/file.go b/src/pkg/debug/pe/file.go
index d86d916..6b98a5f 100644
--- a/src/pkg/debug/pe/file.go
+++ b/src/pkg/debug/pe/file.go
@@ -8,6 +8,7 @@
 import (
 	"debug/dwarf"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -59,7 +60,7 @@
 }
 
 // Data reads and returns the contents of the PE section.
-func (s *Section) Data() ([]byte, os.Error) {
+func (s *Section) Data() ([]byte, error) {
 	dat := make([]byte, s.sr.Size())
 	n, err := s.sr.ReadAt(dat, 0)
 	return dat[0:n], err
@@ -74,7 +75,7 @@
 	val interface{}
 }
 
-func (e *FormatError) String() string {
+func (e *FormatError) Error() string {
 	msg := e.msg
 	if e.val != nil {
 		msg += fmt.Sprintf(" '%v'", e.val)
@@ -84,7 +85,7 @@
 }
 
 // Open opens the named file using os.Open and prepares it for use as a PE binary.
-func Open(name string) (*File, os.Error) {
+func Open(name string) (*File, error) {
 	f, err := os.Open(name)
 	if err != nil {
 		return nil, err
@@ -101,8 +102,8 @@
 // Close closes the File.
 // If the File was created using NewFile directly instead of Open,
 // Close has no effect.
-func (f *File) Close() os.Error {
-	var err os.Error
+func (f *File) Close() error {
+	var err error
 	if f.closer != nil {
 		err = f.closer.Close()
 		f.closer = nil
@@ -111,7 +112,7 @@
 }
 
 // NewFile creates a new File for accessing a PE binary in an underlying reader.
-func NewFile(r io.ReaderAt) (*File, os.Error) {
+func NewFile(r io.ReaderAt) (*File, error) {
 	f := new(File)
 	sr := io.NewSectionReader(r, 0, 1<<63-1)
 
@@ -124,7 +125,7 @@
 		var sign [4]byte
 		r.ReadAt(sign[0:], int64(dosheader[0x3c]))
 		if !(sign[0] == 'P' && sign[1] == 'E' && sign[2] == 0 && sign[3] == 0) {
-			return nil, os.NewError("Invalid PE File Format.")
+			return nil, errors.New("Invalid PE File Format.")
 		}
 		base = int64(dosheader[0x3c]) + 4
 	} else {
@@ -135,7 +136,7 @@
 		return nil, err
 	}
 	if f.FileHeader.Machine != IMAGE_FILE_MACHINE_UNKNOWN && f.FileHeader.Machine != IMAGE_FILE_MACHINE_AMD64 && f.FileHeader.Machine != IMAGE_FILE_MACHINE_I386 {
-		return nil, os.NewError("Invalid PE File Format.")
+		return nil, errors.New("Invalid PE File Format.")
 	}
 	// get symbol string table
 	sr.Seek(int64(f.FileHeader.PointerToSymbolTable+18*f.FileHeader.NumberOfSymbols), os.SEEK_SET)
@@ -215,7 +216,7 @@
 	return nil
 }
 
-func (f *File) DWARF() (*dwarf.Data, os.Error) {
+func (f *File) DWARF() (*dwarf.Data, error) {
 	// There are many other DWARF sections, but these
 	// are the required ones, and the debug/dwarf package
 	// does not use the others, so don't bother loading them.
@@ -242,7 +243,7 @@
 // referred to by the binary f that are expected to be
 // satisfied by other libraries at dynamic load time.
 // It does not return weak symbols.
-func (f *File) ImportedSymbols() ([]string, os.Error) {
+func (f *File) ImportedSymbols() ([]string, error) {
 	pe64 := f.Machine == IMAGE_FILE_MACHINE_AMD64
 	ds := f.Section(".idata")
 	if ds == nil {
@@ -308,7 +309,7 @@
 // ImportedLibraries returns the names of all libraries
 // referred to by the binary f that are expected to be
 // linked with the binary at dynamic link time.
-func (f *File) ImportedLibraries() ([]string, os.Error) {
+func (f *File) ImportedLibraries() ([]string, error) {
 	// TODO
 	// cgo -dynimport doesn't use this for windows PE, so just return.
 	return nil, nil
diff --git a/src/pkg/encoding/ascii85/ascii85.go b/src/pkg/encoding/ascii85/ascii85.go
index ead0c24..6f592f3 100644
--- a/src/pkg/encoding/ascii85/ascii85.go
+++ b/src/pkg/encoding/ascii85/ascii85.go
@@ -8,7 +8,6 @@
 
 import (
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -93,14 +92,14 @@
 func NewEncoder(w io.Writer) io.WriteCloser { return &encoder{w: w} }
 
 type encoder struct {
-	err  os.Error
+	err  error
 	w    io.Writer
 	buf  [4]byte    // buffered data waiting to be encoded
 	nbuf int        // number of bytes in buf
 	out  [1024]byte // output buffer
 }
 
-func (e *encoder) Write(p []byte) (n int, err os.Error) {
+func (e *encoder) Write(p []byte) (n int, err error) {
 	if e.err != nil {
 		return 0, e.err
 	}
@@ -152,7 +151,7 @@
 
 // Close flushes any pending output from the encoder.
 // It is an error to call Write after calling Close.
-func (e *encoder) Close() os.Error {
+func (e *encoder) Close() error {
 	// If there's anything left in the buffer, flush it out
 	if e.err == nil && e.nbuf > 0 {
 		nout := Encode(e.out[0:], e.buf[0:e.nbuf])
@@ -168,7 +167,7 @@
 
 type CorruptInputError int64
 
-func (e CorruptInputError) String() string {
+func (e CorruptInputError) Error() string {
 	return "illegal ascii85 data at input byte " + strconv.Itoa64(int64(e))
 }
 
@@ -186,7 +185,7 @@
 //
 // NewDecoder wraps an io.Reader interface around Decode.
 //
-func Decode(dst, src []byte, flush bool) (ndst, nsrc int, err os.Error) {
+func Decode(dst, src []byte, flush bool) (ndst, nsrc int, err error) {
 	var v uint32
 	var nb int
 	for i, b := range src {
@@ -246,8 +245,8 @@
 func NewDecoder(r io.Reader) io.Reader { return &decoder{r: r} }
 
 type decoder struct {
-	err     os.Error
-	readErr os.Error
+	err     error
+	readErr error
 	r       io.Reader
 	end     bool       // saw end of message
 	buf     [1024]byte // leftover input
@@ -256,7 +255,7 @@
 	outbuf  [1024]byte
 }
 
-func (d *decoder) Read(p []byte) (n int, err os.Error) {
+func (d *decoder) Read(p []byte) (n int, err error) {
 	if len(p) == 0 {
 		return 0, nil
 	}
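The encoder's Write and Close now both return error, and Close is what flushes the final partial block, so callers should check both. A minimal sketch:

package main

import (
	"bytes"
	"encoding/ascii85"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	enc := ascii85.NewEncoder(&buf)
	if _, err := enc.Write([]byte("hello, world")); err != nil {
		fmt.Println("write:", err)
		return
	}
	if err := enc.Close(); err != nil { // flushes buffered bytes
		fmt.Println("close:", err)
		return
	}
	fmt.Printf("%q\n", buf.String())
}
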
diff --git a/src/pkg/encoding/ascii85/ascii85_test.go b/src/pkg/encoding/ascii85/ascii85_test.go
index fdfeb88..70e67d8 100644
--- a/src/pkg/encoding/ascii85/ascii85_test.go
+++ b/src/pkg/encoding/ascii85/ascii85_test.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -83,11 +83,11 @@
 				end = len(input)
 			}
 			n, err := encoder.Write(input[pos:end])
-			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, os.Error(nil))
+			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
 			testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
 		}
 		err := encoder.Close()
-		testEqual(t, "Close gave error %v, want %v", err, os.Error(nil))
+		testEqual(t, "Close gave error %v, want %v", err, error(nil))
 		testEqual(t, "Encoding/%d of %q = %q, want %q", bs, bigtest.decoded, strip85(bb.String()), strip85(bigtest.encoded))
 	}
 }
@@ -96,7 +96,7 @@
 	for _, p := range pairs {
 		dbuf := make([]byte, 4*len(p.encoded))
 		ndst, nsrc, err := Decode(dbuf, []byte(p.encoded), true)
-		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, os.Error(nil))
+		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil))
 		testEqual(t, "Decode(%q) = nsrc %v, want %v", p.encoded, nsrc, len(p.encoded))
 		testEqual(t, "Decode(%q) = ndst %v, want %v", p.encoded, ndst, len(p.decoded))
 		testEqual(t, "Decode(%q) = %q, want %q", p.encoded, string(dbuf[0:ndst]), p.decoded)
@@ -113,7 +113,7 @@
 		testEqual(t, "Read from %q = length %v, want %v", p.encoded, len(dbuf), len(p.decoded))
 		testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf), p.decoded)
 		if err != nil {
-			testEqual(t, "Read from %q = %v, want %v", p.encoded, err, os.EOF)
+			testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF)
 		}
 	}
 }
@@ -125,7 +125,7 @@
 		var total int
 		for total = 0; total < len(bigtest.decoded); {
 			n, err := decoder.Read(buf[total : total+bs])
-			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, os.Error(nil))
+			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, error(nil))
 			total += n
 		}
 		testEqual(t, "Decoding/%d of %q = %q, want %q", bs, bigtest.encoded, string(buf[0:total]), bigtest.decoded)
diff --git a/src/pkg/encoding/base32/base32.go b/src/pkg/encoding/base32/base32.go
index acace30..494c760 100644
--- a/src/pkg/encoding/base32/base32.go
+++ b/src/pkg/encoding/base32/base32.go
@@ -7,7 +7,6 @@
 
 import (
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -127,7 +126,7 @@
 }
 
 type encoder struct {
-	err  os.Error
+	err  error
 	enc  *Encoding
 	w    io.Writer
 	buf  [5]byte    // buffered data waiting to be encoded
@@ -135,7 +134,7 @@
 	out  [1024]byte // output buffer
 }
 
-func (e *encoder) Write(p []byte) (n int, err os.Error) {
+func (e *encoder) Write(p []byte) (n int, err error) {
 	if e.err != nil {
 		return 0, e.err
 	}
@@ -187,7 +186,7 @@
 
 // Close flushes any pending output from the encoder.
 // It is an error to call Write after calling Close.
-func (e *encoder) Close() os.Error {
+func (e *encoder) Close() error {
 	// If there's anything left in the buffer, flush it out
 	if e.err == nil && e.nbuf > 0 {
 		e.enc.Encode(e.out[0:], e.buf[0:e.nbuf])
@@ -216,7 +215,7 @@
 
 type CorruptInputError int64
 
-func (e CorruptInputError) String() string {
+func (e CorruptInputError) Error() string {
 	return "illegal base32 data at input byte " + strconv.Itoa64(int64(e))
 }
 
@@ -224,7 +223,7 @@
 // indicates if end-of-message padding was encountered and thus any
 // additional data is an error.  decode also assumes len(src)%8==0,
 // since it is meant for internal use.
-func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err os.Error) {
+func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
 	for i := 0; i < len(src)/8 && !end; i++ {
 		// Decode quantum using the base32 alphabet
 		var dbuf [8]byte
@@ -290,7 +289,7 @@
 // DecodedLen(len(src)) bytes to dst and returns the number of bytes
 // written.  If src contains invalid base32 data, it will return the
 // number of bytes successfully written and CorruptInputError.
-func (enc *Encoding) Decode(dst, src []byte) (n int, err os.Error) {
+func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
 	if len(src)%8 != 0 {
 		return 0, CorruptInputError(len(src) / 8 * 8)
 	}
@@ -300,7 +299,7 @@
 }
 
 type decoder struct {
-	err    os.Error
+	err    error
 	enc    *Encoding
 	r      io.Reader
 	end    bool       // saw end of message
@@ -310,7 +309,7 @@
 	outbuf [1024 / 8 * 5]byte
 }
 
-func (d *decoder) Read(p []byte) (n int, err os.Error) {
+func (d *decoder) Read(p []byte) (n int, err error) {
 	if d.err != nil {
 		return 0, d.err
 	}
diff --git a/src/pkg/encoding/base32/base32_test.go b/src/pkg/encoding/base32/base32_test.go
index 3fa1c2b..facf5d0 100644
--- a/src/pkg/encoding/base32/base32_test.go
+++ b/src/pkg/encoding/base32/base32_test.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -78,11 +78,11 @@
 				end = len(input)
 			}
 			n, err := encoder.Write(input[pos:end])
-			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, os.Error(nil))
+			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
 			testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
 		}
 		err := encoder.Close()
-		testEqual(t, "Close gave error %v, want %v", err, os.Error(nil))
+		testEqual(t, "Close gave error %v, want %v", err, error(nil))
 		testEqual(t, "Encoding/%d of %q = %q, want %q", bs, bigtest.decoded, bb.String(), bigtest.encoded)
 	}
 }
@@ -91,7 +91,7 @@
 	for _, p := range pairs {
 		dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded)))
 		count, end, err := StdEncoding.decode(dbuf, []byte(p.encoded))
-		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, os.Error(nil))
+		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil))
 		testEqual(t, "Decode(%q) = length %v, want %v", p.encoded, count, len(p.decoded))
 		if len(p.encoded) > 0 {
 			testEqual(t, "Decode(%q) = end %v, want %v", p.encoded, end, (p.encoded[len(p.encoded)-1] == '='))
@@ -107,15 +107,15 @@
 		decoder := NewDecoder(StdEncoding, bytes.NewBufferString(p.encoded))
 		dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded)))
 		count, err := decoder.Read(dbuf)
-		if err != nil && err != os.EOF {
+		if err != nil && err != io.EOF {
 			t.Fatal("Read failed", err)
 		}
 		testEqual(t, "Read from %q = length %v, want %v", p.encoded, count, len(p.decoded))
 		testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded)
-		if err != os.EOF {
+		if err != io.EOF {
 			count, err = decoder.Read(dbuf)
 		}
-		testEqual(t, "Read from %q = %v, want %v", p.encoded, err, os.EOF)
+		testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF)
 	}
 }
 
@@ -126,7 +126,7 @@
 		var total int
 		for total = 0; total < len(bigtest.decoded); {
 			n, err := decoder.Read(buf[total : total+bs])
-			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, os.Error(nil))
+			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, error(nil))
 			total += n
 		}
 		testEqual(t, "Decoding/%d of %q = %q, want %q", bs, bigtest.encoded, string(buf[0:total]), bigtest.decoded)
diff --git a/src/pkg/encoding/base64/base64.go b/src/pkg/encoding/base64/base64.go
index c6b2a13..9451289 100644
--- a/src/pkg/encoding/base64/base64.go
+++ b/src/pkg/encoding/base64/base64.go
@@ -7,7 +7,6 @@
 
 import (
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -114,7 +113,7 @@
 }
 
 type encoder struct {
-	err  os.Error
+	err  error
 	enc  *Encoding
 	w    io.Writer
 	buf  [3]byte    // buffered data waiting to be encoded
@@ -122,7 +121,7 @@
 	out  [1024]byte // output buffer
 }
 
-func (e *encoder) Write(p []byte) (n int, err os.Error) {
+func (e *encoder) Write(p []byte) (n int, err error) {
 	if e.err != nil {
 		return 0, e.err
 	}
@@ -174,7 +173,7 @@
 
 // Close flushes any pending output from the encoder.
 // It is an error to call Write after calling Close.
-func (e *encoder) Close() os.Error {
+func (e *encoder) Close() error {
 	// If there's anything left in the buffer, flush it out
 	if e.err == nil && e.nbuf > 0 {
 		e.enc.Encode(e.out[0:], e.buf[0:e.nbuf])
@@ -203,7 +202,7 @@
 
 type CorruptInputError int64
 
-func (e CorruptInputError) String() string {
+func (e CorruptInputError) Error() string {
 	return "illegal base64 data at input byte " + strconv.Itoa64(int64(e))
 }
 
@@ -211,7 +210,7 @@
 // indicates if end-of-message padding was encountered and thus any
 // additional data is an error.  decode also assumes len(src)%4==0,
 // since it is meant for internal use.
-func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err os.Error) {
+func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
 	for i := 0; i < len(src)/4 && !end; i++ {
 		// Decode quantum using the base64 alphabet
 		var dbuf [4]byte
@@ -258,7 +257,7 @@
 // DecodedLen(len(src)) bytes to dst and returns the number of bytes
 // written.  If src contains invalid base64 data, it will return the
 // number of bytes successfully written and CorruptInputError.
-func (enc *Encoding) Decode(dst, src []byte) (n int, err os.Error) {
+func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
 	if len(src)%4 != 0 {
 		return 0, CorruptInputError(len(src) / 4 * 4)
 	}
@@ -268,14 +267,14 @@
 }
 
 // DecodeString returns the bytes represented by the base64 string s.
-func (enc *Encoding) DecodeString(s string) ([]byte, os.Error) {
+func (enc *Encoding) DecodeString(s string) ([]byte, error) {
 	dbuf := make([]byte, enc.DecodedLen(len(s)))
 	n, err := enc.Decode(dbuf, []byte(s))
 	return dbuf[:n], err
 }
 
 type decoder struct {
-	err    os.Error
+	err    error
 	enc    *Encoding
 	r      io.Reader
 	end    bool       // saw end of message
@@ -285,7 +284,7 @@
 	outbuf [1024 / 4 * 3]byte
 }
 
-func (d *decoder) Read(p []byte) (n int, err os.Error) {
+func (d *decoder) Read(p []byte) (n int, err error) {
 	if d.err != nil {
 		return 0, d.err
 	}
diff --git a/src/pkg/encoding/base64/base64_test.go b/src/pkg/encoding/base64/base64_test.go
index c163dae..8310d8a 100644
--- a/src/pkg/encoding/base64/base64_test.go
+++ b/src/pkg/encoding/base64/base64_test.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -82,11 +82,11 @@
 				end = len(input)
 			}
 			n, err := encoder.Write(input[pos:end])
-			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, os.Error(nil))
+			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
 			testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
 		}
 		err := encoder.Close()
-		testEqual(t, "Close gave error %v, want %v", err, os.Error(nil))
+		testEqual(t, "Close gave error %v, want %v", err, error(nil))
 		testEqual(t, "Encoding/%d of %q = %q, want %q", bs, bigtest.decoded, bb.String(), bigtest.encoded)
 	}
 }
@@ -95,7 +95,7 @@
 	for _, p := range pairs {
 		dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded)))
 		count, end, err := StdEncoding.decode(dbuf, []byte(p.encoded))
-		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, os.Error(nil))
+		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil))
 		testEqual(t, "Decode(%q) = length %v, want %v", p.encoded, count, len(p.decoded))
 		if len(p.encoded) > 0 {
 			testEqual(t, "Decode(%q) = end %v, want %v", p.encoded, end, (p.encoded[len(p.encoded)-1] == '='))
@@ -103,7 +103,7 @@
 		testEqual(t, "Decode(%q) = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded)
 
 		dbuf, err = StdEncoding.DecodeString(p.encoded)
-		testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, os.Error(nil))
+		testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil))
 		testEqual(t, "DecodeString(%q) = %q, want %q", string(dbuf), p.decoded)
 	}
 }
@@ -113,15 +113,15 @@
 		decoder := NewDecoder(StdEncoding, bytes.NewBufferString(p.encoded))
 		dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded)))
 		count, err := decoder.Read(dbuf)
-		if err != nil && err != os.EOF {
+		if err != nil && err != io.EOF {
 			t.Fatal("Read failed", err)
 		}
 		testEqual(t, "Read from %q = length %v, want %v", p.encoded, count, len(p.decoded))
 		testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded)
-		if err != os.EOF {
+		if err != io.EOF {
 			count, err = decoder.Read(dbuf)
 		}
-		testEqual(t, "Read from %q = %v, want %v", p.encoded, err, os.EOF)
+		testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF)
 	}
 }
 
@@ -132,7 +132,7 @@
 		var total int
 		for total = 0; total < len(bigtest.decoded); {
 			n, err := decoder.Read(buf[total : total+bs])
-			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, os.Error(nil))
+			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, error(nil))
 			total += n
 		}
 		testEqual(t, "Decoding/%d of %q = %q, want %q", bs, bigtest.encoded, string(buf[0:total]), bigtest.decoded)
diff --git a/src/pkg/encoding/binary/binary.go b/src/pkg/encoding/binary/binary.go
index c58f736..65b9f01 100644
--- a/src/pkg/encoding/binary/binary.go
+++ b/src/pkg/encoding/binary/binary.go
@@ -8,9 +8,9 @@
 package binary
 
 import (
+	"errors"
 	"math"
 	"io"
-	"os"
 	"reflect"
 )
 
@@ -124,7 +124,7 @@
 // or an array or struct containing only fixed-size values.
 // Bytes read from r are decoded using the specified byte order
 // and written to successive fields of the data.
-func Read(r io.Reader, order ByteOrder, data interface{}) os.Error {
+func Read(r io.Reader, order ByteOrder, data interface{}) error {
 	// Fast path for basic types.
 	if n := intDestSize(data); n != 0 {
 		var b [8]byte
@@ -161,11 +161,11 @@
 	case reflect.Slice:
 		v = d
 	default:
-		return os.NewError("binary.Read: invalid type " + d.Type().String())
+		return errors.New("binary.Read: invalid type " + d.Type().String())
 	}
 	size := TotalSize(v)
 	if size < 0 {
-		return os.NewError("binary.Read: invalid type " + v.Type().String())
+		return errors.New("binary.Read: invalid type " + v.Type().String())
 	}
 	d := &decoder{order: order, buf: make([]byte, size)}
 	if _, err := io.ReadFull(r, d.buf); err != nil {
@@ -183,7 +183,7 @@
 // or an array or struct containing only fixed-size values.
 // Bytes written to w are encoded using the specified byte order
 // and read from successive fields of the data.
-func Write(w io.Writer, order ByteOrder, data interface{}) os.Error {
+func Write(w io.Writer, order ByteOrder, data interface{}) error {
 	// Fast path for basic types.
 	var b [8]byte
 	var bs []byte
@@ -244,7 +244,7 @@
 	v := reflect.Indirect(reflect.ValueOf(data))
 	size := TotalSize(v)
 	if size < 0 {
-		return os.NewError("binary.Write: invalid type " + v.Type().String())
+		return errors.New("binary.Write: invalid type " + v.Type().String())
 	}
 	buf := make([]byte, size)
 	e := &encoder{order: order, buf: buf}
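
binary.Read and binary.Write now return error, with invalid-type failures built by errors.New. A small sketch of the new Read signature in use (illustrative, not from the CL):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	func main() {
		var x int32
		// An unsupported destination would surface as an errors.New value;
		// a successful decode returns a nil error.
		if err := binary.Read(bytes.NewBuffer([]byte{1, 2, 3, 4}), binary.BigEndian, &x); err != nil {
			fmt.Println("read failed:", err)
			return
		}
		fmt.Printf("%#x\n", x) // 0x1020304
	}
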
diff --git a/src/pkg/encoding/binary/binary_test.go b/src/pkg/encoding/binary/binary_test.go
index 73def50..e3bf17c 100644
--- a/src/pkg/encoding/binary/binary_test.go
+++ b/src/pkg/encoding/binary/binary_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"io"
-	"os"
 	"bytes"
 	"math"
 	"reflect"
@@ -99,7 +98,7 @@
 var src = []byte{1, 2, 3, 4, 5, 6, 7, 8}
 var res = []int32{0x01020304, 0x05060708}
 
-func checkResult(t *testing.T, dir string, order ByteOrder, err os.Error, have, want interface{}) {
+func checkResult(t *testing.T, dir string, order ByteOrder, err error, have, want interface{}) {
 	if err != nil {
 		t.Errorf("%v %v: %v", dir, order, err)
 		return
@@ -166,7 +165,7 @@
 	remain []byte
 }
 
-func (br *byteSliceReader) Read(p []byte) (int, os.Error) {
+func (br *byteSliceReader) Read(p []byte) (int, error) {
 	n := copy(p, br.remain)
 	br.remain = br.remain[n:]
 	return n, nil
diff --git a/src/pkg/encoding/binary/varint.go b/src/pkg/encoding/binary/varint.go
index c98e0e2..d4872ee 100644
--- a/src/pkg/encoding/binary/varint.go
+++ b/src/pkg/encoding/binary/varint.go
@@ -25,8 +25,8 @@
 // format incompatible with a varint encoding for larger numbers (say 128-bit).
 
 import (
+	"errors"
 	"io"
-	"os"
 )
 
 // MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
@@ -99,17 +99,17 @@
 }
 
 // WriteUvarint encodes x and writes the result to w.
-func WriteUvarint(w io.Writer, x uint64) os.Error {
+func WriteUvarint(w io.Writer, x uint64) error {
 	var buf [MaxVarintLen64]byte
 	n := PutUvarint(buf[:], x)
 	_, err := w.Write(buf[0:n])
 	return err
 }
 
-var overflow = os.NewError("binary: varint overflows a 64-bit integer")
+var overflow = errors.New("binary: varint overflows a 64-bit integer")
 
 // ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
-func ReadUvarint(r io.ByteReader) (uint64, os.Error) {
+func ReadUvarint(r io.ByteReader) (uint64, error) {
 	var x uint64
 	var s uint
 	for i := 0; ; i++ {
@@ -130,7 +130,7 @@
 }
 
 // WriteVarint encodes x and writes the result to w.
-func WriteVarint(w io.Writer, x int64) os.Error {
+func WriteVarint(w io.Writer, x int64) error {
 	ux := uint64(x) << 1
 	if x < 0 {
 		ux = ^ux
@@ -139,7 +139,7 @@
 }
 
 // ReadVarint reads an encoded signed integer from r and returns it as an int64.
-func ReadVarint(r io.ByteReader) (int64, os.Error) {
+func ReadVarint(r io.ByteReader) (int64, error) {
 	ux, err := ReadUvarint(r) // ok to continue in presence of error
 	x := int64(ux >> 1)
 	if ux&1 != 0 {
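
ReadUvarint and ReadVarint likewise return error, and the end-of-input sentinel is now io.EOF. A sketch of both the empty-input and the success case (illustrative only):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
		"io"
	)

	func main() {
		// An empty buffer yields io.EOF rather than os.EOF.
		if _, err := binary.ReadUvarint(bytes.NewBuffer(nil)); err == io.EOF {
			fmt.Println("empty input:", err)
		}

		var buf [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(buf[:], 300)
		x, err := binary.ReadUvarint(bytes.NewBuffer(buf[:n]))
		fmt.Println(x, err) // 300 <nil>
	}
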
diff --git a/src/pkg/encoding/binary/varint_test.go b/src/pkg/encoding/binary/varint_test.go
index ef51f09..b553d6d 100644
--- a/src/pkg/encoding/binary/varint_test.go
+++ b/src/pkg/encoding/binary/varint_test.go
@@ -6,7 +6,7 @@
 
 import (
 	"bytes"
-	"os"
+	"io"
 	"testing"
 )
 
@@ -131,13 +131,13 @@
 		}
 
 		x, err := ReadUvarint(bytes.NewBuffer(buf))
-		if x != 0 || err != os.EOF {
+		if x != 0 || err != io.EOF {
 			t.Errorf("ReadUvarint(%v): got x = %d, err = %s", buf, x, err)
 		}
 	}
 }
 
-func testOverflow(t *testing.T, buf []byte, n0 int, err0 os.Error) {
+func testOverflow(t *testing.T, buf []byte, n0 int, err0 error) {
 	x, n := Uvarint(buf)
 	if x != 0 || n != n0 {
 		t.Errorf("Uvarint(%v): got x = %d, n = %d; want 0, %d", buf, x, n, n0)
diff --git a/src/pkg/encoding/git85/git.go b/src/pkg/encoding/git85/git.go
index 6bb74f4..b6ad6e2 100644
--- a/src/pkg/encoding/git85/git.go
+++ b/src/pkg/encoding/git85/git.go
@@ -9,13 +9,12 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"strconv"
 )
 
 type CorruptInputError int64
 
-func (e CorruptInputError) String() string {
+func (e CorruptInputError) Error() string {
 	return "illegal git85 data at input byte " + strconv.Itoa64(int64(e))
 }
 
@@ -96,7 +95,7 @@
 //
 // If Decode encounters invalid input, it returns a CorruptInputError.
 //
-func Decode(dst, src []byte) (n int, err os.Error) {
+func Decode(dst, src []byte) (n int, err error) {
 	ndst := 0
 	nsrc := 0
 	for nsrc < len(src) {
@@ -153,14 +152,14 @@
 
 type encoder struct {
 	w    io.Writer
-	err  os.Error
+	err  error
 	buf  [52]byte
 	nbuf int
 	out  [1024]byte
 	nout int
 }
 
-func (e *encoder) Write(p []byte) (n int, err os.Error) {
+func (e *encoder) Write(p []byte) (n int, err error) {
 	if e.err != nil {
 		return 0, e.err
 	}
@@ -209,7 +208,7 @@
 	return
 }
 
-func (e *encoder) Close() os.Error {
+func (e *encoder) Close() error {
 	// If there's anything left in the buffer, flush it out
 	if e.err == nil && e.nbuf > 0 {
 		nout := Encode(e.out[0:], e.buf[0:e.nbuf])
@@ -224,8 +223,8 @@
 
 type decoder struct {
 	r       io.Reader
-	err     os.Error
-	readErr os.Error
+	err     error
+	readErr error
 	buf     [1024]byte
 	nbuf    int
 	out     []byte
@@ -233,7 +232,7 @@
 	off     int64
 }
 
-func (d *decoder) Read(p []byte) (n int, err os.Error) {
+func (d *decoder) Read(p []byte) (n int, err error) {
 	if len(p) == 0 {
 		return 0, nil
 	}
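
The git85 hunk swaps the old String() string method for Error() string, so CorruptInputError satisfies the built-in error interface directly. A standalone sketch of the same pattern, with strconv.FormatInt standing in for the pre-Go1 strconv.Itoa64 used in the package:

	package main

	import (
		"fmt"
		"strconv"
	)

	// CorruptInputError mirrors the declaration above: an integer type
	// whose Error method makes it usable anywhere an error is expected.
	type CorruptInputError int64

	func (e CorruptInputError) Error() string {
		return "illegal git85 data at input byte " + strconv.FormatInt(int64(e), 10)
	}

	func main() {
		var err error = CorruptInputError(42)
		fmt.Println(err) // illegal git85 data at input byte 42
	}
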
diff --git a/src/pkg/encoding/git85/git_test.go b/src/pkg/encoding/git85/git_test.go
index c76385c..81f5b0e 100644
--- a/src/pkg/encoding/git85/git_test.go
+++ b/src/pkg/encoding/git85/git_test.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"io"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
@@ -90,11 +90,11 @@
 				end = len(input)
 			}
 			n, err := encoder.Write(input[pos:end])
-			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, os.Error(nil))
+			testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
 			testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
 		}
 		err := encoder.Close()
-		testEqual(t, "Close gave error %v, want %v", err, os.Error(nil))
+		testEqual(t, "Close gave error %v, want %v", err, error(nil))
 		testEqual(t, "Encoding/%d of %q = %q, want %q", bs, gitBigtest.decoded, bb.String(), gitBigtest.encoded)
 	}
 }
@@ -103,7 +103,7 @@
 	for _, p := range gitPairs {
 		dbuf := make([]byte, 4*len(p.encoded))
 		ndst, err := Decode(dbuf, []byte(p.encoded))
-		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, os.Error(nil))
+		testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil))
 		testEqual(t, "Decode(%q) = ndst %v, want %v", p.encoded, ndst, len(p.decoded))
 		testEqual(t, "Decode(%q) = %q, want %q", p.encoded, string(dbuf[0:ndst]), p.decoded)
 	}
@@ -119,7 +119,7 @@
 		testEqual(t, "Read from %q = length %v, want %v", p.encoded, len(dbuf), len(p.decoded))
 		testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf), p.decoded)
 		if err != nil {
-			testEqual(t, "Read from %q = %v, want %v", p.encoded, err, os.EOF)
+			testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF)
 		}
 	}
 }
@@ -131,7 +131,7 @@
 		var total int
 		for total = 0; total < len(gitBigtest.decoded); {
 			n, err := decoder.Read(buf[total : total+bs])
-			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", gitBigtest.encoded, total, n, err, os.Error(nil))
+			testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", gitBigtest.encoded, total, n, err, error(nil))
 			total += n
 		}
 		testEqual(t, "Decoding/%d of %q = %q, want %q", bs, gitBigtest.encoded, string(buf[0:total]), gitBigtest.decoded)
diff --git a/src/pkg/encoding/hex/hex.go b/src/pkg/encoding/hex/hex.go
index e7ea8b0..eb7e7ca 100644
--- a/src/pkg/encoding/hex/hex.go
+++ b/src/pkg/encoding/hex/hex.go
@@ -8,7 +8,6 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -33,12 +32,12 @@
 // OddLengthInputError results from decoding an odd length slice.
 type OddLengthInputError struct{}
 
-func (OddLengthInputError) String() string { return "odd length hex string" }
+func (OddLengthInputError) Error() string { return "odd length hex string" }
 
 // InvalidHexCharError results from finding an invalid character in a hex string.
 type InvalidHexCharError byte
 
-func (e InvalidHexCharError) String() string {
+func (e InvalidHexCharError) Error() string {
 	return "invalid hex char: " + strconv.Itoa(int(e))
 }
 
@@ -49,7 +48,7 @@
 //
 // If Decode encounters invalid input, it returns an OddLengthInputError or an
 // InvalidHexCharError.
-func Decode(dst, src []byte) (int, os.Error) {
+func Decode(dst, src []byte) (int, error) {
 	if len(src)%2 == 1 {
 		return 0, OddLengthInputError{}
 	}
@@ -91,7 +90,7 @@
 }
 
 // DecodeString returns the bytes represented by the hexadecimal string s.
-func DecodeString(s string) ([]byte, os.Error) {
+func DecodeString(s string) ([]byte, error) {
 	src := []byte(s)
 	dst := make([]byte, DecodedLen(len(src)))
 	_, err := Decode(dst, src)
@@ -133,7 +132,7 @@
 	return b
 }
 
-func (h *dumper) Write(data []byte) (n int, err os.Error) {
+func (h *dumper) Write(data []byte) (n int, err error) {
 	// Output lines look like:
 	// 00000010  2e 2f 30 31 32 33 34 35  36 37 38 39 3a 3b 3c 3d  |./0123456789:;<=|
 	// ^ offset                          ^ extra space              ^ ASCII of line.
@@ -185,7 +184,7 @@
 	return
 }
 
-func (h *dumper) Close() (err os.Error) {
+func (h *dumper) Close() (err error) {
 	// See the comments in Write() for the details of this format.
 	if h.used == 0 {
 		return
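
hex follows the same pattern: its sentinel types now implement Error(), and DecodeString returns ([]byte, error). A short usage sketch (illustrative, not from the CL):

	package main

	import (
		"encoding/hex"
		"fmt"
	)

	func main() {
		// Odd-length input surfaces as a value satisfying the error interface.
		if _, err := hex.DecodeString("abc"); err != nil {
			fmt.Println("decode failed:", err)
		}
		b, err := hex.DecodeString("6869")
		fmt.Printf("%s %v\n", b, err) // hi <nil>
	}
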
diff --git a/src/pkg/encoding/pem/pem.go b/src/pkg/encoding/pem/pem.go
index 12689b5..3eb7c9f 100644
--- a/src/pkg/encoding/pem/pem.go
+++ b/src/pkg/encoding/pem/pem.go
@@ -11,7 +11,6 @@
 	"bytes"
 	"encoding/base64"
 	"io"
-	"os"
 )
 
 // A Block represents a PEM encoded structure.
@@ -170,7 +169,7 @@
 	out  io.Writer
 }
 
-func (l *lineBreaker) Write(b []byte) (n int, err os.Error) {
+func (l *lineBreaker) Write(b []byte) (n int, err error) {
 	if l.used+len(b) < pemLineLength {
 		copy(l.line[l.used:], b)
 		l.used += len(b)
@@ -197,7 +196,7 @@
 	return l.Write(b[excess:])
 }
 
-func (l *lineBreaker) Close() (err os.Error) {
+func (l *lineBreaker) Close() (err error) {
 	if l.used > 0 {
 		_, err = l.out.Write(l.line[0:l.used])
 		if err != nil {
@@ -209,7 +208,7 @@
 	return
 }
 
-func Encode(out io.Writer, b *Block) (err os.Error) {
+func Encode(out io.Writer, b *Block) (err error) {
 	_, err = out.Write(pemStart[1:])
 	if err != nil {
 		return
diff --git a/src/pkg/exec/exec.go b/src/pkg/exec/exec.go
index 3b818c2..ebdfd54 100644
--- a/src/pkg/exec/exec.go
+++ b/src/pkg/exec/exec.go
@@ -9,6 +9,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"os"
 	"strconv"
@@ -18,12 +19,12 @@
 // Error records the name of a binary that failed to be executed
 // and the reason it failed.
 type Error struct {
-	Name  string
-	Error os.Error
+	Name string
+	Err  error
 }
 
-func (e *Error) String() string {
-	return "exec: " + strconv.Quote(e.Name) + ": " + e.Error.String()
+func (e *Error) Error() string {
+	return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error()
 }
 
 // Cmd represents an external command being prepared or run.
@@ -75,13 +76,13 @@
 	// Process is the underlying process, once started.
 	Process *os.Process
 
-	err             os.Error // last error (from LookPath, stdin, stdout, stderr)
-	finished        bool     // when Wait was called
+	err             error // last error (from LookPath, stdin, stdout, stderr)
+	finished        bool  // when Wait was called
 	childFiles      []*os.File
 	closeAfterStart []io.Closer
 	closeAfterWait  []io.Closer
-	goroutine       []func() os.Error
-	errch           chan os.Error // one send per goroutine
+	goroutine       []func() error
+	errch           chan error // one send per goroutine
 }
 
 // Command returns the Cmd struct to execute the named program with
@@ -132,7 +133,7 @@
 	return []string{c.Path}
 }
 
-func (c *Cmd) stdin() (f *os.File, err os.Error) {
+func (c *Cmd) stdin() (f *os.File, err error) {
 	if c.Stdin == nil {
 		f, err = os.Open(os.DevNull)
 		c.closeAfterStart = append(c.closeAfterStart, f)
@@ -150,7 +151,7 @@
 
 	c.closeAfterStart = append(c.closeAfterStart, pr)
 	c.closeAfterWait = append(c.closeAfterWait, pw)
-	c.goroutine = append(c.goroutine, func() os.Error {
+	c.goroutine = append(c.goroutine, func() error {
 		_, err := io.Copy(pw, c.Stdin)
 		if err1 := pw.Close(); err == nil {
 			err = err1
@@ -160,18 +161,18 @@
 	return pr, nil
 }
 
-func (c *Cmd) stdout() (f *os.File, err os.Error) {
+func (c *Cmd) stdout() (f *os.File, err error) {
 	return c.writerDescriptor(c.Stdout)
 }
 
-func (c *Cmd) stderr() (f *os.File, err os.Error) {
+func (c *Cmd) stderr() (f *os.File, err error) {
 	if c.Stderr != nil && interfaceEqual(c.Stderr, c.Stdout) {
 		return c.childFiles[1], nil
 	}
 	return c.writerDescriptor(c.Stderr)
 }
 
-func (c *Cmd) writerDescriptor(w io.Writer) (f *os.File, err os.Error) {
+func (c *Cmd) writerDescriptor(w io.Writer) (f *os.File, err error) {
 	if w == nil {
 		f, err = os.OpenFile(os.DevNull, os.O_WRONLY, 0)
 		c.closeAfterStart = append(c.closeAfterStart, f)
@@ -189,7 +190,7 @@
 
 	c.closeAfterStart = append(c.closeAfterStart, pw)
 	c.closeAfterWait = append(c.closeAfterWait, pr)
-	c.goroutine = append(c.goroutine, func() os.Error {
+	c.goroutine = append(c.goroutine, func() error {
 		_, err := io.Copy(w, pr)
 		return err
 	})
@@ -205,7 +206,7 @@
 // If the command fails to run or doesn't complete successfully, the
 // error is of type *ExitError. Other error types may be
 // returned for I/O problems.
-func (c *Cmd) Run() os.Error {
+func (c *Cmd) Run() error {
 	if err := c.Start(); err != nil {
 		return err
 	}
@@ -213,15 +214,15 @@
 }
 
 // Start starts the specified command but does not wait for it to complete.
-func (c *Cmd) Start() os.Error {
+func (c *Cmd) Start() error {
 	if c.err != nil {
 		return c.err
 	}
 	if c.Process != nil {
-		return os.NewError("exec: already started")
+		return errors.New("exec: already started")
 	}
 
-	type F func(*Cmd) (*os.File, os.Error)
+	type F func(*Cmd) (*os.File, error)
 	for _, setupFd := range []F{(*Cmd).stdin, (*Cmd).stdout, (*Cmd).stderr} {
 		fd, err := setupFd(c)
 		if err != nil {
@@ -231,7 +232,7 @@
 	}
 	c.childFiles = append(c.childFiles, c.ExtraFiles...)
 
-	var err os.Error
+	var err error
 	c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
 		Dir:   c.Dir,
 		Files: c.childFiles,
@@ -246,9 +247,9 @@
 		fd.Close()
 	}
 
-	c.errch = make(chan os.Error, len(c.goroutine))
+	c.errch = make(chan error, len(c.goroutine))
 	for _, fn := range c.goroutine {
-		go func(fn func() os.Error) {
+		go func(fn func() error) {
 			c.errch <- fn()
 		}(fn)
 	}
@@ -261,7 +262,7 @@
 	*os.Waitmsg
 }
 
-func (e *ExitError) String() string {
+func (e *ExitError) Error() string {
 	return e.Waitmsg.String()
 }
 
@@ -275,17 +276,17 @@
 // If the command fails to run or doesn't complete successfully, the
 // error is of type *ExitError. Other error types may be
 // returned for I/O problems.
-func (c *Cmd) Wait() os.Error {
+func (c *Cmd) Wait() error {
 	if c.Process == nil {
-		return os.NewError("exec: not started")
+		return errors.New("exec: not started")
 	}
 	if c.finished {
-		return os.NewError("exec: Wait was already called")
+		return errors.New("exec: Wait was already called")
 	}
 	c.finished = true
 	msg, err := c.Process.Wait(0)
 
-	var copyError os.Error
+	var copyError error
 	for _ = range c.goroutine {
 		if err := <-c.errch; err != nil && copyError == nil {
 			copyError = err
@@ -306,9 +307,9 @@
 }
 
 // Output runs the command and returns its standard output.
-func (c *Cmd) Output() ([]byte, os.Error) {
+func (c *Cmd) Output() ([]byte, error) {
 	if c.Stdout != nil {
-		return nil, os.NewError("exec: Stdout already set")
+		return nil, errors.New("exec: Stdout already set")
 	}
 	var b bytes.Buffer
 	c.Stdout = &b
@@ -318,12 +319,12 @@
 
 // CombinedOutput runs the command and returns its combined standard
 // output and standard error.
-func (c *Cmd) CombinedOutput() ([]byte, os.Error) {
+func (c *Cmd) CombinedOutput() ([]byte, error) {
 	if c.Stdout != nil {
-		return nil, os.NewError("exec: Stdout already set")
+		return nil, errors.New("exec: Stdout already set")
 	}
 	if c.Stderr != nil {
-		return nil, os.NewError("exec: Stderr already set")
+		return nil, errors.New("exec: Stderr already set")
 	}
 	var b bytes.Buffer
 	c.Stdout = &b
@@ -334,12 +335,12 @@
 
 // StdinPipe returns a pipe that will be connected to the command's
 // standard input when the command starts.
-func (c *Cmd) StdinPipe() (io.WriteCloser, os.Error) {
+func (c *Cmd) StdinPipe() (io.WriteCloser, error) {
 	if c.Stdin != nil {
-		return nil, os.NewError("exec: Stdin already set")
+		return nil, errors.New("exec: Stdin already set")
 	}
 	if c.Process != nil {
-		return nil, os.NewError("exec: StdinPipe after process started")
+		return nil, errors.New("exec: StdinPipe after process started")
 	}
 	pr, pw, err := os.Pipe()
 	if err != nil {
@@ -354,12 +355,12 @@
 // StdoutPipe returns a pipe that will be connected to the command's
 // standard output when the command starts.
 // The pipe will be closed automatically after Wait sees the command exit.
-func (c *Cmd) StdoutPipe() (io.ReadCloser, os.Error) {
+func (c *Cmd) StdoutPipe() (io.ReadCloser, error) {
 	if c.Stdout != nil {
-		return nil, os.NewError("exec: Stdout already set")
+		return nil, errors.New("exec: Stdout already set")
 	}
 	if c.Process != nil {
-		return nil, os.NewError("exec: StdoutPipe after process started")
+		return nil, errors.New("exec: StdoutPipe after process started")
 	}
 	pr, pw, err := os.Pipe()
 	if err != nil {
@@ -374,12 +375,12 @@
 // StderrPipe returns a pipe that will be connected to the command's
 // standard error when the command starts.
 // The pipe will be closed automatically after Wait sees the command exit.
-func (c *Cmd) StderrPipe() (io.ReadCloser, os.Error) {
+func (c *Cmd) StderrPipe() (io.ReadCloser, error) {
 	if c.Stderr != nil {
-		return nil, os.NewError("exec: Stderr already set")
+		return nil, errors.New("exec: Stderr already set")
 	}
 	if c.Process != nil {
-		return nil, os.NewError("exec: StderrPipe after process started")
+		return nil, errors.New("exec: StderrPipe after process started")
 	}
 	pr, pw, err := os.Pipe()
 	if err != nil {
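
The exec changes rename the wrapped field from Error to Err and give the struct an Error() string method, so the wrapper itself is an error. A self-contained sketch of that shape (the type mirrors the hunk; it is not the package source):

	package main

	import (
		"errors"
		"fmt"
		"strconv"
	)

	// Error keeps the name of the binary that failed and the underlying
	// error, and formats both when used as an error value.
	type Error struct {
		Name string
		Err  error
	}

	func (e *Error) Error() string {
		return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error()
	}

	func main() {
		err := &Error{Name: "frob", Err: errors.New("executable file not found in $PATH")}
		fmt.Println(err) // exec: "frob": executable file not found in $PATH
	}
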
diff --git a/src/pkg/exec/exec_test.go b/src/pkg/exec/exec_test.go
index 3183919..6d5e893 100644
--- a/src/pkg/exec/exec_test.go
+++ b/src/pkg/exec/exec_test.go
@@ -82,7 +82,7 @@
 	// Test that exit values are returned correctly
 	err := helperCommand("exit", "42").Run()
 	if werr, ok := err.(*ExitError); ok {
-		if s, e := werr.String(), "exit status 42"; s != e {
+		if s, e := werr.Error(), "exit status 42"; s != e {
 			t.Errorf("from exit 42 got exit %q, want %q", s, e)
 		}
 	} else {
@@ -91,7 +91,7 @@
 }
 
 func TestPipes(t *testing.T) {
-	check := func(what string, err os.Error) {
+	check := func(what string, err error) {
 		if err != nil {
 			t.Fatalf("%s: %v", what, err)
 		}
@@ -224,7 +224,7 @@
 		bufr := bufio.NewReader(os.Stdin)
 		for {
 			line, _, err := bufr.ReadLine()
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			} else if err != nil {
 				os.Exit(1)
diff --git a/src/pkg/exec/lp_plan9.go b/src/pkg/exec/lp_plan9.go
index e4751a4..d4ffc17 100644
--- a/src/pkg/exec/lp_plan9.go
+++ b/src/pkg/exec/lp_plan9.go
@@ -5,14 +5,15 @@
 package exec
 
 import (
+	"errors"
 	"os"
 	"strings"
 )
 
 // ErrNotFound is the error resulting if a path search failed to find an executable file.
-var ErrNotFound = os.NewError("executable file not found in $path")
+var ErrNotFound = errors.New("executable file not found in $path")
 
-func findExecutable(file string) os.Error {
+func findExecutable(file string) error {
 	d, err := os.Stat(file)
 	if err != nil {
 		return err
@@ -27,7 +28,7 @@
 // in the directories named by the path environment variable.
 // If file begins with "/", "#", "./", or "../", it is tried
 // directly and the path is not consulted.
-func LookPath(file string) (string, os.Error) {
+func LookPath(file string) (string, error) {
 	// skip the path lookup for these prefixes
 	skip := []string{"/", "#", "./", "../"}
 
diff --git a/src/pkg/exec/lp_unix.go b/src/pkg/exec/lp_unix.go
index 0cd19e7..d234641 100644
--- a/src/pkg/exec/lp_unix.go
+++ b/src/pkg/exec/lp_unix.go
@@ -7,14 +7,15 @@
 package exec
 
 import (
+	"errors"
 	"os"
 	"strings"
 )
 
 // ErrNotFound is the error resulting if a path search failed to find an executable file.
-var ErrNotFound = os.NewError("executable file not found in $PATH")
+var ErrNotFound = errors.New("executable file not found in $PATH")
 
-func findExecutable(file string) os.Error {
+func findExecutable(file string) error {
 	d, err := os.Stat(file)
 	if err != nil {
 		return err
@@ -28,7 +29,7 @@
 // LookPath searches for an executable binary named file
 // in the directories named by the PATH environment variable.
 // If file contains a slash, it is tried directly and the PATH is not consulted.
-func LookPath(file string) (string, os.Error) {
+func LookPath(file string) (string, error) {
 	// NOTE(rsc): I wish we could use the Plan 9 behavior here
 	// (only bypass the path if file begins with / or ./ or ../)
 	// but that would not match all the Unix shells.
diff --git a/src/pkg/exec/lp_windows.go b/src/pkg/exec/lp_windows.go
index 7581088..db32623 100644
--- a/src/pkg/exec/lp_windows.go
+++ b/src/pkg/exec/lp_windows.go
@@ -5,14 +5,15 @@
 package exec
 
 import (
+	"errors"
 	"os"
 	"strings"
 )
 
 // ErrNotFound is the error resulting if a path search failed to find an executable file.
-var ErrNotFound = os.NewError("executable file not found in %PATH%")
+var ErrNotFound = errors.New("executable file not found in %PATH%")
 
-func chkStat(file string) os.Error {
+func chkStat(file string) error {
 	d, err := os.Stat(file)
 	if err != nil {
 		return err
@@ -23,7 +24,7 @@
 	return os.EPERM
 }
 
-func findExecutable(file string, exts []string) (string, os.Error) {
+func findExecutable(file string, exts []string) (string, error) {
 	if len(exts) == 0 {
 		return file, chkStat(file)
 	}
@@ -41,7 +42,7 @@
 	return ``, os.ENOENT
 }
 
-func LookPath(file string) (f string, err os.Error) {
+func LookPath(file string) (f string, err error) {
 	x := os.Getenv(`PATHEXT`)
 	if x == `` {
 		x = `.COM;.EXE;.BAT;.CMD`
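
All three LookPath implementations now return (string, error), with ErrNotFound built by errors.New. A sketch of the caller side, assuming the package's later import path os/exec (it lived at "exec" when this CL was written):

	package main

	import (
		"fmt"
		"os/exec" // "exec" at the time of this CL
	)

	func main() {
		path, err := exec.LookPath("go")
		if err != nil {
			fmt.Println("not found:", err) // a plain error value now, not os.Error
			return
		}
		fmt.Println(path)
	}
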
diff --git a/src/pkg/exp/ebnf/ebnf.go b/src/pkg/exp/ebnf/ebnf.go
index 7070cc7..15c199a 100644
--- a/src/pkg/exp/ebnf/ebnf.go
+++ b/src/pkg/exp/ebnf/ebnf.go
@@ -23,8 +23,8 @@
 package ebnf
 
 import (
+	"errors"
 	"fmt"
-	"os"
 	"scanner"
 	"unicode"
 	"utf8"
@@ -33,27 +33,27 @@
 // ----------------------------------------------------------------------------
 // Error handling
 
-type errorList []os.Error
+type errorList []error
 
-func (list errorList) Error() os.Error {
+func (list errorList) Err() error {
 	if len(list) == 0 {
 		return nil
 	}
 	return list
 }
 
-func (list errorList) String() string {
+func (list errorList) Error() string {
 	switch len(list) {
 	case 0:
 		return "no errors"
 	case 1:
-		return list[0].String()
+		return list[0].Error()
 	}
 	return fmt.Sprintf("%s (and %d more errors)", list[0], len(list)-1)
 }
 
-func newError(pos scanner.Position, msg string) os.Error {
-	return os.NewError(fmt.Sprintf("%s: %s", pos, msg))
+func newError(pos scanner.Position, msg string) error {
+	return errors.New(fmt.Sprintf("%s: %s", pos, msg))
 }
 
 // ----------------------------------------------------------------------------
@@ -262,8 +262,8 @@
 //
 // Position information is interpreted relative to the file set fset.
 //
-func Verify(grammar Grammar, start string) os.Error {
+func Verify(grammar Grammar, start string) error {
 	var v verifier
 	v.verify(grammar, start)
-	return v.errors.Error()
+	return v.errors.Err()
 }
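
The ebnf errorList keeps collecting individual errors, but the collection itself now satisfies error, and the old Error() accessor becomes Err() so the name is free for the interface method. A standalone sketch of that pattern:

	package main

	import (
		"errors"
		"fmt"
	)

	type errorList []error

	// Err folds an empty list to nil so callers can return it directly.
	func (list errorList) Err() error {
		if len(list) == 0 {
			return nil
		}
		return list
	}

	// Error makes the whole list usable as a single error value.
	func (list errorList) Error() string {
		switch len(list) {
		case 0:
			return "no errors"
		case 1:
			return list[0].Error()
		}
		return fmt.Sprintf("%s (and %d more errors)", list[0], len(list)-1)
	}

	func main() {
		var list errorList
		fmt.Println(list.Err()) // <nil>
		list = append(list, errors.New("first"), errors.New("second"))
		fmt.Println(list.Err()) // first (and 1 more errors)
	}
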
diff --git a/src/pkg/exp/ebnf/parser.go b/src/pkg/exp/ebnf/parser.go
index b550c2b..2dad9b4 100644
--- a/src/pkg/exp/ebnf/parser.go
+++ b/src/pkg/exp/ebnf/parser.go
@@ -6,7 +6,6 @@
 
 import (
 	"io"
-	"os"
 	"scanner"
 	"strconv"
 )
@@ -184,7 +183,7 @@
 // more than once; the filename is used only for error
 // positions.
 //
-func Parse(filename string, src io.Reader) (Grammar, os.Error) {
+func Parse(filename string, src io.Reader) (Grammar, error) {
 	var p parser
 	grammar := p.parse(filename, src)
 	return grammar, p.errors.Err()
diff --git a/src/pkg/exp/ebnflint/ebnflint.go b/src/pkg/exp/ebnflint/ebnflint.go
index c827716..6d6f516 100644
--- a/src/pkg/exp/ebnflint/ebnflint.go
+++ b/src/pkg/exp/ebnflint/ebnflint.go
@@ -31,7 +31,7 @@
 	close = []byte(`</pre>`)
 )
 
-func report(err os.Error) {
+func report(err error) {
 	scanner.PrintError(os.Stderr, err)
 	os.Exit(1)
 }
@@ -78,7 +78,7 @@
 	var (
 		filename string
 		src      []byte
-		err      os.Error
+		err      error
 	)
 	switch flag.NArg() {
 	case 0:
diff --git a/src/pkg/exp/gotype/gotype.go b/src/pkg/exp/gotype/gotype.go
index 9199213..bc4a112 100644
--- a/src/pkg/exp/gotype/gotype.go
+++ b/src/pkg/exp/gotype/gotype.go
@@ -5,6 +5,7 @@
 package main
 
 import (
+	"errors"
 	"exp/types"
 	"flag"
 	"fmt"
@@ -38,7 +39,7 @@
 	os.Exit(2)
 }
 
-func report(err os.Error) {
+func report(err error) {
 	scanner.PrintError(os.Stderr, err)
 	exitCode = 2
 }
@@ -111,7 +112,7 @@
 		}
 		if file := parse(fset, filename, src); file != nil {
 			if files[filename] != nil {
-				report(os.NewError(fmt.Sprintf("%q: duplicate file", filename)))
+				report(errors.New(fmt.Sprintf("%q: duplicate file", filename)))
 				continue
 			}
 			files[filename] = file
diff --git a/src/pkg/exp/gui/gui.go b/src/pkg/exp/gui/gui.go
index 1714991..a69f83a 100644
--- a/src/pkg/exp/gui/gui.go
+++ b/src/pkg/exp/gui/gui.go
@@ -8,7 +8,6 @@
 import (
 	"image"
 	"image/draw"
-	"os"
 )
 
 // A Window represents a single graphics window.
@@ -21,7 +20,7 @@
 	// mouse movements and window resizes.
 	EventChan() <-chan interface{}
 	// Close closes the window.
-	Close() os.Error
+	Close() error
 }
 
 // A KeyEvent is sent for a key press or release.
@@ -54,5 +53,5 @@
 
 // An ErrEvent is sent when an error occurs.
 type ErrEvent struct {
-	Err os.Error
+	Err error
 }
diff --git a/src/pkg/exp/gui/x11/auth.go b/src/pkg/exp/gui/x11/auth.go
index 732f103..24e941c 100644
--- a/src/pkg/exp/gui/x11/auth.go
+++ b/src/pkg/exp/gui/x11/auth.go
@@ -6,12 +6,13 @@
 
 import (
 	"bufio"
+	"errors"
 	"io"
 	"os"
 )
 
 // readU16BE reads a big-endian uint16 from r, using b as a scratch buffer.
-func readU16BE(r io.Reader, b []byte) (uint16, os.Error) {
+func readU16BE(r io.Reader, b []byte) (uint16, error) {
 	_, err := io.ReadFull(r, b[0:2])
 	if err != nil {
 		return 0, err
@@ -20,13 +21,13 @@
 }
 
 // readStr reads a length-prefixed string from r, using b as a scratch buffer.
-func readStr(r io.Reader, b []byte) (string, os.Error) {
+func readStr(r io.Reader, b []byte) (string, error) {
 	n, err := readU16BE(r, b)
 	if err != nil {
 		return "", err
 	}
 	if int(n) > len(b) {
-		return "", os.NewError("Xauthority entry too long for buffer")
+		return "", errors.New("Xauthority entry too long for buffer")
 	}
 	_, err = io.ReadFull(r, b[0:n])
 	if err != nil {
@@ -37,7 +38,7 @@
 
 // readAuth reads the X authority file and returns the name/data pair for the display.
 // displayStr is the "12" out of a $DISPLAY like ":12.0".
-func readAuth(displayStr string) (name, data string, err os.Error) {
+func readAuth(displayStr string) (name, data string, err error) {
 	// b is a scratch buffer to use and should be at least 256 bytes long
 	// (i.e. it should be able to hold a hostname).
 	var b [256]byte
@@ -48,7 +49,7 @@
 	if fn == "" {
 		home := os.Getenv("HOME")
 		if home == "" {
-			err = os.NewError("Xauthority not found: $XAUTHORITY, $HOME not set")
+			err = errors.New("Xauthority not found: $XAUTHORITY, $HOME not set")
 			return
 		}
 		fn = home + "/.Xauthority"
diff --git a/src/pkg/exp/gui/x11/conn.go b/src/pkg/exp/gui/x11/conn.go
index f4a453e..15afc65 100644
--- a/src/pkg/exp/gui/x11/conn.go
+++ b/src/pkg/exp/gui/x11/conn.go
@@ -10,6 +10,7 @@
 
 import (
 	"bufio"
+	"errors"
 	"exp/gui"
 	"image"
 	"image/draw"
@@ -86,7 +87,7 @@
 		for y := b.Min.Y; y < b.Max.Y; y++ {
 			setU32LE(c.flushBuf0[16:20], uint32(y<<16))
 			if _, err := c.w.Write(c.flushBuf0[:24]); err != nil {
-				if err != os.EOF {
+				if err != io.EOF {
 					log.Println("x11:", err)
 				}
 				return
@@ -105,7 +106,7 @@
 				}
 				x += nx
 				if _, err := c.w.Write(c.flushBuf1[:nx]); err != nil {
-					if err != os.EOF {
+					if err != io.EOF {
 						log.Println("x11:", err)
 					}
 					return
@@ -113,7 +114,7 @@
 			}
 		}
 		if err := c.w.Flush(); err != nil {
-			if err != os.EOF {
+			if err != io.EOF {
 				log.Println("x11:", err)
 			}
 			return
@@ -133,7 +134,7 @@
 	}
 }
 
-func (c *conn) Close() os.Error {
+func (c *conn) Close() error {
 	// Shut down the writeSocket goroutine. This will close the socket to the
 	// X11 server, which will cause c.eventc to close.
 	close(c.flush)
@@ -156,7 +157,7 @@
 	for {
 		// X events are always 32 bytes long.
 		if _, err := io.ReadFull(c.r, c.buf[:32]); err != nil {
-			if err != os.EOF {
+			if err != io.EOF {
 				c.eventc <- gui.ErrEvent{err}
 			}
 			return
@@ -167,7 +168,7 @@
 			if cookie != 1 {
 				// We issued only one request (GetKeyboardMapping) with a cookie of 1,
 				// so we shouldn't get any other reply from the X server.
-				c.eventc <- gui.ErrEvent{os.NewError("x11: unexpected cookie")}
+				c.eventc <- gui.ErrEvent{errors.New("x11: unexpected cookie")}
 				return
 			}
 			keysymsPerKeycode = int(c.buf[1])
@@ -180,7 +181,7 @@
 				for j := range m {
 					u, err := readU32LE(c.r, c.buf[:4])
 					if err != nil {
-						if err != os.EOF {
+						if err != io.EOF {
 							c.eventc <- gui.ErrEvent{err}
 						}
 						return
@@ -253,10 +254,10 @@
 //	connect("/tmp/launch-123/:0") // calls net.Dial("unix", "/tmp/launch-123/:0"), displayStr="0"
 //	connect("hostname:2.1")       // calls net.Dial("tcp", "hostname:6002"), displayStr="2"
 //	connect("tcp/hostname:1.0")   // calls net.Dial("tcp", "hostname:6001"), displayStr="1"
-func connect(display string) (conn net.Conn, displayStr string, err os.Error) {
+func connect(display string) (conn net.Conn, displayStr string, err error) {
 	colonIdx := strings.LastIndex(display, ":")
 	if colonIdx < 0 {
-		return nil, "", os.NewError("bad display: " + display)
+		return nil, "", errors.New("bad display: " + display)
 	}
 	// Parse the section before the colon.
 	var protocol, host, socket string
@@ -275,7 +276,7 @@
 	// Parse the section after the colon.
 	after := display[colonIdx+1:]
 	if after == "" {
-		return nil, "", os.NewError("bad display: " + display)
+		return nil, "", errors.New("bad display: " + display)
 	}
 	if i := strings.LastIndex(after, "."); i < 0 {
 		displayStr = after
@@ -284,7 +285,7 @@
 	}
 	displayInt, err := strconv.Atoi(displayStr)
 	if err != nil || displayInt < 0 {
-		return nil, "", os.NewError("bad display: " + display)
+		return nil, "", errors.New("bad display: " + display)
 	}
 	// Make the connection.
 	if socket != "" {
@@ -295,21 +296,21 @@
 		conn, err = net.Dial("unix", "/tmp/.X11-unix/X"+displayStr)
 	}
 	if err != nil {
-		return nil, "", os.NewError("cannot connect to " + display + ": " + err.String())
+		return nil, "", errors.New("cannot connect to " + display + ": " + err.Error())
 	}
 	return
 }
 
 // authenticate authenticates ourselves with the X server.
 // displayStr is the "12" out of ":12.0".
-func authenticate(w *bufio.Writer, displayStr string) os.Error {
+func authenticate(w *bufio.Writer, displayStr string) error {
 	key, value, err := readAuth(displayStr)
 	if err != nil {
 		return err
 	}
 	// Assume that the authentication protocol is "MIT-MAGIC-COOKIE-1".
 	if len(key) != 18 || len(value) != 16 {
-		return os.NewError("unsupported Xauth")
+		return errors.New("unsupported Xauth")
 	}
 	// 0x006c means little-endian. 0x000b, 0x0000 means X major version 11, minor version 0.
 	// 0x0012 and 0x0010 means the auth key and value have lengths 18 and 16.
@@ -339,7 +340,7 @@
 }
 
 // readU8 reads a uint8 from r, using b as a scratch buffer.
-func readU8(r io.Reader, b []byte) (uint8, os.Error) {
+func readU8(r io.Reader, b []byte) (uint8, error) {
 	_, err := io.ReadFull(r, b[:1])
 	if err != nil {
 		return 0, err
@@ -348,7 +349,7 @@
 }
 
 // readU16LE reads a little-endian uint16 from r, using b as a scratch buffer.
-func readU16LE(r io.Reader, b []byte) (uint16, os.Error) {
+func readU16LE(r io.Reader, b []byte) (uint16, error) {
 	_, err := io.ReadFull(r, b[:2])
 	if err != nil {
 		return 0, err
@@ -357,7 +358,7 @@
 }
 
 // readU32LE reads a little-endian uint32 from r, using b as a scratch buffer.
-func readU32LE(r io.Reader, b []byte) (uint32, os.Error) {
+func readU32LE(r io.Reader, b []byte) (uint32, error) {
 	_, err := io.ReadFull(r, b[:4])
 	if err != nil {
 		return 0, err
@@ -374,7 +375,7 @@
 }
 
 // checkPixmapFormats checks that we have an agreeable X pixmap Format.
-func checkPixmapFormats(r io.Reader, b []byte, n int) (agree bool, err os.Error) {
+func checkPixmapFormats(r io.Reader, b []byte, n int) (agree bool, err error) {
 	for i := 0; i < n; i++ {
 		_, err = io.ReadFull(r, b[:8])
 		if err != nil {
@@ -389,7 +390,7 @@
 }
 
 // checkDepths checks that we have an agreeable X Depth (i.e. one that has an agreeable X VisualType).
-func checkDepths(r io.Reader, b []byte, n int, visual uint32) (agree bool, err os.Error) {
+func checkDepths(r io.Reader, b []byte, n int, visual uint32) (agree bool, err error) {
 	for i := 0; i < n; i++ {
 		var depth, visualsLen uint16
 		depth, err = readU16LE(r, b)
@@ -427,7 +428,7 @@
 }
 
 // checkScreens checks that we have an agreeable X Screen.
-func checkScreens(r io.Reader, b []byte, n int) (root, visual uint32, err os.Error) {
+func checkScreens(r io.Reader, b []byte, n int) (root, visual uint32, err error) {
 	for i := 0; i < n; i++ {
 		var root0, visual0, x uint32
 		root0, err = readU32LE(r, b)
@@ -465,14 +466,14 @@
 
 // handshake performs the protocol handshake with the X server, and ensures
 // that the server provides a compatible Screen, Depth, etc.
-func (c *conn) handshake() os.Error {
+func (c *conn) handshake() error {
 	_, err := io.ReadFull(c.r, c.buf[:8])
 	if err != nil {
 		return err
 	}
 	// Byte 0 should be 1 (success), bytes 2:6 should be 0xb0000000 (major/minor version 11.0).
 	if c.buf[0] != 1 || c.buf[2] != 11 || c.buf[3] != 0 || c.buf[4] != 0 || c.buf[5] != 0 {
-		return os.NewError("unsupported X version")
+		return errors.New("unsupported X version")
 	}
 	// Ignore the release number.
 	_, err = io.ReadFull(c.r, c.buf[:4])
@@ -490,7 +491,7 @@
 		return err
 	}
 	if resourceIdMask < 256 {
-		return os.NewError("X resource ID mask is too small")
+		return errors.New("X resource ID mask is too small")
 	}
 	// Ignore the motion buffer size.
 	_, err = io.ReadFull(c.r, c.buf[:4])
@@ -510,7 +511,7 @@
 		return err
 	}
 	if maxReqLen != 0xffff {
-		return os.NewError("unsupported X maximum request length")
+		return errors.New("unsupported X maximum request length")
 	}
 	// Read the roots length.
 	rootsLen, err := readU8(c.r, c.buf[:1])
@@ -526,7 +527,7 @@
 	// imageByteOrder(1), bitmapFormatBitOrder(1), bitmapFormatScanlineUnit(1) bitmapFormatScanlinePad(1),
 	// minKeycode(1), maxKeycode(1), padding(4), vendor (vendorLen).
 	if 10+int(vendorLen) > cap(c.buf) {
-		return os.NewError("unsupported X vendor")
+		return errors.New("unsupported X vendor")
 	}
 	_, err = io.ReadFull(c.r, c.buf[:10+int(vendorLen)])
 	if err != nil {
@@ -538,7 +539,7 @@
 		return err
 	}
 	if !agree {
-		return os.NewError("unsupported X pixmap formats")
+		return errors.New("unsupported X pixmap formats")
 	}
 	// Check that we have an agreeable screen.
 	root, visual, err := checkScreens(c.r, c.buf[:24], int(rootsLen))
@@ -546,7 +547,7 @@
 		return err
 	}
 	if root == 0 || visual == 0 {
-		return os.NewError("unsupported X screen")
+		return errors.New("unsupported X screen")
 	}
 	c.gc = resID(resourceIdBase)
 	c.window = resID(resourceIdBase + 1)
@@ -556,10 +557,10 @@
 }
 
 // NewWindow calls NewWindowDisplay with $DISPLAY.
-func NewWindow() (gui.Window, os.Error) {
+func NewWindow() (gui.Window, error) {
 	display := os.Getenv("DISPLAY")
 	if len(display) == 0 {
-		return nil, os.NewError("$DISPLAY not set")
+		return nil, errors.New("$DISPLAY not set")
 	}
 	return NewWindowDisplay(display)
 }
@@ -567,7 +568,7 @@
 // NewWindowDisplay returns a new gui.Window, backed by a newly created and
 // mapped X11 window. The X server to connect to is specified by the display
 // string, such as ":1".
-func NewWindowDisplay(display string) (gui.Window, os.Error) {
+func NewWindowDisplay(display string) (gui.Window, error) {
 	socket, displayStr, err := connect(display)
 	if err != nil {
 		return nil, err
diff --git a/src/pkg/exp/inotify/inotify_linux.go b/src/pkg/exp/inotify/inotify_linux.go
index ee3c75f..d6b7e85 100644
--- a/src/pkg/exp/inotify/inotify_linux.go
+++ b/src/pkg/exp/inotify/inotify_linux.go
@@ -27,6 +27,7 @@
 package inotify
 
 import (
+	"errors"
 	"fmt"
 	"os"
 	"strings"
@@ -49,14 +50,14 @@
 	fd       int               // File descriptor (as returned by the inotify_init() syscall)
 	watches  map[string]*watch // Map of inotify watches (key: path)
 	paths    map[int]string    // Map of watched paths (key: watch descriptor)
-	Error    chan os.Error     // Errors are sent on this channel
+	Error    chan error        // Errors are sent on this channel
 	Event    chan *Event       // Events are returned on this channel
 	done     chan bool         // Channel for sending a "quit message" to the reader goroutine
 	isClosed bool              // Set to true when Close() is first called
 }
 
 // NewWatcher creates and returns a new inotify instance using inotify_init(2)
-func NewWatcher() (*Watcher, os.Error) {
+func NewWatcher() (*Watcher, error) {
 	fd, errno := syscall.InotifyInit()
 	if fd == -1 {
 		return nil, os.NewSyscallError("inotify_init", errno)
@@ -66,7 +67,7 @@
 		watches: make(map[string]*watch),
 		paths:   make(map[int]string),
 		Event:   make(chan *Event),
-		Error:   make(chan os.Error),
+		Error:   make(chan error),
 		done:    make(chan bool, 1),
 	}
 
@@ -77,7 +78,7 @@
 // Close closes an inotify watcher instance
 // It sends a message to the reader goroutine to quit and removes all watches
 // associated with the inotify instance
-func (w *Watcher) Close() os.Error {
+func (w *Watcher) Close() error {
 	if w.isClosed {
 		return nil
 	}
@@ -94,9 +95,9 @@
 
 // AddWatch adds path to the watched file set.
 // The flags are interpreted as described in inotify_add_watch(2).
-func (w *Watcher) AddWatch(path string, flags uint32) os.Error {
+func (w *Watcher) AddWatch(path string, flags uint32) error {
 	if w.isClosed {
-		return os.NewError("inotify instance already closed")
+		return errors.New("inotify instance already closed")
 	}
 
 	watchEntry, found := w.watches[path]
@@ -117,15 +118,15 @@
 }
 
 // Watch adds path to the watched file set, watching all events.
-func (w *Watcher) Watch(path string) os.Error {
+func (w *Watcher) Watch(path string) error {
 	return w.AddWatch(path, IN_ALL_EVENTS)
 }
 
 // RemoveWatch removes path from the watched file set.
-func (w *Watcher) RemoveWatch(path string) os.Error {
+func (w *Watcher) RemoveWatch(path string) error {
 	watch, ok := w.watches[path]
 	if !ok {
-		return os.NewError(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path))
+		return errors.New(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path))
 	}
 	success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
 	if success == -1 {
@@ -168,7 +169,7 @@
 			continue
 		}
 		if n < syscall.SizeofInotifyEvent {
-			w.Error <- os.NewError("inotify: short read in readEvents()")
+			w.Error <- errors.New("inotify: short read in readEvents()")
 			continue
 		}
 
diff --git a/src/pkg/exp/norm/maketables.go b/src/pkg/exp/norm/maketables.go
index 93edf22..c7a3762 100644
--- a/src/pkg/exp/norm/maketables.go
+++ b/src/pkg/exp/norm/maketables.go
@@ -220,7 +220,7 @@
 	return
 }
 
-func parseDecomposition(s string, skipfirst bool) (a []rune, e os.Error) {
+func parseDecomposition(s string, skipfirst bool) (a []rune, e error) {
 	decomp := strings.Split(s, " ")
 	if len(decomp) > 0 && skipfirst {
 		decomp = decomp[1:]
@@ -310,7 +310,7 @@
 	for {
 		line, err := input.ReadString('\n')
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			}
 			logger.Fatal(err)
@@ -350,7 +350,7 @@
 	for {
 		line, err := input.ReadString('\n')
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			}
 			logger.Fatal(err)
@@ -782,7 +782,7 @@
 	for {
 		line, err := input.ReadString('\n')
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			}
 			logger.Fatal(err)
diff --git a/src/pkg/exp/norm/normregtest.go b/src/pkg/exp/norm/normregtest.go
index cf3b340..6e27f63 100644
--- a/src/pkg/exp/norm/normregtest.go
+++ b/src/pkg/exp/norm/normregtest.go
@@ -11,6 +11,7 @@
 	"flag"
 	"fmt"
 	"http"
+	"io"
 	"log"
 	"os"
 	"path"
@@ -141,7 +142,7 @@
 	for {
 		line, err := input.ReadString('\n')
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				break
 			}
 			logger.Fatal(err)
diff --git a/src/pkg/exp/norm/readwriter.go b/src/pkg/exp/norm/readwriter.go
index 48ae135..ee58abd 100644
--- a/src/pkg/exp/norm/readwriter.go
+++ b/src/pkg/exp/norm/readwriter.go
@@ -4,10 +4,7 @@
 
 package norm
 
-import (
-	"io"
-	"os"
-)
+import "io"
 
 type normWriter struct {
 	rb  reorderBuffer
@@ -18,7 +15,7 @@
 // Write implements the standard write interface.  If the last characters are
 // not at a normalization boundary, the bytes will be buffered for the next
 // write. The remaining bytes will be written on close.
-func (w *normWriter) Write(data []byte) (n int, err os.Error) {
+func (w *normWriter) Write(data []byte) (n int, err error) {
 	// Process data in pieces to keep w.buf size bounded.
 	const chunk = 4000
 
@@ -52,7 +49,7 @@
 }
 
 // Close forces data that remains in the buffer to be written.
-func (w *normWriter) Close() os.Error {
+func (w *normWriter) Close() error {
 	if len(w.buf) > 0 {
 		_, err := w.w.Write(w.buf)
 		if err != nil {
@@ -79,11 +76,11 @@
 	outbuf       []byte
 	bufStart     int
 	lastBoundary int
-	err          os.Error
+	err          error
 }
 
 // Read implements the standard read interface.
-func (r *normReader) Read(p []byte) (int, os.Error) {
+func (r *normReader) Read(p []byte) (int, error) {
 	for {
 		if r.lastBoundary-r.bufStart > 0 {
 			n := copy(p, r.outbuf[r.bufStart:r.lastBoundary])
@@ -106,7 +103,7 @@
 		if n > 0 {
 			r.outbuf = doAppend(&r.rb, r.outbuf)
 		}
-		if err == os.EOF {
+		if err == io.EOF {
 			r.lastBoundary = len(r.outbuf)
 		} else {
 			r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf)
diff --git a/src/pkg/exp/norm/readwriter_test.go b/src/pkg/exp/norm/readwriter_test.go
index 68652ef..3b49eb0 100644
--- a/src/pkg/exp/norm/readwriter_test.go
+++ b/src/pkg/exp/norm/readwriter_test.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"fmt"
-	"os"
 	"strings"
 	"testing"
 )
@@ -27,7 +26,7 @@
 		r := f.Reader(bytes.NewBuffer(out))
 		buf := make([]byte, size)
 		result := []byte{}
-		for n, err := 0, os.Error(nil); err == nil; {
+		for n, err := 0, error(nil); err == nil; {
 			n, err = r.Read(buf)
 			result = append(result, buf[:n]...)
 		}
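
The rewritten test drives the reader with a typed nil in the loop header: error(nil) replaces os.Error(nil) as the zero value. The same idiom in a tiny self-contained loop (illustrative):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		r := strings.NewReader("abcdef")
		buf := make([]byte, 4)
		var result []byte
		// error(nil) gives the loop variable the interface type error.
		for n, err := 0, error(nil); err == nil; {
			n, err = r.Read(buf)
			result = append(result, buf[:n]...)
		}
		fmt.Printf("%s\n", result) // abcdef
	}
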
diff --git a/src/pkg/exp/spdy/read.go b/src/pkg/exp/spdy/read.go
index 2b1fd3d..3de80c0 100644
--- a/src/pkg/exp/spdy/read.go
+++ b/src/pkg/exp/spdy/read.go
@@ -9,19 +9,18 @@
 	"encoding/binary"
 	"http"
 	"io"
-	"os"
 	"strings"
 )
 
-func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
 	return f.readSynStreamFrame(h, frame)
 }
 
-func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
 	return f.readSynReplyFrame(h, frame)
 }
 
-func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
 	frame.CFHeader = h
 	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
 		return err
@@ -32,7 +31,7 @@
 	return nil
 }
 
-func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
 	frame.CFHeader = h
 	var numSettings uint32
 	if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
@@ -52,12 +51,12 @@
 	return nil
 }
 
-func (frame *NoopFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *NoopFrame) read(h ControlFrameHeader, f *Framer) error {
 	frame.CFHeader = h
 	return nil
 }
 
-func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
 	frame.CFHeader = h
 	if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
 		return err
@@ -65,7 +64,7 @@
 	return nil
 }
 
-func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
 	frame.CFHeader = h
 	if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
 		return err
@@ -73,11 +72,11 @@
 	return nil
 }
 
-func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) os.Error {
+func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
 	return f.readHeadersFrame(h, frame)
 }
 
-func newControlFrame(frameType ControlFrameType) (controlFrame, os.Error) {
+func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
 	ctor, ok := cframeCtor[frameType]
 	if !ok {
 		return nil, &Error{Err: InvalidControlFrame}
@@ -97,7 +96,7 @@
 	// TODO(willchan): Add TypeWindowUpdate
 }
 
-func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) os.Error {
+func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
 	if f.headerDecompressor != nil {
 		f.headerReader.N = payloadSize
 		return nil
@@ -112,7 +111,7 @@
 }
 
 // ReadFrame reads SPDY encoded data and returns a decompressed Frame.
-func (f *Framer) ReadFrame() (Frame, os.Error) {
+func (f *Framer) ReadFrame() (Frame, error) {
 	var firstWord uint32
 	if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
 		return nil, err
@@ -125,7 +124,7 @@
 	return f.parseDataFrame(firstWord & 0x7fffffff)
 }
 
-func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, os.Error) {
+func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
 	var length uint32
 	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
 		return nil, err
@@ -143,12 +142,12 @@
 	return cframe, nil
 }
 
-func parseHeaderValueBlock(r io.Reader, streamId uint32) (http.Header, os.Error) {
+func parseHeaderValueBlock(r io.Reader, streamId uint32) (http.Header, error) {
 	var numHeaders uint16
 	if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
 		return nil, err
 	}
-	var e os.Error
+	var e error
 	h := make(http.Header, int(numHeaders))
 	for i := 0; i < int(numHeaders); i++ {
 		var length uint16
@@ -185,9 +184,9 @@
 	return h, nil
 }
 
-func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) os.Error {
+func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
 	frame.CFHeader = h
-	var err os.Error
+	var err error
 	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
 		return err
 	}
@@ -206,7 +205,7 @@
 	}
 
 	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
-	if !f.headerCompressionDisabled && ((err == os.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) {
+	if !f.headerCompressionDisabled && ((err == io.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) {
 		err = &Error{WrongCompressedPayloadSize, 0}
 	}
 	if err != nil {
@@ -223,9 +222,9 @@
 	return nil
 }
 
-func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) os.Error {
+func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
 	frame.CFHeader = h
-	var err os.Error
+	var err error
 	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
 		return err
 	}
@@ -239,7 +238,7 @@
 		reader = f.headerDecompressor
 	}
 	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
-	if !f.headerCompressionDisabled && ((err == os.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) {
+	if !f.headerCompressionDisabled && ((err == io.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) {
 		err = &Error{WrongCompressedPayloadSize, 0}
 	}
 	if err != nil {
@@ -256,9 +255,9 @@
 	return nil
 }
 
-func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) os.Error {
+func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
 	frame.CFHeader = h
-	var err os.Error
+	var err error
 	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
 		return err
 	}
@@ -272,7 +271,7 @@
 		reader = f.headerDecompressor
 	}
 	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
-	if !f.headerCompressionDisabled && ((err == os.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) {
+	if !f.headerCompressionDisabled && ((err == io.EOF && f.headerReader.N == 0) || f.headerReader.N != 0) {
 		err = &Error{WrongCompressedPayloadSize, 0}
 	}
 	if err != nil {
@@ -296,7 +295,7 @@
 	return nil
 }
 
-func (f *Framer) parseDataFrame(streamId uint32) (*DataFrame, os.Error) {
+func (f *Framer) parseDataFrame(streamId uint32) (*DataFrame, error) {
 	var length uint32
 	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
 		return nil, err
diff --git a/src/pkg/exp/spdy/types.go b/src/pkg/exp/spdy/types.go
index 41cafb1..87d6edb 100644
--- a/src/pkg/exp/spdy/types.go
+++ b/src/pkg/exp/spdy/types.go
@@ -9,7 +9,6 @@
 	"compress/zlib"
 	"http"
 	"io"
-	"os"
 )
 
 //  Data Frame Format
@@ -161,7 +160,7 @@
 // Frame is a single SPDY frame in its unpacked in-memory representation. Use
 // Framer to read and write it.
 type Frame interface {
-	write(f *Framer) os.Error
+	write(f *Framer) error
 }
 
 // ControlFrameHeader contains all the fields in a control frame header,
@@ -176,7 +175,7 @@
 
 type controlFrame interface {
 	Frame
-	read(h ControlFrameHeader, f *Framer) os.Error
+	read(h ControlFrameHeader, f *Framer) error
 }
 
 // SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
@@ -321,7 +320,7 @@
 	StreamId uint32
 }
 
-func (e *Error) String() string {
+func (e *Error) Error() string {
 	return string(e.Err)
 }
 
@@ -354,7 +353,7 @@
 // an io.Writer and io.Reader. Note that Framer will read and write individual fields
 // from/to the Reader and Writer, so the caller should pass in an appropriately 
 // buffered implementation to optimize performance.
-func NewFramer(w io.Writer, r io.Reader) (*Framer, os.Error) {
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
 	compressBuf := new(bytes.Buffer)
 	compressor, err := zlib.NewWriterDict(compressBuf, zlib.BestCompression, []byte(HeaderDictionary))
 	if err != nil {
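
In spdy, *Error also moves from String() to Error(). A standalone sketch of the shape implied by the hunks above; ErrorCode here is an assumed string-backed stand-in, since only the Err and StreamId fields and the string(e.Err) conversion appear in this diff:

	package main

	import "fmt"

	// ErrorCode is assumed to be a string-backed code, matching the
	// string(e.Err) conversion in the method above.
	type ErrorCode string

	const InvalidDataFrame ErrorCode = "InvalidDataFrame"

	type Error struct {
		Err      ErrorCode
		StreamId uint32
	}

	func (e *Error) Error() string {
		return string(e.Err)
	}

	func main() {
		var err error = &Error{InvalidDataFrame, 1}
		fmt.Println(err) // InvalidDataFrame
	}
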
diff --git a/src/pkg/exp/spdy/write.go b/src/pkg/exp/spdy/write.go
index 7d40bbe..537154f 100644
--- a/src/pkg/exp/spdy/write.go
+++ b/src/pkg/exp/spdy/write.go
@@ -8,19 +8,18 @@
 	"encoding/binary"
 	"http"
 	"io"
-	"os"
 	"strings"
 )
 
-func (frame *SynStreamFrame) write(f *Framer) os.Error {
+func (frame *SynStreamFrame) write(f *Framer) error {
 	return f.writeSynStreamFrame(frame)
 }
 
-func (frame *SynReplyFrame) write(f *Framer) os.Error {
+func (frame *SynReplyFrame) write(f *Framer) error {
 	return f.writeSynReplyFrame(frame)
 }
 
-func (frame *RstStreamFrame) write(f *Framer) (err os.Error) {
+func (frame *RstStreamFrame) write(f *Framer) (err error) {
 	frame.CFHeader.version = Version
 	frame.CFHeader.frameType = TypeRstStream
 	frame.CFHeader.length = 8
@@ -38,7 +37,7 @@
 	return
 }
 
-func (frame *SettingsFrame) write(f *Framer) (err os.Error) {
+func (frame *SettingsFrame) write(f *Framer) (err error) {
 	frame.CFHeader.version = Version
 	frame.CFHeader.frameType = TypeSettings
 	frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
@@ -62,7 +61,7 @@
 	return
 }
 
-func (frame *NoopFrame) write(f *Framer) os.Error {
+func (frame *NoopFrame) write(f *Framer) error {
 	frame.CFHeader.version = Version
 	frame.CFHeader.frameType = TypeNoop
 
@@ -70,7 +69,7 @@
 	return writeControlFrameHeader(f.w, frame.CFHeader)
 }
 
-func (frame *PingFrame) write(f *Framer) (err os.Error) {
+func (frame *PingFrame) write(f *Framer) (err error) {
 	frame.CFHeader.version = Version
 	frame.CFHeader.frameType = TypePing
 	frame.CFHeader.length = 4
@@ -85,7 +84,7 @@
 	return
 }
 
-func (frame *GoAwayFrame) write(f *Framer) (err os.Error) {
+func (frame *GoAwayFrame) write(f *Framer) (err error) {
 	frame.CFHeader.version = Version
 	frame.CFHeader.frameType = TypeGoAway
 	frame.CFHeader.length = 4
@@ -100,20 +99,20 @@
 	return nil
 }
 
-func (frame *HeadersFrame) write(f *Framer) os.Error {
+func (frame *HeadersFrame) write(f *Framer) error {
 	return f.writeHeadersFrame(frame)
 }
 
-func (frame *DataFrame) write(f *Framer) os.Error {
+func (frame *DataFrame) write(f *Framer) error {
 	return f.writeDataFrame(frame)
 }
 
 // WriteFrame writes a frame.
-func (f *Framer) WriteFrame(frame Frame) os.Error {
+func (f *Framer) WriteFrame(frame Frame) error {
 	return frame.write(f)
 }
 
-func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) os.Error {
+func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
 	if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
 		return err
 	}
@@ -127,7 +126,7 @@
 	return nil
 }
 
-func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err os.Error) {
+func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
 	n = 0
 	if err = binary.Write(w, binary.BigEndian, uint16(len(h))); err != nil {
 		return
@@ -156,7 +155,7 @@
 	return
 }
 
-func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err os.Error) {
+func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
 	// Marshal the headers.
 	var writer io.Writer = f.headerBuf
 	if !f.headerCompressionDisabled {
@@ -194,7 +193,7 @@
 	return nil
 }
 
-func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err os.Error) {
+func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
 	// Marshal the headers.
 	var writer io.Writer = f.headerBuf
 	if !f.headerCompressionDisabled {
@@ -229,7 +228,7 @@
 	return
 }
 
-func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err os.Error) {
+func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
 	// Marshal the headers.
 	var writer io.Writer = f.headerBuf
 	if !f.headerCompressionDisabled {
@@ -264,7 +263,7 @@
 	return
 }
 
-func (f *Framer) writeDataFrame(frame *DataFrame) (err os.Error) {
+func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
 	// Validate DataFrame
 	if frame.StreamId&0x80000000 != 0 || len(frame.Data) >= 0x0f000000 {
 		return &Error{InvalidDataFrame, frame.StreamId}
diff --git a/src/pkg/exp/sql/convert.go b/src/pkg/exp/sql/convert.go
index a35e0be..b1feef0 100644
--- a/src/pkg/exp/sql/convert.go
+++ b/src/pkg/exp/sql/convert.go
@@ -7,8 +7,8 @@
 package sql
 
 import (
+	"errors"
 	"fmt"
-	"os"
 	"reflect"
 	"strconv"
 )
@@ -16,7 +16,7 @@
 // convertAssign copies to dest the value in src, converting it if possible.
 // An error is returned if the copy would result in loss of information.
 // dest should be a pointer type.
-func convertAssign(dest, src interface{}) os.Error {
+func convertAssign(dest, src interface{}) error {
 	// Common cases, without reflect.  Fall through.
 	switch s := src.(type) {
 	case string:
@@ -56,7 +56,7 @@
 
 	dpv := reflect.ValueOf(dest)
 	if dpv.Kind() != reflect.Ptr {
-		return os.NewError("destination not a pointer")
+		return errors.New("destination not a pointer")
 	}
 
 	dv := reflect.Indirect(dpv)
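
The hunk above shows the core rewrite this CL applies throughout the tree: functions that returned os.Error now return the builtin error interface, and os.NewError(...) becomes errors.New(...). A minimal, self-contained sketch of the rewritten shape (the convert function and its message are illustrative only, not part of exp/sql):

	package main

	import (
		"errors"
		"fmt"
	)

	// convert mirrors the post-gofix signature style: it returns the builtin
	// error interface rather than the old os.Error.
	func convert(dest *int, src interface{}) error {
		n, ok := src.(int)
		if !ok {
			return errors.New("convert: source is not an int") // was os.NewError(...)
		}
		*dest = n
		return nil
	}

	func main() {
		var x int
		fmt.Println(convert(&x, "seven")) // prints the error message
		if err := convert(&x, 7); err == nil {
			fmt.Println(x) // 7
		}
	}
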
diff --git a/src/pkg/exp/sql/convert_test.go b/src/pkg/exp/sql/convert_test.go
index 8499918..f85ed99 100644
--- a/src/pkg/exp/sql/convert_test.go
+++ b/src/pkg/exp/sql/convert_test.go
@@ -68,7 +68,7 @@
 		err := convertAssign(ct.d, ct.s)
 		errstr := ""
 		if err != nil {
-			errstr = err.String()
+			errstr = err.Error()
 		}
 		errf := func(format string, args ...interface{}) {
 			base := fmt.Sprintf("convertAssign #%d: for %v (%T) -> %T, ", n, ct.s, ct.s, ct.d)
diff --git a/src/pkg/exp/sql/driver/driver.go b/src/pkg/exp/sql/driver/driver.go
index 7508b19..52714e8 100644
--- a/src/pkg/exp/sql/driver/driver.go
+++ b/src/pkg/exp/sql/driver/driver.go
@@ -19,9 +19,7 @@
 //
 package driver
 
-import (
-	"os"
-)
+import "errors"
 
 // Driver is the interface that must be implemented by a database
 // driver.
@@ -31,7 +29,7 @@
 	//
 	// The returned connection is only used by one goroutine at a
 	// time.
-	Open(name string) (Conn, os.Error)
+	Open(name string) (Conn, error)
 }
 
 // Execer is an optional interface that may be implemented by a Driver
@@ -48,7 +46,7 @@
 //
 // All arguments are of a subset type as defined in the package docs.
 type Execer interface {
-	Exec(query string, args []interface{}) (Result, os.Error)
+	Exec(query string, args []interface{}) (Result, error)
 }
 
 // Conn is a connection to a database. It is not used concurrently
@@ -57,16 +55,16 @@
 // Conn is assumed to be stateful.
 type Conn interface {
 	// Prepare returns a prepared statement, bound to this connection.
-	Prepare(query string) (Stmt, os.Error)
+	Prepare(query string) (Stmt, error)
 
 	// Close invalidates and potentially stops any current
 	// prepared statements and transactions, marking this
 	// connection as no longer in use.  The driver may cache or
 	// close its underlying connection to its database.
-	Close() os.Error
+	Close() error
 
 	// Begin starts and returns a new transaction.
-	Begin() (Tx, os.Error)
+	Begin() (Tx, error)
 }
 
 // Result is the result of a query execution.
@@ -74,18 +72,18 @@
 	// LastInsertId returns the database's auto-generated ID
 	// after, for example, an INSERT into a table with primary
 	// key.
-	LastInsertId() (int64, os.Error)
+	LastInsertId() (int64, error)
 
 	// RowsAffected returns the number of rows affected by the
 	// query.
-	RowsAffected() (int64, os.Error)
+	RowsAffected() (int64, error)
 }
 
 // Stmt is a prepared statement. It is bound to a Conn and not
 // used by multiple goroutines concurrently.
 type Stmt interface {
 	// Close closes the statement.
-	Close() os.Error
+	Close() error
 
 	// NumInput returns the number of placeholder parameters.
 	NumInput() int
@@ -93,11 +91,11 @@
 	// Exec executes a query that doesn't return rows, such
 	// as an INSERT or UPDATE.  The args are all of a subset
 	// type as defined above.
-	Exec(args []interface{}) (Result, os.Error)
+	Exec(args []interface{}) (Result, error)
 
 	// Query executes a query that may return rows, such as a
 	// SELECT.  The args are all of a subset type as defined above.
-	Query(args []interface{}) (Rows, os.Error)
+	Query(args []interface{}) (Rows, error)
 }
 
 // ColumnConverter may be optionally implemented by Stmt if the
@@ -120,7 +118,7 @@
 	Columns() []string
 
 	// Close closes the rows iterator.
-	Close() os.Error
+	Close() error
 
 	// Next is called to populate the next row of data into
 	// the provided slice. The provided slice will be the same
@@ -129,13 +127,13 @@
 	// The dest slice may be populated only with values
 	// of subset types defined above, but excluding string.
 	// All string values must be converted to []byte.
-	Next(dest []interface{}) os.Error
+	Next(dest []interface{}) error
 }
 
 // Tx is a transaction.
 type Tx interface {
-	Commit() os.Error
-	Rollback() os.Error
+	Commit() error
+	Rollback() error
 }
 
 // RowsAffected implements Result for an INSERT or UPDATE operation
@@ -144,11 +142,11 @@
 
 var _ Result = RowsAffected(0)
 
-func (RowsAffected) LastInsertId() (int64, os.Error) {
-	return 0, os.NewError("no LastInsertId available")
+func (RowsAffected) LastInsertId() (int64, error) {
+	return 0, errors.New("no LastInsertId available")
 }
 
-func (v RowsAffected) RowsAffected() (int64, os.Error) {
+func (v RowsAffected) RowsAffected() (int64, error) {
 	return int64(v), nil
 }
 
@@ -160,10 +158,10 @@
 
 var _ Result = ddlSuccess{}
 
-func (ddlSuccess) LastInsertId() (int64, os.Error) {
-	return 0, os.NewError("no LastInsertId available after DDL statement")
+func (ddlSuccess) LastInsertId() (int64, error) {
+	return 0, errors.New("no LastInsertId available after DDL statement")
 }
 
-func (ddlSuccess) RowsAffected() (int64, os.Error) {
-	return 0, os.NewError("no RowsAffected available after DDL statement")
+func (ddlSuccess) RowsAffected() (int64, error) {
+	return 0, errors.New("no RowsAffected available after DDL statement")
 }
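
With Result's methods now returning (int64, error), a driver-side implementation takes the same shape as the RowsAffected and ddlSuccess helpers above. A self-contained sketch under that assumption (insertResult is a hypothetical type, not part of the package):

	package main

	import (
		"errors"
		"fmt"
	)

	// insertResult satisfies the updated Result contract: both methods report
	// failure through the builtin error type instead of os.Error.
	type insertResult struct{ id int64 }

	func (r insertResult) LastInsertId() (int64, error) {
		if r.id == 0 {
			return 0, errors.New("no LastInsertId available")
		}
		return r.id, nil
	}

	func (r insertResult) RowsAffected() (int64, error) {
		return 1, nil
	}

	func main() {
		id, err := insertResult{id: 42}.LastInsertId()
		fmt.Println(id, err) // 42 <nil>
	}
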
diff --git a/src/pkg/exp/sql/driver/types.go b/src/pkg/exp/sql/driver/types.go
index 5521d53..9faf32f 100644
--- a/src/pkg/exp/sql/driver/types.go
+++ b/src/pkg/exp/sql/driver/types.go
@@ -6,7 +6,6 @@
 
 import (
 	"fmt"
-	"os"
 	"reflect"
 	"strconv"
 )
@@ -14,7 +13,7 @@
 // ValueConverter is the interface providing the ConvertValue method.
 type ValueConverter interface {
 	// ConvertValue converts a value to a restricted subset type.
-	ConvertValue(v interface{}) (interface{}, os.Error)
+	ConvertValue(v interface{}) (interface{}, error)
 }
 
 // Bool is a ValueConverter that converts input values to bools.
@@ -27,7 +26,7 @@
 
 var _ ValueConverter = boolType{}
 
-func (boolType) ConvertValue(v interface{}) (interface{}, os.Error) {
+func (boolType) ConvertValue(v interface{}) (interface{}, error) {
 	return nil, fmt.Errorf("TODO(bradfitz): bool conversions")
 }
 
@@ -39,7 +38,7 @@
 
 var _ ValueConverter = int32Type{}
 
-func (int32Type) ConvertValue(v interface{}) (interface{}, os.Error) {
+func (int32Type) ConvertValue(v interface{}) (interface{}, error) {
 	rv := reflect.ValueOf(v)
 	switch rv.Kind() {
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -72,7 +71,7 @@
 
 type stringType struct{}
 
-func (stringType) ConvertValue(v interface{}) (interface{}, os.Error) {
+func (stringType) ConvertValue(v interface{}) (interface{}, error) {
 	switch v.(type) {
 	case string, []byte:
 		return v, nil
@@ -137,7 +136,7 @@
 
 var _ ValueConverter = defaultConverter{}
 
-func (defaultConverter) ConvertValue(v interface{}) (interface{}, os.Error) {
+func (defaultConverter) ConvertValue(v interface{}) (interface{}, error) {
 	if IsParameterSubsetType(v) {
 		return v, nil
 	}
diff --git a/src/pkg/exp/sql/fakedb_test.go b/src/pkg/exp/sql/fakedb_test.go
index c906185..289294b 100644
--- a/src/pkg/exp/sql/fakedb_test.go
+++ b/src/pkg/exp/sql/fakedb_test.go
@@ -5,9 +5,10 @@
 package sql
 
 import (
+	"errors"
 	"fmt"
+	"io"
 	"log"
-	"os"
 	"strconv"
 	"strings"
 	"sync"
@@ -108,7 +109,7 @@
 // Supports dsn forms:
 //    <dbname>
 //    <dbname>;wipe
-func (d *fakeDriver) Open(dsn string) (driver.Conn, os.Error) {
+func (d *fakeDriver) Open(dsn string) (driver.Conn, error) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
 	d.openCount++
@@ -117,7 +118,7 @@
 	}
 	parts := strings.Split(dsn, ";")
 	if len(parts) < 1 {
-		return nil, os.NewError("fakedb: no database name")
+		return nil, errors.New("fakedb: no database name")
 	}
 	name := parts[0]
 	db, ok := d.dbs[name]
@@ -134,7 +135,7 @@
 	db.tables = nil
 }
 
-func (db *fakeDB) createTable(name string, columnNames, columnTypes []string) os.Error {
+func (db *fakeDB) createTable(name string, columnNames, columnTypes []string) error {
 	db.mu.Lock()
 	defer db.mu.Unlock()
 	if db.tables == nil {
@@ -175,33 +176,33 @@
 	return "", false
 }
 
-func (c *fakeConn) Begin() (driver.Tx, os.Error) {
+func (c *fakeConn) Begin() (driver.Tx, error) {
 	if c.currTx != nil {
-		return nil, os.NewError("already in a transaction")
+		return nil, errors.New("already in a transaction")
 	}
 	c.currTx = &fakeTx{c: c}
 	return c.currTx, nil
 }
 
-func (c *fakeConn) Close() os.Error {
+func (c *fakeConn) Close() error {
 	if c.currTx != nil {
-		return os.NewError("can't close; in a Transaction")
+		return errors.New("can't close; in a Transaction")
 	}
 	if c.db == nil {
-		return os.NewError("can't close; already closed")
+		return errors.New("can't close; already closed")
 	}
 	c.db = nil
 	return nil
 }
 
-func errf(msg string, args ...interface{}) os.Error {
-	return os.NewError("fakedb: " + fmt.Sprintf(msg, args...))
+func errf(msg string, args ...interface{}) error {
+	return errors.New("fakedb: " + fmt.Sprintf(msg, args...))
 }
 
 // parts are table|selectCol1,selectCol2|whereCol=?,whereCol2=?
 // (note that where columns must always contain ? marks,
 //  just a limitation for fakedb)
-func (c *fakeConn) prepareSelect(stmt *fakeStmt, parts []string) (driver.Stmt, os.Error) {
+func (c *fakeConn) prepareSelect(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
 	if len(parts) != 3 {
 		return nil, errf("invalid SELECT syntax with %d parts; want 3", len(parts))
 	}
@@ -228,7 +229,7 @@
 }
 
 // parts are table|col=type,col2=type2
-func (c *fakeConn) prepareCreate(stmt *fakeStmt, parts []string) (driver.Stmt, os.Error) {
+func (c *fakeConn) prepareCreate(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
 	if len(parts) != 2 {
 		return nil, errf("invalid CREATE syntax with %d parts; want 2", len(parts))
 	}
@@ -245,7 +246,7 @@
 }
 
 // parts are table|col=?,col2=val
-func (c *fakeConn) prepareInsert(stmt *fakeStmt, parts []string) (driver.Stmt, os.Error) {
+func (c *fakeConn) prepareInsert(stmt *fakeStmt, parts []string) (driver.Stmt, error) {
 	if len(parts) != 2 {
 		return nil, errf("invalid INSERT syntax with %d parts; want 2", len(parts))
 	}
@@ -287,7 +288,7 @@
 	return stmt, nil
 }
 
-func (c *fakeConn) Prepare(query string) (driver.Stmt, os.Error) {
+func (c *fakeConn) Prepare(query string) (driver.Stmt, error) {
 	if c.db == nil {
 		panic("nil c.db; conn = " + fmt.Sprintf("%#v", c))
 	}
@@ -317,11 +318,11 @@
 	return s.placeholderConverter[idx]
 }
 
-func (s *fakeStmt) Close() os.Error {
+func (s *fakeStmt) Close() error {
 	return nil
 }
 
-func (s *fakeStmt) Exec(args []interface{}) (driver.Result, os.Error) {
+func (s *fakeStmt) Exec(args []interface{}) (driver.Result, error) {
 	db := s.c.db
 	switch s.cmd {
 	case "WIPE":
@@ -339,7 +340,7 @@
 	return nil, fmt.Errorf("unimplemented statement Exec command type of %q", s.cmd)
 }
 
-func (s *fakeStmt) execInsert(args []interface{}) (driver.Result, os.Error) {
+func (s *fakeStmt) execInsert(args []interface{}) (driver.Result, error) {
 	db := s.c.db
 	if len(args) != s.placeholders {
 		panic("error in pkg db; should only get here if size is correct")
@@ -375,7 +376,7 @@
 	return driver.RowsAffected(1), nil
 }
 
-func (s *fakeStmt) Query(args []interface{}) (driver.Rows, os.Error) {
+func (s *fakeStmt) Query(args []interface{}) (driver.Rows, error) {
 	db := s.c.db
 	if len(args) != s.placeholders {
 		panic("error in pkg db; should only get here if size is correct")
@@ -438,12 +439,12 @@
 	return s.placeholders
 }
 
-func (tx *fakeTx) Commit() os.Error {
+func (tx *fakeTx) Commit() error {
 	tx.c.currTx = nil
 	return nil
 }
 
-func (tx *fakeTx) Rollback() os.Error {
+func (tx *fakeTx) Rollback() error {
 	tx.c.currTx = nil
 	return nil
 }
@@ -455,7 +456,7 @@
 	closed bool
 }
 
-func (rc *rowsCursor) Close() os.Error {
+func (rc *rowsCursor) Close() error {
 	rc.closed = true
 	return nil
 }
@@ -464,13 +465,13 @@
 	return rc.cols
 }
 
-func (rc *rowsCursor) Next(dest []interface{}) os.Error {
+func (rc *rowsCursor) Next(dest []interface{}) error {
 	if rc.closed {
-		return os.NewError("fakedb: cursor is closed")
+		return errors.New("fakedb: cursor is closed")
 	}
 	rc.pos++
 	if rc.pos >= len(rc.rows) {
-		return os.EOF // per interface spec
+		return io.EOF // per interface spec
 	}
 	for i, v := range rc.rows[rc.pos].cols {
 		// TODO(bradfitz): convert to subset types? naah, I
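
The "return io.EOF // per interface spec" line above is the contract a driver's Rows must follow after this change: exhaustion is signalled with io.EOF rather than os.EOF or a driver-specific error. A self-contained sketch of draining such a cursor (memRows is an illustrative stand-in for rowsCursor):

	package main

	import (
		"fmt"
		"io"
	)

	// memRows follows the same Next contract as rowsCursor above: io.EOF
	// marks the end of the result set.
	type memRows struct {
		rows []string
		pos  int
	}

	func (m *memRows) Next(dest []interface{}) error {
		if m.pos >= len(m.rows) {
			return io.EOF
		}
		dest[0] = m.rows[m.pos]
		m.pos++
		return nil
	}

	func main() {
		rc := &memRows{rows: []string{"alice", "bob"}}
		dest := make([]interface{}, 1)
		for {
			err := rc.Next(dest)
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
			fmt.Println(dest[0])
		}
	}
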
diff --git a/src/pkg/exp/sql/sql.go b/src/pkg/exp/sql/sql.go
index 7f0e0b2..4f1c539 100644
--- a/src/pkg/exp/sql/sql.go
+++ b/src/pkg/exp/sql/sql.go
@@ -7,8 +7,9 @@
 package sql
 
 import (
+	"errors"
 	"fmt"
-	"os"
+	"io"
 	"runtime"
 	"sync"
 
@@ -50,7 +51,7 @@
 }
 
 // ScanInto implements the ScannerInto interface.
-func (ms *NullableString) ScanInto(value interface{}) os.Error {
+func (ms *NullableString) ScanInto(value interface{}) error {
 	if value == nil {
 		ms.String, ms.Valid = "", false
 		return nil
@@ -74,13 +75,13 @@
 	//
 	// An error should be returned if the value can not be stored
 	// without loss of information.
-	ScanInto(value interface{}) os.Error
+	ScanInto(value interface{}) error
 }
 
 // ErrNoRows is returned by Scan when QueryRow doesn't return a
 // row. In such a case, QueryRow returns a placeholder *Row value that
 // defers this error until a Scan.
-var ErrNoRows = os.NewError("db: no rows in result set")
+var ErrNoRows = errors.New("db: no rows in result set")
 
 // DB is a database handle. It's safe for concurrent use by multiple
 // goroutines.
@@ -98,7 +99,7 @@
 //
 // Most users will open a database via a driver-specific connection
 // helper function that returns a *DB.
-func Open(driverName, dataSourceName string) (*DB, os.Error) {
+func Open(driverName, dataSourceName string) (*DB, error) {
 	driver, ok := drivers[driverName]
 	if !ok {
 		return nil, fmt.Errorf("db: unknown driver %q (forgotten import?)", driverName)
@@ -114,7 +115,7 @@
 }
 
 // conn returns a newly-opened or cached driver.Conn
-func (db *DB) conn() (driver.Conn, os.Error) {
+func (db *DB) conn() (driver.Conn, error) {
 	db.mu.Lock()
 	if n := len(db.freeConn); n > 0 {
 		conn := db.freeConn[n-1]
@@ -154,7 +155,7 @@
 }
 
 // Prepare creates a prepared statement for later execution.
-func (db *DB) Prepare(query string) (*Stmt, os.Error) {
+func (db *DB) Prepare(query string) (*Stmt, error) {
 	// TODO: check if db.driver supports an optional
 	// driver.Preparer interface and call that instead, if so,
 	// otherwise we make a prepared statement that's bound
@@ -179,7 +180,7 @@
 }
 
 // Exec executes a query without returning any rows.
-func (db *DB) Exec(query string, args ...interface{}) (Result, os.Error) {
+func (db *DB) Exec(query string, args ...interface{}) (Result, error) {
 	// Optional fast path, if the driver implements driver.Execer.
 	if execer, ok := db.driver.(driver.Execer); ok {
 		resi, err := execer.Exec(query, args)
@@ -218,7 +219,7 @@
 }
 
 // Query executes a query that returns rows, typically a SELECT.
-func (db *DB) Query(query string, args ...interface{}) (*Rows, os.Error) {
+func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
 	stmt, err := db.Prepare(query)
 	if err != nil {
 		return nil, err
@@ -240,7 +241,7 @@
 
 // Begin starts a transaction.  The isolation level is dependent on
 // the driver.
-func (db *DB) Begin() (*Tx, os.Error) {
+func (db *DB) Begin() (*Tx, error) {
 	// TODO(bradfitz): add another method for beginning a transaction
 	// at a specific isolation level.
 	panic(todo())
@@ -257,17 +258,17 @@
 }
 
 // Commit commits the transaction.
-func (tx *Tx) Commit() os.Error {
+func (tx *Tx) Commit() error {
 	panic(todo())
 }
 
 // Rollback aborts the transaction.
-func (tx *Tx) Rollback() os.Error {
+func (tx *Tx) Rollback() error {
 	panic(todo())
 }
 
 // Prepare creates a prepared statement.
-func (tx *Tx) Prepare(query string) (*Stmt, os.Error) {
+func (tx *Tx) Prepare(query string) (*Stmt, error) {
 	panic(todo())
 }
 
@@ -278,7 +279,7 @@
 }
 
 // Query executes a query that returns rows, typically a SELECT.
-func (tx *Tx) Query(query string, args ...interface{}) (*Rows, os.Error) {
+func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
 	panic(todo())
 }
 
@@ -313,7 +314,7 @@
 
 // Exec executes a prepared statement with the given arguments and
 // returns a Result summarizing the effect of the statement.
-func (s *Stmt) Exec(args ...interface{}) (Result, os.Error) {
+func (s *Stmt) Exec(args ...interface{}) (Result, error) {
 	ci, si, err := s.connStmt()
 	if err != nil {
 		return nil, err
@@ -352,10 +353,10 @@
 	return result{resi}, nil
 }
 
-func (s *Stmt) connStmt(args ...interface{}) (driver.Conn, driver.Stmt, os.Error) {
+func (s *Stmt) connStmt(args ...interface{}) (driver.Conn, driver.Stmt, error) {
 	s.mu.Lock()
 	if s.closed {
-		return nil, nil, os.NewError("db: statement is closed")
+		return nil, nil, errors.New("db: statement is closed")
 	}
 	var cs connStmt
 	match := false
@@ -391,7 +392,7 @@
 
 // Query executes a prepared query statement with the given arguments
 // and returns the query results as a *Rows.
-func (s *Stmt) Query(args ...interface{}) (*Rows, os.Error) {
+func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
 	ci, si, err := s.connStmt(args...)
 	if err != nil {
 		return nil, err
@@ -433,7 +434,7 @@
 }
 
 // Close closes the statement.
-func (s *Stmt) Close() os.Error {
+func (s *Stmt) Close() error {
 	s.mu.Lock()
 	defer s.mu.Unlock() // TODO(bradfitz): move this unlock after 'closed = true'?
 	if s.closed {
@@ -473,7 +474,7 @@
 
 	closed   bool
 	lastcols []interface{}
-	lasterr  os.Error
+	lasterr  error
 }
 
 // Next prepares the next result row for reading with the Scan method.
@@ -495,8 +496,8 @@
 }
 
 // Error returns the error, if any, that was encountered during iteration.
-func (rs *Rows) Error() os.Error {
-	if rs.lasterr == os.EOF {
+func (rs *Rows) Error() error {
+	if rs.lasterr == io.EOF {
 		return nil
 	}
 	return rs.lasterr
@@ -506,15 +507,15 @@
 // at by dest. If dest contains pointers to []byte, the slices should
 // not be modified and should only be considered valid until the next
 // call to Next or Scan.
-func (rs *Rows) Scan(dest ...interface{}) os.Error {
+func (rs *Rows) Scan(dest ...interface{}) error {
 	if rs.closed {
-		return os.NewError("db: Rows closed")
+		return errors.New("db: Rows closed")
 	}
 	if rs.lasterr != nil {
 		return rs.lasterr
 	}
 	if rs.lastcols == nil {
-		return os.NewError("db: Scan called without calling Next")
+		return errors.New("db: Scan called without calling Next")
 	}
 	if len(dest) != len(rs.lastcols) {
 		return fmt.Errorf("db: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
@@ -531,7 +532,7 @@
 // Close closes the Rows, preventing further enumeration. If the
 // end is encountered, the Rows are closed automatically. Close
 // is idempotent.
-func (rs *Rows) Close() os.Error {
+func (rs *Rows) Close() error {
 	if rs.closed {
 		return nil
 	}
@@ -544,7 +545,7 @@
 // Row is the result of calling QueryRow to select a single row.
 type Row struct {
 	// One of these two will be non-nil:
-	err  os.Error // deferred error for easy chaining
+	err  error // deferred error for easy chaining
 	rows *Rows
 }
 
@@ -556,7 +557,7 @@
 // If dest contains pointers to []byte, the slices should not be
 // modified and should only be considered valid until the next call to
 // Next or Scan.
-func (r *Row) Scan(dest ...interface{}) os.Error {
+func (r *Row) Scan(dest ...interface{}) error {
 	if r.err != nil {
 		return r.err
 	}
@@ -569,8 +570,8 @@
 
 // A Result summarizes an executed SQL command.
 type Result interface {
-	LastInsertId() (int64, os.Error)
-	RowsAffected() (int64, os.Error)
+	LastInsertId() (int64, error)
+	RowsAffected() (int64, error)
 }
 
 type result struct {
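
On the consumer side, that io.EOF never surfaces: Rows.Error above maps it to nil once iteration simply ran out of rows. A hypothetical usage sketch against the exp/sql API as patched here (db is assumed to be an open *DB backed by the fakedb driver from the tests, and Next is assumed to be the boolean row-advance method paired with Scan):

	// printPeople is illustrative only; it assumes imports of "fmt" and the
	// patched exp/sql package.
	func printPeople(db *sql.DB) error {
		rows, err := db.Query("SELECT|people|age,name|age=?", 3)
		if err != nil {
			return err
		}
		defer rows.Close()
		for rows.Next() {
			var age int
			var name string
			if err := rows.Scan(&age, &name); err != nil {
				return err
			}
			fmt.Println(age, name)
		}
		return rows.Error() // nil when the driver ended with io.EOF
	}
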
diff --git a/src/pkg/exp/sql/sql_test.go b/src/pkg/exp/sql/sql_test.go
index eaa0a90..eb1bb58 100644
--- a/src/pkg/exp/sql/sql_test.go
+++ b/src/pkg/exp/sql/sql_test.go
@@ -40,7 +40,7 @@
 	var age int
 
 	err := db.QueryRow("SELECT|people|age,name|age=?", 3).Scan(&age)
-	if err == nil || !strings.Contains(err.String(), "expected 2 destination arguments") {
+	if err == nil || !strings.Contains(err.Error(), "expected 2 destination arguments") {
 		t.Errorf("expected error from wrong number of arguments; actually got: %v", err)
 	}
 
@@ -99,7 +99,7 @@
 	if err == nil {
 		t.Fatalf("expected error")
 	}
-	if err.String() != `fakedb: invalid conversion to int32 from "bogusconversion"` {
+	if err.Error() != `fakedb: invalid conversion to int32 from "bogusconversion"` {
 		t.Errorf("unexpected error: %v", err)
 	}
 }
@@ -135,7 +135,7 @@
 		_, err := stmt.Exec(et.args...)
 		errStr := ""
 		if err != nil {
-			errStr = err.String()
+			errStr = err.Error()
 		}
 		if errStr != et.wantErr {
 			t.Errorf("stmt.Execute #%d: for %v, got error %q, want error %q",
diff --git a/src/pkg/exp/ssh/channel.go b/src/pkg/exp/ssh/channel.go
index f69b735..428e71c 100644
--- a/src/pkg/exp/ssh/channel.go
+++ b/src/pkg/exp/ssh/channel.go
@@ -5,7 +5,8 @@
 package ssh
 
 import (
-	"os"
+	"errors"
+	"io"
 	"sync"
 )
 
@@ -13,19 +14,19 @@
 // SSH connection.
 type Channel interface {
 	// Accept accepts the channel creation request.
-	Accept() os.Error
+	Accept() error
 	// Reject rejects the channel creation request. After calling this, no
 	// other methods on the Channel may be called. If they are then the
 	// peer is likely to signal a protocol error and drop the connection.
-	Reject(reason RejectionReason, message string) os.Error
+	Reject(reason RejectionReason, message string) error
 
 	// Read may return a ChannelRequest as an error.
-	Read(data []byte) (int, os.Error)
-	Write(data []byte) (int, os.Error)
-	Close() os.Error
+	Read(data []byte) (int, error)
+	Write(data []byte) (int, error)
+	Close() error
 
 	// AckRequest either sends an ack or nack to the channel request.
-	AckRequest(ok bool) os.Error
+	AckRequest(ok bool) error
 
 	// ChannelType returns the type of the channel, as supplied by the
 	// client.
@@ -43,7 +44,7 @@
 	Payload   []byte
 }
 
-func (c ChannelRequest) String() string {
+func (c ChannelRequest) Error() string {
 	return "channel request received"
 }
 
@@ -72,7 +73,7 @@
 	myId, theirId         uint32
 	myWindow, theirWindow uint32
 	maxPacketSize         uint32
-	err                   os.Error
+	err                   error
 
 	pendingRequests []ChannelRequest
 	pendingData     []byte
@@ -83,7 +84,7 @@
 	cond *sync.Cond
 }
 
-func (c *channel) Accept() os.Error {
+func (c *channel) Accept() error {
 	c.serverConn.lock.Lock()
 	defer c.serverConn.lock.Unlock()
 
@@ -100,7 +101,7 @@
 	return c.serverConn.writePacket(marshal(msgChannelOpenConfirm, confirm))
 }
 
-func (c *channel) Reject(reason RejectionReason, message string) os.Error {
+func (c *channel) Reject(reason RejectionReason, message string) error {
 	c.serverConn.lock.Lock()
 	defer c.serverConn.lock.Unlock()
 
@@ -167,7 +168,7 @@
 	c.cond.Signal()
 }
 
-func (c *channel) Read(data []byte) (n int, err os.Error) {
+func (c *channel) Read(data []byte) (n int, err error) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
@@ -187,7 +188,7 @@
 
 	for {
 		if c.theySentEOF || c.theyClosed || c.dead {
-			return 0, os.EOF
+			return 0, io.EOF
 		}
 
 		if len(c.pendingRequests) > 0 {
@@ -223,11 +224,11 @@
 	panic("unreachable")
 }
 
-func (c *channel) Write(data []byte) (n int, err os.Error) {
+func (c *channel) Write(data []byte) (n int, err error) {
 	for len(data) > 0 {
 		c.lock.Lock()
 		if c.dead || c.weClosed {
-			return 0, os.EOF
+			return 0, io.EOF
 		}
 
 		if c.theirWindow == 0 {
@@ -267,7 +268,7 @@
 	return
 }
 
-func (c *channel) Close() os.Error {
+func (c *channel) Close() error {
 	c.serverConn.lock.Lock()
 	defer c.serverConn.lock.Unlock()
 
@@ -276,7 +277,7 @@
 	}
 
 	if c.weClosed {
-		return os.NewError("ssh: channel already closed")
+		return errors.New("ssh: channel already closed")
 	}
 	c.weClosed = true
 
@@ -286,7 +287,7 @@
 	return c.serverConn.writePacket(marshal(msgChannelClose, closeMsg))
 }
 
-func (c *channel) AckRequest(ok bool) os.Error {
+func (c *channel) AckRequest(ok bool) error {
 	c.serverConn.lock.Lock()
 	defer c.serverConn.lock.Unlock()
 
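
Because ChannelRequest now implements Error rather than String, it satisfies the error interface and can be returned from Read exactly as the comment above describes. A hypothetical server-side sketch of separating such requests from real read failures (serve and handleData are illustrative, not part of the package):

	// serve drains a Channel under the updated interface; handleData is a
	// hypothetical callback for application data.
	func serve(ch Channel, handleData func([]byte)) error {
		buf := make([]byte, 4096)
		for {
			n, err := ch.Read(buf)
			switch err.(type) {
			case nil:
				handleData(buf[:n])
			case ChannelRequest:
				// An out-of-band channel request, not a failure.
				if err := ch.AckRequest(true); err != nil {
					return err
				}
			default:
				return err // io.EOF or a transport error
			}
		}
	}
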
diff --git a/src/pkg/exp/ssh/client.go b/src/pkg/exp/ssh/client.go
index fe76db1..345e707 100644
--- a/src/pkg/exp/ssh/client.go
+++ b/src/pkg/exp/ssh/client.go
@@ -8,9 +8,9 @@
 	"big"
 	"crypto"
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
-	"os"
 	"net"
 	"sync"
 )
@@ -26,7 +26,7 @@
 }
 
 // Client returns a new SSH client connection using c as the underlying transport.
-func Client(c net.Conn, config *ClientConfig) (*ClientConn, os.Error) {
+func Client(c net.Conn, config *ClientConfig) (*ClientConn, error) {
 	conn := &ClientConn{
 		transport: newTransport(c, config.rand()),
 		config:    config,
@@ -44,7 +44,7 @@
 }
 
 // handshake performs the client side key exchange. See RFC 4253 Section 7.
-func (c *ClientConn) handshake() os.Error {
+func (c *ClientConn) handshake() error {
 	var magics handshakeMagics
 
 	if _, err := c.Write(clientVersion); err != nil {
@@ -91,7 +91,7 @@
 
 	kexAlgo, hostKeyAlgo, ok := findAgreedAlgorithms(c.transport, &clientKexInit, &serverKexInit)
 	if !ok {
-		return os.NewError("ssh: no common algorithms")
+		return errors.New("ssh: no common algorithms")
 	}
 
 	if serverKexInit.FirstKexFollows && kexAlgo != serverKexInit.KexAlgos[0] {
@@ -133,7 +133,7 @@
 
 // authenticate authenticates with the remote server. See RFC 4252. 
 // Only "password" authentication is supported.
-func (c *ClientConn) authenticate() os.Error {
+func (c *ClientConn) authenticate() error {
 	if err := c.writePacket(marshal(msgServiceRequest, serviceRequestMsg{serviceUserAuth})); err != nil {
 		return err
 	}
@@ -166,7 +166,7 @@
 	return nil
 }
 
-func (c *ClientConn) sendUserAuthReq(method string) os.Error {
+func (c *ClientConn) sendUserAuthReq(method string) error {
 	length := stringLength([]byte(c.config.Password)) + 1
 	payload := make([]byte, length)
 	// always false for password auth, see RFC 4252 Section 8.
@@ -183,7 +183,7 @@
 
 // kexDH performs Diffie-Hellman key agreement on a ClientConn. The
 // returned values are given the same names as in RFC 4253, section 8.
-func (c *ClientConn) kexDH(group *dhGroup, hashFunc crypto.Hash, magics *handshakeMagics, hostKeyAlgo string) ([]byte, []byte, os.Error) {
+func (c *ClientConn) kexDH(group *dhGroup, hashFunc crypto.Hash, magics *handshakeMagics, hostKeyAlgo string) ([]byte, []byte, error) {
 	x, err := rand.Int(c.config.rand(), group.p)
 	if err != nil {
 		return nil, nil, err
@@ -207,7 +207,7 @@
 	}
 
 	if kexDHReply.Y.Sign() == 0 || kexDHReply.Y.Cmp(group.p) >= 0 {
-		return nil, nil, os.NewError("server DH parameter out of bounds")
+		return nil, nil, errors.New("server DH parameter out of bounds")
 	}
 
 	kInt := new(big.Int).Exp(kexDHReply.Y, x, group.p)
@@ -230,7 +230,7 @@
 
 // openChan opens a new client channel. The most common session type is "session". 
 // The full set of valid session types are listed in RFC 4250 4.9.1.
-func (c *ClientConn) openChan(typ string) (*clientChan, os.Error) {
+func (c *ClientConn) openChan(typ string) (*clientChan, error) {
 	ch := c.newChan(c.transport)
 	if err := c.writePacket(marshal(msgChannelOpen, channelOpenMsg{
 		ChanType:      typ,
@@ -247,10 +247,10 @@
 		ch.peersId = msg.MyId
 	case *channelOpenFailureMsg:
 		c.chanlist.remove(ch.id)
-		return nil, os.NewError(msg.Message)
+		return nil, errors.New(msg.Message)
 	default:
 		c.chanlist.remove(ch.id)
-		return nil, os.NewError("Unexpected packet")
+		return nil, errors.New("Unexpected packet")
 	}
 	return ch, nil
 }
@@ -329,7 +329,7 @@
 
 // Dial connects to the given network address using net.Dial and 
 // then initiates a SSH handshake, returning the resulting client connection.
-func Dial(network, addr string, config *ClientConfig) (*ClientConn, os.Error) {
+func Dial(network, addr string, config *ClientConfig) (*ClientConn, error) {
 	conn, err := net.Dial(network, addr)
 	if err != nil {
 		return nil, err
@@ -382,13 +382,13 @@
 }
 
 // Close closes the channel. This does not close the underlying connection.
-func (c *clientChan) Close() os.Error {
+func (c *clientChan) Close() error {
 	return c.writePacket(marshal(msgChannelClose, channelCloseMsg{
 		PeersId: c.id,
 	}))
 }
 
-func (c *clientChan) sendChanReq(req channelRequestMsg) os.Error {
+func (c *clientChan) sendChanReq(req channelRequestMsg) error {
 	if err := c.writePacket(marshal(msgChannelRequest, req)); err != nil {
 		return err
 	}
@@ -447,12 +447,12 @@
 }
 
 // Write writes data to the remote process's standard input.
-func (w *chanWriter) Write(data []byte) (n int, err os.Error) {
+func (w *chanWriter) Write(data []byte) (n int, err error) {
 	for {
 		if w.rwin == 0 {
 			win, ok := <-w.win
 			if !ok {
-				return 0, os.EOF
+				return 0, io.EOF
 			}
 			w.rwin += win
 			continue
@@ -469,7 +469,7 @@
 	panic("unreachable")
 }
 
-func (w *chanWriter) Close() os.Error {
+func (w *chanWriter) Close() error {
 	return w.writePacket(marshal(msgChannelEOF, channelEOFMsg{w.id}))
 }
 
@@ -485,7 +485,7 @@
 }
 
 // Read reads data from the remote process's stdout or stderr.
-func (r *chanReader) Read(data []byte) (int, os.Error) {
+func (r *chanReader) Read(data []byte) (int, error) {
 	var ok bool
 	for {
 		if len(r.buf) > 0 {
@@ -499,12 +499,12 @@
 		}
 		r.buf, ok = <-r.data
 		if !ok {
-			return 0, os.EOF
+			return 0, io.EOF
 		}
 	}
 	panic("unreachable")
 }
 
-func (r *chanReader) Close() os.Error {
+func (r *chanReader) Close() error {
 	return r.writePacket(marshal(msgChannelEOF, channelEOFMsg{r.id}))
 }
diff --git a/src/pkg/exp/ssh/common.go b/src/pkg/exp/ssh/common.go
index 739bd2f..f68c353 100644
--- a/src/pkg/exp/ssh/common.go
+++ b/src/pkg/exp/ssh/common.go
@@ -53,7 +53,7 @@
 	expected, got uint8
 }
 
-func (u UnexpectedMessageError) String() string {
+func (u UnexpectedMessageError) Error() string {
 	return "ssh: unexpected message type " + strconv.Itoa(int(u.got)) + " (expected " + strconv.Itoa(int(u.expected)) + ")"
 }
 
@@ -62,7 +62,7 @@
 	msgType uint8
 }
 
-func (p ParseError) String() string {
+func (p ParseError) Error() string {
 	return "ssh: parse error in message type " + strconv.Itoa(int(p.msgType))
 }
 
diff --git a/src/pkg/exp/ssh/messages.go b/src/pkg/exp/ssh/messages.go
index 5f2c447..5eae181 100644
--- a/src/pkg/exp/ssh/messages.go
+++ b/src/pkg/exp/ssh/messages.go
@@ -8,7 +8,6 @@
 	"big"
 	"bytes"
 	"io"
-	"os"
 	"reflect"
 )
 
@@ -192,7 +191,7 @@
 // unmarshal parses the SSH wire data in packet into out using reflection.
 // expectedType is the expected SSH message type. It either returns nil on
 // success, or a ParseError or UnexpectedMessageError on error.
-func unmarshal(out interface{}, packet []byte, expectedType uint8) os.Error {
+func unmarshal(out interface{}, packet []byte, expectedType uint8) error {
 	if len(packet) == 0 {
 		return ParseError{expectedType}
 	}
diff --git a/src/pkg/exp/ssh/server.go b/src/pkg/exp/ssh/server.go
index 0dd24ec..2ae8079 100644
--- a/src/pkg/exp/ssh/server.go
+++ b/src/pkg/exp/ssh/server.go
@@ -12,9 +12,9 @@
 	"crypto/rsa"
 	"crypto/x509"
 	"encoding/pem"
+	"errors"
 	"io"
 	"net"
-	"os"
 	"sync"
 )
 
@@ -53,12 +53,12 @@
 // private key configured in order to accept connections. The private key must
 // be in the form of a PEM encoded, PKCS#1, RSA private key. The file "id_rsa"
 // typically contains such a key.
-func (s *ServerConfig) SetRSAPrivateKey(pemBytes []byte) os.Error {
+func (s *ServerConfig) SetRSAPrivateKey(pemBytes []byte) error {
 	block, _ := pem.Decode(pemBytes)
 	if block == nil {
-		return os.NewError("ssh: no key found")
+		return errors.New("ssh: no key found")
 	}
-	var err os.Error
+	var err error
 	s.rsa, err = x509.ParsePKCS1PrivateKey(block.Bytes)
 	if err != nil {
 		return err
@@ -140,7 +140,7 @@
 	// lock protects err and also allows Channels to serialise their writes
 	// to out.
 	lock sync.RWMutex
-	err  os.Error
+	err  error
 
 	// cachedPubKeys contains the cache results of tests for public keys.
 	// Since SSH clients will query whether a public key is acceptable
@@ -162,7 +162,7 @@
 
 // kexDH performs Diffie-Hellman key agreement on a ServerConnection. The
 // returned values are given the same names as in RFC 4253, section 8.
-func (s *ServerConn) kexDH(group *dhGroup, hashFunc crypto.Hash, magics *handshakeMagics, hostKeyAlgo string) (H, K []byte, err os.Error) {
+func (s *ServerConn) kexDH(group *dhGroup, hashFunc crypto.Hash, magics *handshakeMagics, hostKeyAlgo string) (H, K []byte, err error) {
 	packet, err := s.readPacket()
 	if err != nil {
 		return
@@ -173,7 +173,7 @@
 	}
 
 	if kexDHInit.X.Sign() == 0 || kexDHInit.X.Cmp(group.p) >= 0 {
-		return nil, nil, os.NewError("client DH parameter out of bounds")
+		return nil, nil, errors.New("client DH parameter out of bounds")
 	}
 
 	y, err := rand.Int(s.config.rand(), group.p)
@@ -189,7 +189,7 @@
 	case hostAlgoRSA:
 		serializedHostKey = s.config.rsaSerialized
 	default:
-		return nil, nil, os.NewError("internal error")
+		return nil, nil, errors.New("internal error")
 	}
 
 	h := hashFunc.New()
@@ -218,7 +218,7 @@
 			return
 		}
 	default:
-		return nil, nil, os.NewError("internal error")
+		return nil, nil, errors.New("internal error")
 	}
 
 	serializedSig := serializeRSASignature(sig)
@@ -279,7 +279,7 @@
 }
 
 // Handshake performs an SSH transport and client authentication on the given ServerConn.
-func (s *ServerConn) Handshake() os.Error {
+func (s *ServerConn) Handshake() error {
 	var magics handshakeMagics
 	if _, err := s.Write(serverVersion); err != nil {
 		return err
@@ -326,7 +326,7 @@
 
 	kexAlgo, hostKeyAlgo, ok := findAgreedAlgorithms(s.transport, &clientKexInit, &serverKexInit)
 	if !ok {
-		return os.NewError("ssh: no common algorithms")
+		return errors.New("ssh: no common algorithms")
 	}
 
 	if clientKexInit.FirstKexFollows && kexAlgo != clientKexInit.KexAlgos[0] {
@@ -345,7 +345,7 @@
 		dhGroup14Once.Do(initDHGroup14)
 		H, K, err = s.kexDH(dhGroup14, hashFunc, &magics, hostKeyAlgo)
 	default:
-		err = os.NewError("ssh: unexpected key exchange algorithm " + kexAlgo)
+		err = errors.New("ssh: unexpected key exchange algorithm " + kexAlgo)
 	}
 	if err != nil {
 		return err
@@ -374,7 +374,7 @@
 		return err
 	}
 	if serviceRequest.Service != serviceUserAuth {
-		return os.NewError("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
+		return errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
 	}
 	serviceAccept := serviceAcceptMsg{
 		Service: serviceUserAuth,
@@ -420,9 +420,9 @@
 	return result
 }
 
-func (s *ServerConn) authenticate(H []byte) os.Error {
+func (s *ServerConn) authenticate(H []byte) error {
 	var userAuthReq userAuthRequestMsg
-	var err os.Error
+	var err error
 	var packet []byte
 
 userAuthLoop:
@@ -435,7 +435,7 @@
 		}
 
 		if userAuthReq.Service != serviceSSH {
-			return os.NewError("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
+			return errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
 		}
 
 		switch userAuthReq.Method {
@@ -523,7 +523,7 @@
 						return ParseError{msgUserAuthRequest}
 					}
 				default:
-					return os.NewError("ssh: isAcceptableAlgo incorrect")
+					return errors.New("ssh: isAcceptableAlgo incorrect")
 				}
 				if s.testPubKey(userAuthReq.User, algo, pubKey) {
 					break userAuthLoop
@@ -540,7 +540,7 @@
 		}
 
 		if len(failureMsg.Methods) == 0 {
-			return os.NewError("ssh: no authentication methods configured but NoClientAuth is also false")
+			return errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
 		}
 
 		if err = s.writePacket(marshal(msgUserAuthFailure, failureMsg)); err != nil {
@@ -560,7 +560,7 @@
 
 // Accept reads and processes messages on a ServerConn. It must be called
 // in order to demultiplex messages to any resulting Channels.
-func (s *ServerConn) Accept() (Channel, os.Error) {
+func (s *ServerConn) Accept() (Channel, error) {
 	if s.err != nil {
 		return nil, s.err
 	}
@@ -660,7 +660,7 @@
 			case UnexpectedMessageError:
 				return nil, msg
 			case *disconnectMsg:
-				return nil, os.EOF
+				return nil, io.EOF
 			default:
 				// Unknown message. Ignore.
 			}
@@ -679,7 +679,7 @@
 // Accept waits for and returns the next incoming SSH connection.
 // The receiver should call Handshake() in another goroutine 
 // to avoid blocking the accepter.
-func (l *Listener) Accept() (*ServerConn, os.Error) {
+func (l *Listener) Accept() (*ServerConn, error) {
 	c, err := l.listener.Accept()
 	if err != nil {
 		return nil, err
@@ -694,13 +694,13 @@
 }
 
 // Close closes the listener.
-func (l *Listener) Close() os.Error {
+func (l *Listener) Close() error {
 	return l.listener.Close()
 }
 
 // Listen creates an SSH listener accepting connections on
 // the given network address using net.Listen.
-func Listen(network, addr string, config *ServerConfig) (*Listener, os.Error) {
+func Listen(network, addr string, config *ServerConfig) (*Listener, error) {
 	l, err := net.Listen(network, addr)
 	if err != nil {
 		return nil, err
diff --git a/src/pkg/exp/ssh/server_shell.go b/src/pkg/exp/ssh/server_shell.go
index 0e9967a..5243d0e 100644
--- a/src/pkg/exp/ssh/server_shell.go
+++ b/src/pkg/exp/ssh/server_shell.go
@@ -4,9 +4,7 @@
 
 package ssh
 
-import (
-	"os"
-)
+import "io"
 
 // ServerShell contains the state for running a VT100 terminal that is capable
 // of reading lines of input.
@@ -326,12 +324,12 @@
 	return
 }
 
-func (ss *ServerShell) Write(buf []byte) (n int, err os.Error) {
+func (ss *ServerShell) Write(buf []byte) (n int, err error) {
 	return ss.c.Write(buf)
 }
 
 // ReadLine returns a line of input from the terminal.
-func (ss *ServerShell) ReadLine() (line string, err os.Error) {
+func (ss *ServerShell) ReadLine() (line string, err error) {
 	ss.writeLine([]byte(ss.prompt))
 	ss.c.Write(ss.outBuf)
 	ss.outBuf = ss.outBuf[:0]
@@ -353,7 +351,7 @@
 					break
 				}
 				if key == keyCtrlD {
-					return "", os.EOF
+					return "", io.EOF
 				}
 				line, lineOk = ss.handleKey(key)
 			}
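
ReadLine now reports a clean end of input (Ctrl-D) with io.EOF instead of os.EOF, so callers distinguish it from real errors the same way they would with any reader. A hypothetical sketch of a shell loop under that contract (echoLines is illustrative; ss is assumed to be a ready *ServerShell and "io" to be imported):

	// echoLines reads lines until Ctrl-D and writes each one back.
	func echoLines(ss *ServerShell) error {
		for {
			line, err := ss.ReadLine()
			if err == io.EOF {
				return nil // Ctrl-D: clean end of input
			}
			if err != nil {
				return err
			}
			if _, err := ss.Write([]byte(line + "\r\n")); err != nil {
				return err
			}
		}
	}
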
diff --git a/src/pkg/exp/ssh/server_shell_test.go b/src/pkg/exp/ssh/server_shell_test.go
index 622cf7c..aa69ef7 100644
--- a/src/pkg/exp/ssh/server_shell_test.go
+++ b/src/pkg/exp/ssh/server_shell_test.go
@@ -5,8 +5,8 @@
 package ssh
 
 import (
+	"io"
 	"testing"
-	"os"
 )
 
 type MockChannel struct {
@@ -15,15 +15,15 @@
 	received     []byte
 }
 
-func (c *MockChannel) Accept() os.Error {
+func (c *MockChannel) Accept() error {
 	return nil
 }
 
-func (c *MockChannel) Reject(RejectionReason, string) os.Error {
+func (c *MockChannel) Reject(RejectionReason, string) error {
 	return nil
 }
 
-func (c *MockChannel) Read(data []byte) (n int, err os.Error) {
+func (c *MockChannel) Read(data []byte) (n int, err error) {
 	n = len(data)
 	if n == 0 {
 		return
@@ -32,7 +32,7 @@
 		n = len(c.toSend)
 	}
 	if n == 0 {
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	if c.bytesPerRead > 0 && n > c.bytesPerRead {
 		n = c.bytesPerRead
@@ -42,16 +42,16 @@
 	return
 }
 
-func (c *MockChannel) Write(data []byte) (n int, err os.Error) {
+func (c *MockChannel) Write(data []byte) (n int, err error) {
 	c.received = append(c.received, data...)
 	return len(data), nil
 }
 
-func (c *MockChannel) Close() os.Error {
+func (c *MockChannel) Close() error {
 	return nil
 }
 
-func (c *MockChannel) AckRequest(ok bool) os.Error {
+func (c *MockChannel) AckRequest(ok bool) error {
 	return nil
 }
 
@@ -70,7 +70,7 @@
 	if line != "" {
 		t.Errorf("Expected empty line but got: %s", line)
 	}
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("Error should have been EOF but got: %s", err)
 	}
 }
@@ -78,12 +78,12 @@
 var keyPressTests = []struct {
 	in   string
 	line string
-	err  os.Error
+	err  error
 }{
 	{
 		"",
 		"",
-		os.EOF,
+		io.EOF,
 	},
 	{
 		"\r",
diff --git a/src/pkg/exp/ssh/session.go b/src/pkg/exp/ssh/session.go
index 13df2f0..77154f2 100644
--- a/src/pkg/exp/ssh/session.go
+++ b/src/pkg/exp/ssh/session.go
@@ -9,8 +9,8 @@
 
 import (
 	"encoding/binary"
+	"errors"
 	"io"
-	"os"
 )
 
 // A Session represents a connection to a remote command or shell.
@@ -34,7 +34,7 @@
 
 // Setenv sets an environment variable that will be applied to any
 // command executed by Shell or Exec.
-func (s *Session) Setenv(name, value string) os.Error {
+func (s *Session) Setenv(name, value string) error {
 	n, v := []byte(name), []byte(value)
 	nlen, vlen := stringLength(n), stringLength(v)
 	payload := make([]byte, nlen+vlen)
@@ -53,7 +53,7 @@
 var emptyModeList = []byte{0, 0, 0, 1, 0}
 
 // RequestPty requests the association of a pty with the session on the remote host.
-func (s *Session) RequestPty(term string, h, w int) os.Error {
+func (s *Session) RequestPty(term string, h, w int) error {
 	buf := make([]byte, 4+len(term)+16+len(emptyModeList))
 	b := marshalString(buf, []byte(term))
 	binary.BigEndian.PutUint32(b, uint32(h))
@@ -73,9 +73,9 @@
 // Exec runs cmd on the remote host. Typically, the remote 
 // server passes cmd to the shell for interpretation. 
 // A Session only accepts one call to Exec or Shell.
-func (s *Session) Exec(cmd string) os.Error {
+func (s *Session) Exec(cmd string) error {
 	if s.started {
-		return os.NewError("session already started")
+		return errors.New("session already started")
 	}
 	cmdLen := stringLength([]byte(cmd))
 	payload := make([]byte, cmdLen)
@@ -92,9 +92,9 @@
 
 // Shell starts a login shell on the remote host. A Session only 
 // accepts one call to Exec or Shell.
-func (s *Session) Shell() os.Error {
+func (s *Session) Shell() error {
 	if s.started {
-		return os.NewError("session already started")
+		return errors.New("session already started")
 	}
 	s.started = true
 
@@ -106,7 +106,7 @@
 }
 
 // NewSession returns a new interactive session on the remote host.
-func (c *ClientConn) NewSession() (*Session, os.Error) {
+func (c *ClientConn) NewSession() (*Session, error) {
 	ch, err := c.openChan("session")
 	if err != nil {
 		return nil, err
diff --git a/src/pkg/exp/ssh/transport.go b/src/pkg/exp/ssh/transport.go
index 97eaf97..579a9d8 100644
--- a/src/pkg/exp/ssh/transport.go
+++ b/src/pkg/exp/ssh/transport.go
@@ -11,10 +11,10 @@
 	"crypto/cipher"
 	"crypto/hmac"
 	"crypto/subtle"
+	"errors"
 	"hash"
 	"io"
 	"net"
-	"os"
 	"sync"
 )
 
@@ -27,7 +27,7 @@
 // TODO(dfc) suggestions for a better name will be warmly received.
 type filteredConn interface {
 	// Close closes the connection.
-	Close() os.Error
+	Close() error
 
 	// LocalAddr returns the local network address.
 	LocalAddr() net.Addr
@@ -40,7 +40,7 @@
 // an SSH peer.
 type packetWriter interface {
 	// Encrypt and send a packet of data to the remote peer.
-	writePacket(packet []byte) os.Error
+	writePacket(packet []byte) error
 }
 
 // transport represents the SSH connection to the remote peer.
@@ -79,7 +79,7 @@
 }
 
 // Read and decrypt a single packet from the remote peer.
-func (r *reader) readOnePacket() ([]byte, os.Error) {
+func (r *reader) readOnePacket() ([]byte, error) {
 	var lengthBytes = make([]byte, 5)
 	var macSize uint32
 
@@ -108,10 +108,10 @@
 	paddingLength := uint32(lengthBytes[4])
 
 	if length <= paddingLength+1 {
-		return nil, os.NewError("invalid packet length")
+		return nil, errors.New("invalid packet length")
 	}
 	if length > maxPacketSize {
-		return nil, os.NewError("packet too large")
+		return nil, errors.New("packet too large")
 	}
 
 	packet := make([]byte, length-1+macSize)
@@ -126,7 +126,7 @@
 	if r.mac != nil {
 		r.mac.Write(packet[:length-1])
 		if subtle.ConstantTimeCompare(r.mac.Sum(), mac) != 1 {
-			return nil, os.NewError("ssh: MAC failure")
+			return nil, errors.New("ssh: MAC failure")
 		}
 	}
 
@@ -135,7 +135,7 @@
 }
 
 // Read and decrypt next packet discarding debug and noop messages.
-func (t *transport) readPacket() ([]byte, os.Error) {
+func (t *transport) readPacket() ([]byte, error) {
 	for {
 		packet, err := t.readOnePacket()
 		if err != nil {
@@ -149,7 +149,7 @@
 }
 
 // Encrypt and send a packet of data to the remote peer.
-func (w *writer) writePacket(packet []byte) os.Error {
+func (w *writer) writePacket(packet []byte) error {
 	w.Mutex.Lock()
 	defer w.Mutex.Unlock()
 
@@ -218,7 +218,7 @@
 }
 
 // Send a message to the remote peer
-func (t *transport) sendMessage(typ uint8, msg interface{}) os.Error {
+func (t *transport) sendMessage(typ uint8, msg interface{}) error {
 	packet := marshal(typ, msg)
 	return t.writePacket(packet)
 }
@@ -252,7 +252,7 @@
 // setupKeys sets the cipher and MAC keys from K, H and sessionId, as
 // described in RFC 4253, section 6.4. direction should either be serverKeys
 // (to setup server->client keys) or clientKeys (for client->server keys).
-func (c *common) setupKeys(d direction, K, H, sessionId []byte, hashFunc crypto.Hash) os.Error {
+func (c *common) setupKeys(d direction, K, H, sessionId []byte, hashFunc crypto.Hash) error {
 	h := hashFunc.New()
 
 	blockSize := 16
@@ -308,7 +308,7 @@
 	hmac   hash.Hash
 }
 
-func (t truncatingMAC) Write(data []byte) (int, os.Error) {
+func (t truncatingMAC) Write(data []byte) (int, error) {
 	return t.hmac.Write(data)
 }
 
@@ -332,7 +332,7 @@
 const maxVersionStringBytes = 1024
 
 // Read version string as specified by RFC 4253, section 4.2.
-func readVersion(r io.Reader) ([]byte, os.Error) {
+func readVersion(r io.Reader) ([]byte, error) {
 	versionString := make([]byte, 0, 64)
 	var ok, seenCR bool
 	var buf [1]byte
@@ -360,7 +360,7 @@
 	}
 
 	if !ok {
-		return nil, os.NewError("failed to read version string")
+		return nil, errors.New("failed to read version string")
 	}
 
 	// We need to remove the CR from versionString
diff --git a/src/pkg/exp/template/html/error.go b/src/pkg/exp/template/html/error.go
index 5515bfe..22fca9e 100644
--- a/src/pkg/exp/template/html/error.go
+++ b/src/pkg/exp/template/html/error.go
@@ -197,7 +197,7 @@
 	ErrSlashAmbig
 )
 
-func (e *Error) String() string {
+func (e *Error) Error() string {
 	if e.Line != 0 {
 		return fmt.Sprintf("exp/template/html:%s:%d: %s", e.Name, e.Line, e.Description)
 	} else if e.Name != "" {
diff --git a/src/pkg/exp/template/html/escape.go b/src/pkg/exp/template/html/escape.go
index 74abcce..28615a9 100644
--- a/src/pkg/exp/template/html/escape.go
+++ b/src/pkg/exp/template/html/escape.go
@@ -8,14 +8,13 @@
 	"bytes"
 	"fmt"
 	"html"
-	"os"
 	"template"
 	"template/parse"
 )
 
 // Escape rewrites each action in the template to guarantee that the output is
 // properly escaped.
-func Escape(t *template.Template) (*template.Template, os.Error) {
+func Escape(t *template.Template) (*template.Template, error) {
 	var s template.Set
 	s.Add(t)
 	if _, err := EscapeSet(&s, t.Name()); err != nil {
@@ -32,7 +31,7 @@
 // need not include helper templates.
 // If no error is returned, then the named templates have been modified. 
 // Otherwise the named templates have been rendered unusable.
-func EscapeSet(s *template.Set, names ...string) (*template.Set, os.Error) {
+func EscapeSet(s *template.Set, names ...string) (*template.Set, error) {
 	if len(names) == 0 {
 		// TODO: Maybe add a method to Set to enumerate template names
 		// and use those instead.
@@ -41,7 +40,7 @@
 	e := newEscaper(s)
 	for _, name := range names {
 		c, _ := e.escapeTree(context{}, name, 0)
-		var err os.Error
+		var err error
 		if c.err != nil {
 			err, c.err.Name = c.err, name
 		} else if c.state != stateText {
diff --git a/src/pkg/exp/template/html/escape_test.go b/src/pkg/exp/template/html/escape_test.go
index 1b3b256..20599bc 100644
--- a/src/pkg/exp/template/html/escape_test.go
+++ b/src/pkg/exp/template/html/escape_test.go
@@ -8,7 +8,6 @@
 	"bytes"
 	"fmt"
 	"json"
-	"os"
 	"strings"
 	"template"
 	"template/parse"
@@ -17,14 +16,14 @@
 
 type badMarshaler struct{}
 
-func (x *badMarshaler) MarshalJSON() ([]byte, os.Error) {
+func (x *badMarshaler) MarshalJSON() ([]byte, error) {
 	// Keys in valid JSON must be double quoted as must all strings.
 	return []byte("{ foo: 'not quite valid JSON' }"), nil
 }
 
 type goodMarshaler struct{}
 
-func (x *goodMarshaler) MarshalJSON() ([]byte, os.Error) {
+func (x *goodMarshaler) MarshalJSON() ([]byte, error) {
 	return []byte(`{ "<foo>": "O'Reilly" }`), nil
 }
 
@@ -783,7 +782,7 @@
 
 	// pred is a template function that returns the predecessor of a
 	// natural number for testing recursive templates.
-	fns := template.FuncMap{"pred": func(a ...interface{}) (interface{}, os.Error) {
+	fns := template.FuncMap{"pred": func(a ...interface{}) (interface{}, error) {
 		if len(a) == 1 {
 			if i, _ := a[0].(int); i > 0 {
 				return i - 1, nil
@@ -807,7 +806,7 @@
 		var b bytes.Buffer
 
 		if err := s.Execute(&b, "main", data); err != nil {
-			t.Errorf("%q executing %v", err.String(), s.Template("main"))
+			t.Errorf("%q executing %v", err.Error(), s.Template("main"))
 			continue
 		}
 		if got := b.String(); test.want != got {
@@ -962,7 +961,7 @@
 	}
 
 	for _, test := range tests {
-		var err os.Error
+		var err error
 		if strings.HasPrefix(test.input, "{{define") {
 			var s template.Set
 			_, err = s.Parse(test.input)
@@ -977,7 +976,7 @@
 		}
 		var got string
 		if err != nil {
-			got = err.String()
+			got = err.Error()
 		}
 		if test.err == "" {
 			if got != "" {
@@ -1549,7 +1548,7 @@
 	}
 }
 
-func expectExecuteFailure(t *testing.T, b *bytes.Buffer, err os.Error) {
+func expectExecuteFailure(t *testing.T, b *bytes.Buffer, err error) {
 	if err != nil {
 		if b.Len() != 0 {
 			t.Errorf("output on buffer: %q", b.String())
diff --git a/src/pkg/exp/template/html/js.go b/src/pkg/exp/template/html/js.go
index 5646f8a..22be418 100644
--- a/src/pkg/exp/template/html/js.go
+++ b/src/pkg/exp/template/html/js.go
@@ -148,7 +148,7 @@
 		// turning into
 		//     x//* error marshalling y:
 		//          second line of error message */null
-		return fmt.Sprintf(" /* %s */null ", strings.Replace(err.String(), "*/", "* /", -1))
+		return fmt.Sprintf(" /* %s */null ", strings.Replace(err.Error(), "*/", "* /", -1))
 	}
 
 	// TODO: maybe post-process output to prevent it from containing
diff --git a/src/pkg/exp/terminal/shell.go b/src/pkg/exp/terminal/shell.go
index e3f5847..5c59167 100644
--- a/src/pkg/exp/terminal/shell.go
+++ b/src/pkg/exp/terminal/shell.go
@@ -4,10 +4,7 @@
 
 package terminal
 
-import (
-	"os"
-	"io"
-)
+import "io"
 
 // Shell contains the state for running a VT100 terminal that is capable of
 // reading lines of input.
@@ -306,12 +303,12 @@
 	}
 }
 
-func (ss *Shell) Write(buf []byte) (n int, err os.Error) {
+func (ss *Shell) Write(buf []byte) (n int, err error) {
 	return ss.c.Write(buf)
 }
 
 // ReadLine returns a line of input from the terminal.
-func (ss *Shell) ReadLine() (line string, err os.Error) {
+func (ss *Shell) ReadLine() (line string, err error) {
 	ss.writeLine([]byte(ss.prompt))
 	ss.c.Write(ss.outBuf)
 	ss.outBuf = ss.outBuf[:0]
@@ -337,7 +334,7 @@
 					break
 				}
 				if key == keyCtrlD {
-					return "", os.EOF
+					return "", io.EOF
 				}
 				line, lineOk = ss.handleKey(key)
 			}
diff --git a/src/pkg/exp/terminal/shell_test.go b/src/pkg/exp/terminal/shell_test.go
index 2bbe4a4..8a76a85 100644
--- a/src/pkg/exp/terminal/shell_test.go
+++ b/src/pkg/exp/terminal/shell_test.go
@@ -5,8 +5,8 @@
 package terminal
 
 import (
+	"io"
 	"testing"
-	"os"
 )
 
 type MockTerminal struct {
@@ -15,7 +15,7 @@
 	received     []byte
 }
 
-func (c *MockTerminal) Read(data []byte) (n int, err os.Error) {
+func (c *MockTerminal) Read(data []byte) (n int, err error) {
 	n = len(data)
 	if n == 0 {
 		return
@@ -24,7 +24,7 @@
 		n = len(c.toSend)
 	}
 	if n == 0 {
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	if c.bytesPerRead > 0 && n > c.bytesPerRead {
 		n = c.bytesPerRead
@@ -34,7 +34,7 @@
 	return
 }
 
-func (c *MockTerminal) Write(data []byte) (n int, err os.Error) {
+func (c *MockTerminal) Write(data []byte) (n int, err error) {
 	c.received = append(c.received, data...)
 	return len(data), nil
 }
@@ -46,7 +46,7 @@
 	if line != "" {
 		t.Errorf("Expected empty line but got: %s", line)
 	}
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("Error should have been EOF but got: %s", err)
 	}
 }
@@ -54,12 +54,12 @@
 var keyPressTests = []struct {
 	in   string
 	line string
-	err  os.Error
+	err  error
 }{
 	{
 		"",
 		"",
-		os.EOF,
+		io.EOF,
 	},
 	{
 		"\r",
diff --git a/src/pkg/exp/terminal/terminal.go b/src/pkg/exp/terminal/terminal.go
index aacd909..d711493 100644
--- a/src/pkg/exp/terminal/terminal.go
+++ b/src/pkg/exp/terminal/terminal.go
@@ -15,6 +15,7 @@
 package terminal
 
 import (
+	"io"
 	"os"
 	"syscall"
 	"unsafe"
@@ -35,7 +36,7 @@
 // MakeRaw put the terminal connected to the given file descriptor into raw
 // mode and returns the previous state of the terminal so that it can be
 // restored.
-func MakeRaw(fd int) (*State, os.Error) {
+func MakeRaw(fd int) (*State, error) {
 	var oldState State
 	if _, _, e := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TCGETS), uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); e != 0 {
 		return nil, os.Errno(e)
@@ -53,7 +54,7 @@
 
 // Restore restores the terminal connected to the given file descriptor to a
 // previous state.
-func Restore(fd int, state *State) os.Error {
+func Restore(fd int, state *State) error {
 	_, _, e := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TCSETS), uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
 	return os.Errno(e)
 }
@@ -61,7 +62,7 @@
 // ReadPassword reads a line of input from a terminal without local echo.  This
 // is commonly used for inputting passwords and other sensitive data. The slice
 // returned does not include the \n.
-func ReadPassword(fd int) ([]byte, os.Error) {
+func ReadPassword(fd int) ([]byte, error) {
 	var oldState syscall.Termios
 	if _, _, e := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TCGETS), uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
 		return nil, os.Errno(e)
@@ -86,7 +87,7 @@
 		}
 		if n == 0 {
 			if len(ret) == 0 {
-				return nil, os.EOF
+				return nil, io.EOF
 			}
 			break
 		}
diff --git a/src/pkg/exp/types/check.go b/src/pkg/exp/types/check.go
index 87e3e93..09e29d1 100644
--- a/src/pkg/exp/types/check.go
+++ b/src/pkg/exp/types/check.go
@@ -11,7 +11,6 @@
 	"go/ast"
 	"go/scanner"
 	"go/token"
-	"os"
 	"strconv"
 )
 
@@ -213,7 +212,7 @@
 // of types for all expression nodes in statements, and a scanner.ErrorList if
 // there are errors.
 //
-func Check(fset *token.FileSet, pkg *ast.Package) (types map[ast.Expr]Type, err os.Error) {
+func Check(fset *token.FileSet, pkg *ast.Package) (types map[ast.Expr]Type, err error) {
 	var c checker
 	c.fset = fset
 	c.types = make(map[ast.Expr]Type)
diff --git a/src/pkg/exp/types/check_test.go b/src/pkg/exp/types/check_test.go
index 034acd0..4a30acf 100644
--- a/src/pkg/exp/types/check_test.go
+++ b/src/pkg/exp/types/check_test.go
@@ -67,7 +67,7 @@
 
 // TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles
 //           or a similar function instead.
-func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, os.Error) {
+func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, error) {
 	files := make(map[string]*ast.File)
 	var errors scanner.ErrorList
 	for _, filename := range filenames {
@@ -132,7 +132,7 @@
 	return errors
 }
 
-func eliminate(t *testing.T, expected map[token.Pos]string, errors os.Error) {
+func eliminate(t *testing.T, expected map[token.Pos]string, errors error) {
 	if errors == nil {
 		return
 	}
diff --git a/src/pkg/exp/types/exportdata.go b/src/pkg/exp/types/exportdata.go
index 784ffff..fa5b6a3 100644
--- a/src/pkg/exp/types/exportdata.go
+++ b/src/pkg/exp/types/exportdata.go
@@ -8,6 +8,7 @@
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -15,7 +16,7 @@
 	"strings"
 )
 
-func readGopackHeader(buf *bufio.Reader) (name string, size int, err os.Error) {
+func readGopackHeader(buf *bufio.Reader) (name string, size int, err error) {
 	// See $GOROOT/include/ar.h.
 	hdr := make([]byte, 16+12+6+6+8+10+2)
 	_, err = io.ReadFull(buf, hdr)
@@ -28,7 +29,7 @@
 	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
 	size, err = strconv.Atoi(s)
 	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
-		err = os.NewError("invalid archive header")
+		err = errors.New("invalid archive header")
 		return
 	}
 	name = strings.TrimSpace(string(hdr[:16]))
@@ -44,7 +45,7 @@
 // export data section of the given object/archive file, or an error.
 // It is the caller's responsibility to close the readCloser.
 //
-func ExportData(filename string) (rc io.ReadCloser, err os.Error) {
+func ExportData(filename string) (rc io.ReadCloser, err error) {
 	file, err := os.Open(filename)
 	if err != nil {
 		return
@@ -77,7 +78,7 @@
 			return
 		}
 		if name != "__.SYMDEF" {
-			err = os.NewError("go archive does not begin with __.SYMDEF")
+			err = errors.New("go archive does not begin with __.SYMDEF")
 			return
 		}
 		const block = 4096
@@ -99,7 +100,7 @@
 			return
 		}
 		if name != "__.PKGDEF" {
-			err = os.NewError("go archive is missing __.PKGDEF")
+			err = errors.New("go archive is missing __.PKGDEF")
 			return
 		}
 
@@ -114,7 +115,7 @@
 	// Now at __.PKGDEF in archive or still at beginning of file.
 	// Either way, line should begin with "go object ".
 	if !strings.HasPrefix(string(line), "go object ") {
-		err = os.NewError("not a go object file")
+		err = errors.New("not a go object file")
 		return
 	}
 
diff --git a/src/pkg/exp/types/gcimporter.go b/src/pkg/exp/types/gcimporter.go
index 4e5172a..d88af95 100644
--- a/src/pkg/exp/types/gcimporter.go
+++ b/src/pkg/exp/types/gcimporter.go
@@ -9,6 +9,7 @@
 
 import (
 	"big"
+	"errors"
 	"fmt"
 	"go/ast"
 	"go/token"
@@ -102,7 +103,7 @@
 }
 
 // GcImporter implements the ast.Importer signature.
-func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, err os.Error) {
+func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, err error) {
 	if path == "unsafe" {
 		return Unsafe, nil
 	}
@@ -118,7 +119,7 @@
 
 	filename, id := findPkg(path)
 	if filename == "" {
-		err = os.NewError("can't find import: " + id)
+		err = errors.New("can't find import: " + id)
 		return
 	}
 
@@ -176,19 +177,19 @@
 // Internal errors are boxed as importErrors.
 type importError struct {
 	pos scanner.Position
-	err os.Error
+	err error
 }
 
-func (e importError) String() string {
+func (e importError) Error() string {
 	return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
 }
 
 func (p *gcParser) error(err interface{}) {
 	if s, ok := err.(string); ok {
-		err = os.NewError(s)
+		err = errors.New(s)
 	}
 	// panic with a runtime.Error if err is not an os.Error
-	panic(importError{p.scanner.Pos(), err.(os.Error)})
+	panic(importError{p.scanner.Pos(), err.(error)})
 }
 
 func (p *gcParser) errorf(format string, args ...interface{}) {
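
For context, the importError change above shows the pattern this CL applies to every custom error type: the String method becomes Error, and the type then satisfies the predeclared error interface instead of os.Error. A minimal standalone sketch of that pattern, with invented names (parseError is not a type in the tree):

package main

import "fmt"

type parseError struct {
	offset int
	msg    string
}

// Error satisfies the built-in error interface; before this CL the
// method would have been named String and implemented os.Error.
func (e *parseError) Error() string {
	return fmt.Sprintf("parse error at byte %d: %s", e.offset, e.msg)
}

func main() {
	var err error = &parseError{offset: 12, msg: "unexpected token"}
	fmt.Println(err) // fmt prints err.Error()
}
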
diff --git a/src/pkg/exp/winfsnotify/winfsnotify.go b/src/pkg/exp/winfsnotify/winfsnotify.go
index c5dfe99..d133740 100644
--- a/src/pkg/exp/winfsnotify/winfsnotify.go
+++ b/src/pkg/exp/winfsnotify/winfsnotify.go
@@ -7,6 +7,7 @@
 package winfsnotify
 
 import (
+	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -36,7 +37,7 @@
 	op    int
 	path  string
 	flags uint32
-	reply chan os.Error
+	reply chan error
 }
 
 type inode struct {
@@ -65,14 +66,14 @@
 	watches  watchMap       // Map of watches (key: i-number)
 	input    chan *input    // Inputs to the reader are sent on this channel
 	Event    chan *Event    // Events are returned on this channel
-	Error    chan os.Error  // Errors are sent on this channel
+	Error    chan error     // Errors are sent on this channel
 	isClosed bool           // Set to true when Close() is first called
-	quit     chan chan<- os.Error
+	quit     chan chan<- error
 	cookie   uint32
 }
 
 // NewWatcher creates and returns a Watcher.
-func NewWatcher() (*Watcher, os.Error) {
+func NewWatcher() (*Watcher, error) {
 	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
 	if e != 0 {
 		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
@@ -82,8 +83,8 @@
 		watches: make(watchMap),
 		input:   make(chan *input, 1),
 		Event:   make(chan *Event, 50),
-		Error:   make(chan os.Error),
-		quit:    make(chan chan<- os.Error, 1),
+		Error:   make(chan error),
+		quit:    make(chan chan<- error, 1),
 	}
 	go w.readEvents()
 	return w, nil
@@ -92,14 +93,14 @@
 // Close closes a Watcher.
 // It sends a message to the reader goroutine to quit and removes all watches
 // associated with the watcher.
-func (w *Watcher) Close() os.Error {
+func (w *Watcher) Close() error {
 	if w.isClosed {
 		return nil
 	}
 	w.isClosed = true
 
 	// Send "quit" message to the reader goroutine
-	ch := make(chan os.Error)
+	ch := make(chan error)
 	w.quit <- ch
 	if err := w.wakeupReader(); err != nil {
 		return err
@@ -108,15 +109,15 @@
 }
 
 // AddWatch adds path to the watched file set.
-func (w *Watcher) AddWatch(path string, flags uint32) os.Error {
+func (w *Watcher) AddWatch(path string, flags uint32) error {
 	if w.isClosed {
-		return os.NewError("watcher already closed")
+		return errors.New("watcher already closed")
 	}
 	in := &input{
 		op:    opAddWatch,
 		path:  filepath.Clean(path),
 		flags: flags,
-		reply: make(chan os.Error),
+		reply: make(chan error),
 	}
 	w.input <- in
 	if err := w.wakeupReader(); err != nil {
@@ -126,16 +127,16 @@
 }
 
 // Watch adds path to the watched file set, watching all events.
-func (w *Watcher) Watch(path string) os.Error {
+func (w *Watcher) Watch(path string) error {
 	return w.AddWatch(path, FS_ALL_EVENTS)
 }
 
 // RemoveWatch removes path from the watched file set.
-func (w *Watcher) RemoveWatch(path string) os.Error {
+func (w *Watcher) RemoveWatch(path string) error {
 	in := &input{
 		op:    opRemoveWatch,
 		path:  filepath.Clean(path),
-		reply: make(chan os.Error),
+		reply: make(chan error),
 	}
 	w.input <- in
 	if err := w.wakeupReader(); err != nil {
@@ -144,7 +145,7 @@
 	return <-in.reply
 }
 
-func (w *Watcher) wakeupReader() os.Error {
+func (w *Watcher) wakeupReader() error {
 	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
 	if e != 0 {
 		return os.NewSyscallError("PostQueuedCompletionStatus", e)
@@ -152,7 +153,7 @@
 	return nil
 }
 
-func getDir(pathname string) (dir string, err os.Error) {
+func getDir(pathname string) (dir string, err error) {
 	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
 	if e != 0 {
 		return "", os.NewSyscallError("GetFileAttributes", e)
@@ -166,7 +167,7 @@
 	return
 }
 
-func getIno(path string) (ino *inode, err os.Error) {
+func getIno(path string) (ino *inode, err error) {
 	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
 		syscall.FILE_LIST_DIRECTORY,
 		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
@@ -207,7 +208,7 @@
 }
 
 // Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) os.Error {
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
 	dir, err := getDir(pathname)
 	if err != nil {
 		return err
@@ -252,7 +253,7 @@
 }
 
 // Must run within the I/O thread.
-func (w *Watcher) removeWatch(pathname string) os.Error {
+func (w *Watcher) removeWatch(pathname string) error {
 	dir, err := getDir(pathname)
 	if err != nil {
 		return err
@@ -293,7 +294,7 @@
 }
 
 // Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) os.Error {
+func (w *Watcher) startRead(watch *watch) error {
 	if e := syscall.CancelIo(watch.ino.handle); e != 0 {
 		w.Error <- os.NewSyscallError("CancelIo", e)
 		w.deleteWatch(watch)
@@ -352,7 +353,7 @@
 						w.startRead(watch)
 					}
 				}
-				var err os.Error
+				var err error
 				if e := syscall.CloseHandle(w.port); e != 0 {
 					err = os.NewSyscallError("CloseHandle", e)
 				}
@@ -392,7 +393,7 @@
 		for {
 			if n == 0 {
 				w.Event <- &Event{Mask: FS_Q_OVERFLOW}
-				w.Error <- os.NewError("short read in readEvents()")
+				w.Error <- errors.New("short read in readEvents()")
 				break
 			}
 
diff --git a/src/pkg/flag/flag.go b/src/pkg/flag/flag.go
index f13f7a4..9f115d5 100644
--- a/src/pkg/flag/flag.go
+++ b/src/pkg/flag/flag.go
@@ -60,6 +60,7 @@
 package flag
 
 import (
+	"errors"
 	"fmt"
 	"os"
 	"sort"
@@ -67,7 +68,7 @@
 )
 
 // ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
-var ErrHelp = os.NewError("flag: help requested")
+var ErrHelp = errors.New("flag: help requested")
 
 // -- Bool Value
 type boolValue bool
@@ -580,7 +581,7 @@
 
 // failf prints to standard error a formatted error and usage message and
 // returns the error.
-func (f *FlagSet) failf(format string, a ...interface{}) os.Error {
+func (f *FlagSet) failf(format string, a ...interface{}) error {
 	err := fmt.Errorf(format, a...)
 	fmt.Fprintln(os.Stderr, err)
 	f.usage()
@@ -600,7 +601,7 @@
 }
 
 // parseOne parses one flag. It returns whether a flag was seen.
-func (f *FlagSet) parseOne() (bool, os.Error) {
+func (f *FlagSet) parseOne() (bool, error) {
 	if len(f.args) == 0 {
 		return false, nil
 	}
@@ -676,7 +677,7 @@
 // include the command name.  Must be called after all flags in the FlagSet
 // are defined and before flags are accessed by the program.
 // The return value will be ErrHelp if -help was set but not defined.
-func (f *FlagSet) Parse(arguments []string) os.Error {
+func (f *FlagSet) Parse(arguments []string) error {
 	f.parsed = true
 	f.args = arguments
 	for {
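
In flag, ErrHelp becomes an ordinary sentinel built with errors.New, and FlagSet.Parse now returns error. A hedged usage sketch (the flag names here are made up) showing that callers still compare directly against the sentinel:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	verbose := fs.Bool("v", false, "verbose output")

	// Parse returns error; asking for -help yields the ErrHelp sentinel.
	if err := fs.Parse(os.Args[1:]); err != nil {
		if err == flag.ErrHelp {
			return // usage was already printed by the FlagSet
		}
		fmt.Fprintln(os.Stderr, "flag error:", err)
		os.Exit(2)
	}
	fmt.Println("verbose:", *verbose)
}
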
diff --git a/src/pkg/fmt/print.go b/src/pkg/fmt/print.go
index 5e0237f..8191ab3 100644
--- a/src/pkg/fmt/print.go
+++ b/src/pkg/fmt/print.go
@@ -6,6 +6,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"os"
 	"reflect"
@@ -37,7 +38,7 @@
 // the flags and options for the operand's format specifier.
 type State interface {
 	// Write is the function to call to emit formatted output to be printed.
-	Write(b []byte) (ret int, err os.Error)
+	Write(b []byte) (ret int, err error)
 	// Width returns the value of the width option and whether it has been set.
 	Width() (wid int, ok bool)
 	// Precision returns the value of the precision option and whether it has been set.
@@ -165,7 +166,7 @@
 
 // Implement Write so we can call Fprintf on a pp (through State), for
 // recursive use in custom verbs.
-func (p *pp) Write(b []byte) (ret int, err os.Error) {
+func (p *pp) Write(b []byte) (ret int, err error) {
 	return p.buf.Write(b)
 }
 
@@ -173,7 +174,7 @@
 
 // Fprintf formats according to a format specifier and writes to w.
 // It returns the number of bytes written and any write error encountered.
-func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err os.Error) {
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
 	p := newPrinter()
 	p.doPrintf(format, a)
 	n64, err := p.buf.WriteTo(w)
@@ -183,7 +184,7 @@
 
 // Printf formats according to a format specifier and writes to standard output.
 // It returns the number of bytes written and any write error encountered.
-func Printf(format string, a ...interface{}) (n int, err os.Error) {
+func Printf(format string, a ...interface{}) (n int, err error) {
 	return Fprintf(os.Stdout, format, a...)
 }
 
@@ -198,8 +199,8 @@
 
 // Errorf formats according to a format specifier and returns the string 
 // as a value that satisfies os.Error.
-func Errorf(format string, a ...interface{}) os.Error {
-	return os.NewError(Sprintf(format, a...))
+func Errorf(format string, a ...interface{}) error {
+	return errors.New(Sprintf(format, a...))
 }
 
 // These routines do not take a format string
@@ -207,7 +208,7 @@
 // Fprint formats using the default formats for its operands and writes to w.
 // Spaces are added between operands when neither is a string.
 // It returns the number of bytes written and any write error encountered.
-func Fprint(w io.Writer, a ...interface{}) (n int, err os.Error) {
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
 	p := newPrinter()
 	p.doPrint(a, false, false)
 	n64, err := p.buf.WriteTo(w)
@@ -218,7 +219,7 @@
 // Print formats using the default formats for its operands and writes to standard output.
 // Spaces are added between operands when neither is a string.
 // It returns the number of bytes written and any write error encountered.
-func Print(a ...interface{}) (n int, err os.Error) {
+func Print(a ...interface{}) (n int, err error) {
 	return Fprint(os.Stdout, a...)
 }
 
@@ -239,7 +240,7 @@
 // Fprintln formats using the default formats for its operands and writes to w.
 // Spaces are always added between operands and a newline is appended.
 // It returns the number of bytes written and any write error encountered.
-func Fprintln(w io.Writer, a ...interface{}) (n int, err os.Error) {
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
 	p := newPrinter()
 	p.doPrint(a, true, true)
 	n64, err := p.buf.WriteTo(w)
@@ -250,7 +251,7 @@
 // Println formats using the default formats for its operands and writes to standard output.
 // Spaces are always added between operands and a newline is appended.
 // It returns the number of bytes written and any write error encountered.
-func Println(a ...interface{}) (n int, err os.Error) {
+func Println(a ...interface{}) (n int, err error) {
 	return Fprintln(os.Stdout, a...)
 }
 
@@ -635,11 +636,11 @@
 		// setting wasString and handled and deferring catchPanic
 		// must happen before calling the method.
 		switch v := p.field.(type) {
-		case os.Error:
+		case error:
 			wasString = false
 			handled = true
 			defer p.catchPanic(p.field, verb)
-			p.printField(v.String(), verb, plus, false, depth)
+			p.printField(v.Error(), verb, plus, false, depth)
 			return
 
 		case Stringer:
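
The printField change above is the semantic heart of this file: fmt now switches on the predeclared error interface and calls Error() where it previously switched on os.Error and called String(). A standalone sketch of that dispatch order, with invented types, assuming error is checked before Stringer as in the code above:

package main

import (
	"errors"
	"fmt"
)

type celsius float64

func (c celsius) String() string { return fmt.Sprintf("%.1fC", float64(c)) }

// describe mirrors fmt's dispatch: an operand's Error method is
// consulted before a Stringer's String method.
func describe(v interface{}) string {
	switch x := v.(type) {
	case error:
		return "error: " + x.Error()
	case fmt.Stringer:
		return "stringer: " + x.String()
	default:
		return fmt.Sprintf("plain: %v", x)
	}
}

func main() {
	fmt.Println(describe(errors.New("disk full"))) // error: disk full
	fmt.Println(describe(celsius(36.6)))           // stringer: 36.6C
	fmt.Println(describe(42))                      // plain: 42
}
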
diff --git a/src/pkg/fmt/scan.go b/src/pkg/fmt/scan.go
index eae952c..54a9fe2 100644
--- a/src/pkg/fmt/scan.go
+++ b/src/pkg/fmt/scan.go
@@ -6,6 +6,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"math"
 	"os"
@@ -21,7 +22,7 @@
 // a local buffer will be used to back up the input, but its contents
 // will be lost when Scan returns.
 type runeUnreader interface {
-	UnreadRune() os.Error
+	UnreadRune() error
 }
 
 // ScanState represents the scanner state passed to custom scanners.
@@ -32,9 +33,9 @@
 	// If invoked during Scanln, Fscanln, or Sscanln, ReadRune() will
 	// return EOF after returning the first '\n' or when reading beyond
 	// the specified width.
-	ReadRune() (r rune, size int, err os.Error)
+	ReadRune() (r rune, size int, err error)
 	// UnreadRune causes the next call to ReadRune to return the same rune.
-	UnreadRune() os.Error
+	UnreadRune() error
 	// SkipSpace skips space in the input. Newlines are treated as space 
 	// unless the scan operation is Scanln, Fscanln or Sscanln, in which case 
 	// a newline is treated as EOF.
@@ -47,14 +48,14 @@
 	// EOF.  The returned slice points to shared data that may be overwritten
 	// by the next call to Token, a call to a Scan function using the ScanState
 	// as input, or when the calling Scan method returns.
-	Token(skipSpace bool, f func(rune) bool) (token []byte, err os.Error)
+	Token(skipSpace bool, f func(rune) bool) (token []byte, err error)
 	// Width returns the value of the width option and whether it has been set.
 	// The unit is Unicode code points.
 	Width() (wid int, ok bool)
 	// Because ReadRune is implemented by the interface, Read should never be
 	// called by the scanning routines and a valid implementation of
 	// ScanState may choose always to return an error from Read.
-	Read(buf []byte) (n int, err os.Error)
+	Read(buf []byte) (n int, err error)
 }
 
 // Scanner is implemented by any value that has a Scan method, which scans
@@ -62,27 +63,27 @@
 // receiver, which must be a pointer to be useful.  The Scan method is called
 // for any argument to Scan, Scanf, or Scanln that implements it.
 type Scanner interface {
-	Scan(state ScanState, verb rune) os.Error
+	Scan(state ScanState, verb rune) error
 }
 
 // Scan scans text read from standard input, storing successive
 // space-separated values into successive arguments.  Newlines count
 // as space.  It returns the number of items successfully scanned.
 // If that is less than the number of arguments, err will report why.
-func Scan(a ...interface{}) (n int, err os.Error) {
+func Scan(a ...interface{}) (n int, err error) {
 	return Fscan(os.Stdin, a...)
 }
 
 // Scanln is similar to Scan, but stops scanning at a newline and
 // after the final item there must be a newline or EOF.
-func Scanln(a ...interface{}) (n int, err os.Error) {
+func Scanln(a ...interface{}) (n int, err error) {
 	return Fscanln(os.Stdin, a...)
 }
 
 // Scanf scans text read from standard input, storing successive
 // space-separated values into successive arguments as determined by
 // the format.  It returns the number of items successfully scanned.
-func Scanf(format string, a ...interface{}) (n int, err os.Error) {
+func Scanf(format string, a ...interface{}) (n int, err error) {
 	return Fscanf(os.Stdin, format, a...)
 }
 
@@ -90,20 +91,20 @@
 // values into successive arguments.  Newlines count as space.  It
 // returns the number of items successfully scanned.  If that is less
 // than the number of arguments, err will report why.
-func Sscan(str string, a ...interface{}) (n int, err os.Error) {
+func Sscan(str string, a ...interface{}) (n int, err error) {
 	return Fscan(strings.NewReader(str), a...)
 }
 
 // Sscanln is similar to Sscan, but stops scanning at a newline and
 // after the final item there must be a newline or EOF.
-func Sscanln(str string, a ...interface{}) (n int, err os.Error) {
+func Sscanln(str string, a ...interface{}) (n int, err error) {
 	return Fscanln(strings.NewReader(str), a...)
 }
 
 // Sscanf scans the argument string, storing successive space-separated
 // values into successive arguments as determined by the format.  It
 // returns the number of items successfully parsed.
-func Sscanf(str string, format string, a ...interface{}) (n int, err os.Error) {
+func Sscanf(str string, format string, a ...interface{}) (n int, err error) {
 	return Fscanf(strings.NewReader(str), format, a...)
 }
 
@@ -111,7 +112,7 @@
 // values into successive arguments.  Newlines count as space.  It
 // returns the number of items successfully scanned.  If that is less
 // than the number of arguments, err will report why.
-func Fscan(r io.Reader, a ...interface{}) (n int, err os.Error) {
+func Fscan(r io.Reader, a ...interface{}) (n int, err error) {
 	s, old := newScanState(r, true, false)
 	n, err = s.doScan(a)
 	s.free(old)
@@ -120,7 +121,7 @@
 
 // Fscanln is similar to Fscan, but stops scanning at a newline and
 // after the final item there must be a newline or EOF.
-func Fscanln(r io.Reader, a ...interface{}) (n int, err os.Error) {
+func Fscanln(r io.Reader, a ...interface{}) (n int, err error) {
 	s, old := newScanState(r, false, true)
 	n, err = s.doScan(a)
 	s.free(old)
@@ -130,7 +131,7 @@
 // Fscanf scans text read from r, storing successive space-separated
 // values into successive arguments as determined by the format.  It
 // returns the number of items successfully parsed.
-func Fscanf(r io.Reader, format string, a ...interface{}) (n int, err os.Error) {
+func Fscanf(r io.Reader, format string, a ...interface{}) (n int, err error) {
 	s, old := newScanState(r, false, false)
 	n, err = s.doScanf(format, a)
 	s.free(old)
@@ -140,7 +141,7 @@
 // scanError represents an error generated by the scanning software.
 // It's used as a unique signature to identify such errors when recovering.
 type scanError struct {
-	err os.Error
+	err error
 }
 
 const eof = -1
@@ -170,11 +171,11 @@
 // The Read method is only in ScanState so that ScanState
 // satisfies io.Reader. It will never be called when used as
 // intended, so there is no need to make it actually work.
-func (s *ss) Read(buf []byte) (n int, err os.Error) {
-	return 0, os.NewError("ScanState's Read should not be called. Use ReadRune")
+func (s *ss) Read(buf []byte) (n int, err error) {
+	return 0, errors.New("ScanState's Read should not be called. Use ReadRune")
 }
 
-func (s *ss) ReadRune() (r rune, size int, err os.Error) {
+func (s *ss) ReadRune() (r rune, size int, err error) {
 	if s.peekRune >= 0 {
 		s.count++
 		r = s.peekRune
@@ -184,7 +185,7 @@
 		return
 	}
 	if s.atEOF || s.nlIsEnd && s.prevRune == '\n' || s.count >= s.fieldLimit {
-		err = os.EOF
+		err = io.EOF
 		return
 	}
 
@@ -192,7 +193,7 @@
 	if err == nil {
 		s.count++
 		s.prevRune = r
-	} else if err == os.EOF {
+	} else if err == io.EOF {
 		s.atEOF = true
 	}
 	return
@@ -210,7 +211,7 @@
 func (s *ss) getRune() (r rune) {
 	r, _, err := s.ReadRune()
 	if err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			return eof
 		}
 		s.error(err)
@@ -229,7 +230,7 @@
 	return
 }
 
-func (s *ss) UnreadRune() os.Error {
+func (s *ss) UnreadRune() error {
 	if u, ok := s.rr.(runeUnreader); ok {
 		u.UnreadRune()
 	} else {
@@ -240,15 +241,15 @@
 	return nil
 }
 
-func (s *ss) error(err os.Error) {
+func (s *ss) error(err error) {
 	panic(scanError{err})
 }
 
 func (s *ss) errorString(err string) {
-	panic(scanError{os.NewError(err)})
+	panic(scanError{errors.New(err)})
 }
 
-func (s *ss) Token(skipSpace bool, f func(rune) bool) (tok []byte, err os.Error) {
+func (s *ss) Token(skipSpace bool, f func(rune) bool) (tok []byte, err error) {
 	defer func() {
 		if e := recover(); e != nil {
 			if se, ok := e.(scanError); ok {
@@ -289,7 +290,7 @@
 
 // readByte returns the next byte from the input, which may be
 // left over from a previous read if the UTF-8 was ill-formed.
-func (r *readRune) readByte() (b byte, err os.Error) {
+func (r *readRune) readByte() (b byte, err error) {
 	if r.pending > 0 {
 		b = r.pendBuf[0]
 		copy(r.pendBuf[0:], r.pendBuf[1:])
@@ -308,7 +309,7 @@
 
 // ReadRune returns the next UTF-8 encoded code point from the
 // io.Reader inside r.
-func (r *readRune) ReadRune() (rr rune, size int, err os.Error) {
+func (r *readRune) ReadRune() (rr rune, size int, err error) {
 	r.buf[0], err = r.readByte()
 	if err != nil {
 		return 0, 0, err
@@ -321,7 +322,7 @@
 	for n = 1; !utf8.FullRune(r.buf[0:n]); n++ {
 		r.buf[n], err = r.readByte()
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				err = nil
 				break
 			}
@@ -435,8 +436,8 @@
 	s.errorString("expected field of type pointer to " + expected + "; found " + reflect.TypeOf(field).String())
 }
 
-var complexError = os.NewError("syntax error scanning complex number")
-var boolError = os.NewError("syntax error scanning boolean")
+var complexError = errors.New("syntax error scanning complex number")
+var boolError = errors.New("syntax error scanning boolean")
 
 // consume reads the next rune in the input and reports whether it is in the ok string.
 // If accept is true, it puts the character into the input token.
@@ -469,7 +470,7 @@
 func (s *ss) notEOF() {
 	// Guarantee there is data to be read.
 	if r := s.getRune(); r == eof {
-		panic(os.EOF)
+		panic(io.EOF)
 	}
 	s.UnreadRune()
 }
@@ -874,12 +875,12 @@
 // scanOne scans a single value, deriving the scanner from the type of the argument.
 func (s *ss) scanOne(verb rune, field interface{}) {
 	s.buf.Reset()
-	var err os.Error
+	var err error
 	// If the parameter has its own Scan method, use that.
 	if v, ok := field.(Scanner); ok {
 		err = v.Scan(s, verb)
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				err = io.ErrUnexpectedEOF
 			}
 			s.error(err)
@@ -976,11 +977,11 @@
 }
 
 // errorHandler turns local panics into error returns.
-func errorHandler(errp *os.Error) {
+func errorHandler(errp *error) {
 	if e := recover(); e != nil {
 		if se, ok := e.(scanError); ok { // catch local error
 			*errp = se.err
-		} else if eof, ok := e.(os.Error); ok && eof == os.EOF { // out of input
+		} else if eof, ok := e.(error); ok && eof == io.EOF { // out of input
 			*errp = eof
 		} else {
 			panic(e)
@@ -989,7 +990,7 @@
 }
 
 // doScan does the real work for scanning without a format string.
-func (s *ss) doScan(a []interface{}) (numProcessed int, err os.Error) {
+func (s *ss) doScan(a []interface{}) (numProcessed int, err error) {
 	defer errorHandler(&err)
 	for _, field := range a {
 		s.scanOne('v', field)
@@ -1061,7 +1062,7 @@
 
 // doScanf does the real work when scanning with a format string.
 //  At the moment, it handles only pointers to basic types.
-func (s *ss) doScanf(format string, a []interface{}) (numProcessed int, err os.Error) {
+func (s *ss) doScanf(format string, a []interface{}) (numProcessed int, err error) {
 	defer errorHandler(&err)
 	end := len(format) - 1
 	// We process one item per non-trivial format
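
Throughout scan.go, os.EOF becomes io.EOF; the end-of-input sentinel now lives in the io package. A minimal sketch of the standard read loop this implies, not taken from the tree:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("12 34 56")
	buf := make([]byte, 4)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			fmt.Printf("read %q\n", buf[:n])
		}
		if err == io.EOF { // was os.EOF before this CL
			break
		}
		if err != nil {
			fmt.Println("read error:", err)
			break
		}
	}
}
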
diff --git a/src/pkg/fmt/scan_test.go b/src/pkg/fmt/scan_test.go
index fbc28c1..7dd0015 100644
--- a/src/pkg/fmt/scan_test.go
+++ b/src/pkg/fmt/scan_test.go
@@ -7,10 +7,10 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	. "fmt"
 	"io"
 	"math"
-	"os"
 	"reflect"
 	"regexp"
 	"strings"
@@ -87,14 +87,14 @@
 // Xs accepts any non-empty run of the verb character
 type Xs string
 
-func (x *Xs) Scan(state ScanState, verb rune) os.Error {
+func (x *Xs) Scan(state ScanState, verb rune) error {
 	tok, err := state.Token(true, func(r rune) bool { return r == verb })
 	if err != nil {
 		return err
 	}
 	s := string(tok)
 	if !regexp.MustCompile("^" + string(verb) + "+$").MatchString(s) {
-		return os.NewError("syntax error for xs")
+		return errors.New("syntax error for xs")
 	}
 	*x = Xs(s)
 	return nil
@@ -109,7 +109,7 @@
 	s string
 }
 
-func (s *IntString) Scan(state ScanState, verb rune) os.Error {
+func (s *IntString) Scan(state ScanState, verb rune) error {
 	if _, err := Fscan(state, &s.i); err != nil {
 		return err
 	}
@@ -130,7 +130,7 @@
 	r *strings.Reader
 }
 
-func (s *myStringReader) Read(p []byte) (n int, err os.Error) {
+func (s *myStringReader) Read(p []byte) (n int, err error) {
 	return s.r.Read(p)
 }
 
@@ -350,7 +350,7 @@
 	{"%c%c%c", "\xc2X\xc2", args(&i, &j, &k), args(utf8.RuneError, 'X', utf8.RuneError), ""},
 }
 
-func testScan(name string, t *testing.T, scan func(r io.Reader, a ...interface{}) (int, os.Error)) {
+func testScan(name string, t *testing.T, scan func(r io.Reader, a ...interface{}) (int, error)) {
 	for _, test := range scanTests {
 		var r io.Reader
 		if name == "StringReader" {
@@ -431,7 +431,7 @@
 			t.Errorf("expected overflow scanning %q", test.text)
 			continue
 		}
-		if !re.MatchString(err.String()) {
+		if !re.MatchString(err.Error()) {
 			t.Errorf("expected overflow error scanning %q: %s", test.text, err)
 		}
 	}
@@ -500,7 +500,7 @@
 		if err != nil {
 			if test.err == "" {
 				t.Errorf("got error scanning (%q, %q): %q", test.format, test.text, err)
-			} else if strings.Index(err.String(), test.err) < 0 {
+			} else if strings.Index(err.Error(), test.err) < 0 {
 				t.Errorf("got wrong error scanning (%q, %q): %q; expected %q", test.format, test.text, err, test.err)
 			}
 			continue
@@ -594,7 +594,7 @@
 	_, err := Fscan(r, a)
 	if err == nil {
 		t.Error("expected error scanning non-pointer")
-	} else if strings.Index(err.String(), "pointer") < 0 {
+	} else if strings.Index(err.Error(), "pointer") < 0 {
 		t.Errorf("expected pointer error scanning non-pointer, got: %s", err)
 	}
 }
@@ -604,7 +604,7 @@
 	_, err := Sscanln("1 x\n", &a)
 	if err == nil {
 		t.Error("expected error scanning string missing newline")
-	} else if strings.Index(err.String(), "newline") < 0 {
+	} else if strings.Index(err.Error(), "newline") < 0 {
 		t.Errorf("expected newline error scanning string missing newline, got: %s", err)
 	}
 }
@@ -615,7 +615,7 @@
 	_, err := Fscanln(r, &a, &b)
 	if err == nil {
 		t.Error("expected error scanning string with extra newline")
-	} else if strings.Index(err.String(), "newline") < 0 {
+	} else if strings.Index(err.Error(), "newline") < 0 {
 		t.Errorf("expected newline error scanning string with extra newline, got: %s", err)
 	}
 }
@@ -626,7 +626,7 @@
 	eofCount int
 }
 
-func (ec *eofCounter) Read(b []byte) (n int, err os.Error) {
+func (ec *eofCounter) Read(b []byte) (n int, err error) {
 	n, err = ec.reader.Read(b)
 	if n == 0 {
 		ec.eofCount++
@@ -670,14 +670,14 @@
 	if n != 1 || i != 23 {
 		t.Errorf("Sscanf expected one value of 23; got %d %d", n, i)
 	}
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("Sscanf expected EOF; got %q", err)
 	}
 	n, err = Sscan("234", &i, &j)
 	if n != 1 || i != 234 {
 		t.Errorf("Sscan expected one value of 234; got %d %d", n, i)
 	}
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("Sscan expected EOF; got %q", err)
 	}
 	// Trailing space is tougher.
@@ -685,7 +685,7 @@
 	if n != 1 || i != 234 {
 		t.Errorf("Sscan expected one value of 234; got %d %d", n, i)
 	}
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("Sscan expected EOF; got %q", err)
 	}
 }
@@ -715,10 +715,10 @@
 
 func TestEOFAllTypes(t *testing.T) {
 	for i, test := range eofTests {
-		if _, err := Sscanf("", test.format, test.v); err != os.EOF {
+		if _, err := Sscanf("", test.format, test.v); err != io.EOF {
 			t.Errorf("#%d: %s %T not eof on empty string: %s", i, test.format, test.v, err)
 		}
-		if _, err := Sscanf("   ", test.format, test.v); err != os.EOF {
+		if _, err := Sscanf("   ", test.format, test.v); err != io.EOF {
 			t.Errorf("#%d: %s %T not eof on trailing blanks: %s", i, test.format, test.v, err)
 		}
 	}
@@ -749,7 +749,7 @@
 
 // Attempt to read two lines into the object.  Scanln should prevent this
 // because it stops at newline; Scan and Scanf should be fine.
-func (t *TwoLines) Scan(state ScanState, verb rune) os.Error {
+func (t *TwoLines) Scan(state ScanState, verb rune) error {
 	chars := make([]rune, 0, 100)
 	for nlCount := 0; nlCount < 2; {
 		c, _, err := state.ReadRune()
@@ -812,7 +812,7 @@
 	next *RecursiveInt
 }
 
-func (r *RecursiveInt) Scan(state ScanState, verb rune) (err os.Error) {
+func (r *RecursiveInt) Scan(state ScanState, verb rune) (err error) {
 	_, err = Fscan(state, &r.i)
 	if err != nil {
 		return
@@ -820,7 +820,7 @@
 	next := new(RecursiveInt)
 	_, err = Fscanf(state, ".%v", next)
 	if err != nil {
-		if err == os.NewError("input does not match format") || err == io.ErrUnexpectedEOF {
+		if err == errors.New("input does not match format") || err == io.ErrUnexpectedEOF {
 			err = nil
 		}
 		return
@@ -832,7 +832,7 @@
 // Perform the same scanning task as RecursiveInt.Scan
 // but without recurring through scanner, so we can compare
 // performance more directly.
-func scanInts(r *RecursiveInt, b *bytes.Buffer) (err os.Error) {
+func scanInts(r *RecursiveInt, b *bytes.Buffer) (err error) {
 	r.next = nil
 	_, err = Fscan(b, &r.i)
 	if err != nil {
@@ -840,7 +840,7 @@
 	}
 	c, _, err := b.ReadRune()
 	if err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = nil
 		}
 		return
@@ -867,7 +867,7 @@
 
 func TestScanInts(t *testing.T) {
 	testScanInts(t, scanInts)
-	testScanInts(t, func(r *RecursiveInt, b *bytes.Buffer) (err os.Error) {
+	testScanInts(t, func(r *RecursiveInt, b *bytes.Buffer) (err error) {
 		_, err = Fscan(b, r)
 		return
 	})
@@ -877,7 +877,7 @@
 // platform that does not support split stack.
 const intCount = 800
 
-func testScanInts(t *testing.T, scan func(*RecursiveInt, *bytes.Buffer) os.Error) {
+func testScanInts(t *testing.T, scan func(*RecursiveInt, *bytes.Buffer) error) {
 	r := new(RecursiveInt)
 	ints := makeInts(intCount)
 	buf := bytes.NewBuffer(ints)
diff --git a/src/pkg/go/ast/print.go b/src/pkg/go/ast/print.go
index e36f99f..70c9547 100644
--- a/src/pkg/go/ast/print.go
+++ b/src/pkg/go/ast/print.go
@@ -36,7 +36,7 @@
 // struct fields for which f(fieldname, fieldvalue) is true are
 // are printed; all others are filtered from the output.
 //
-func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n int, err os.Error) {
+func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n int, err error) {
 	// setup printer
 	p := printer{
 		output: w,
@@ -67,7 +67,7 @@
 
 // Print prints x to standard output, skipping nil fields.
 // Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
-func Print(fset *token.FileSet, x interface{}) (int, os.Error) {
+func Print(fset *token.FileSet, x interface{}) (int, error) {
 	return Fprint(os.Stdout, fset, x, NotNilFilter)
 }
 
@@ -84,7 +84,7 @@
 
 var indent = []byte(".  ")
 
-func (p *printer) Write(data []byte) (n int, err os.Error) {
+func (p *printer) Write(data []byte) (n int, err error) {
 	var m int
 	for i, b := range data {
 		// invariant: data[0:n] has been written
@@ -117,7 +117,7 @@
 // localError wraps locally caught os.Errors so we can distinguish
 // them from genuine panics which we don't want to return as errors.
 type localError struct {
-	err os.Error
+	err error
 }
 
 // printf is a convenience wrapper that takes care of print errors.
diff --git a/src/pkg/go/ast/resolve.go b/src/pkg/go/ast/resolve.go
index 3927a79..b24688d 100644
--- a/src/pkg/go/ast/resolve.go
+++ b/src/pkg/go/ast/resolve.go
@@ -10,7 +10,6 @@
 	"fmt"
 	"go/scanner"
 	"go/token"
-	"os"
 	"strconv"
 )
 
@@ -61,7 +60,7 @@
 // Importer should load the package data for the given path into 
 // a new *Object (pkg), record pkg in the imports map, and then
 // return pkg.
-type Importer func(imports map[string]*Object, path string) (pkg *Object, err os.Error)
+type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
 
 // NewPackage creates a new Package node from a set of File nodes. It resolves
 // unresolved identifiers across files and updates each file's Unresolved list
@@ -72,7 +71,7 @@
 // different package names are reported and then ignored.
 // The result is a package node and a scanner.ErrorList if there were errors.
 //
-func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, os.Error) {
+func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error) {
 	var p pkgBuilder
 	p.fset = fset
 
diff --git a/src/pkg/go/build/build.go b/src/pkg/go/build/build.go
index 97f92bf..282a508 100644
--- a/src/pkg/go/build/build.go
+++ b/src/pkg/go/build/build.go
@@ -7,6 +7,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"exec"
 	"fmt"
 	"os"
@@ -17,7 +18,7 @@
 )
 
 // Build produces a build Script for the given package.
-func Build(tree *Tree, pkg string, info *DirInfo) (*Script, os.Error) {
+func Build(tree *Tree, pkg string, info *DirInfo) (*Script, error) {
 	s := &Script{}
 	b := &build{
 		script: s,
@@ -29,7 +30,7 @@
 	if g := os.Getenv("GOARCH"); g != "" {
 		b.goarch = g
 	}
-	var err os.Error
+	var err error
 	b.arch, err = ArchChar(b.goarch)
 	if err != nil {
 		return nil, err
@@ -42,7 +43,7 @@
 			// FindTree should always be able to suggest an import
 			// path and tree. The path must be malformed
 			// (for example, an absolute or relative path).
-			return nil, os.NewError("build: invalid import: " + pkg)
+			return nil, errors.New("build: invalid import: " + pkg)
 		}
 		s.addInput(filepath.Join(t.PkgDir(), p+".a"))
 	}
@@ -89,7 +90,7 @@
 	}
 
 	if len(ofiles) == 0 {
-		return nil, os.NewError("make: no object files to build")
+		return nil, errors.New("make: no object files to build")
 	}
 
 	// choose target file
@@ -138,7 +139,7 @@
 }
 
 // Run runs the Script's Cmds in order.
-func (s *Script) Run() os.Error {
+func (s *Script) Run() error {
 	for _, c := range s.Cmd {
 		if err := c.Run(); err != nil {
 			return err
@@ -174,7 +175,7 @@
 
 // Clean removes the Script's Intermediate files.
 // It tries to remove every file and returns the first error it encounters.
-func (s *Script) Clean() (err os.Error) {
+func (s *Script) Clean() (err error) {
 	// Reverse order so that directories get removed after the files they contain.
 	for i := len(s.Intermediate) - 1; i >= 0; i-- {
 		if e := os.Remove(s.Intermediate[i]); err == nil {
@@ -186,7 +187,7 @@
 
 // Nuke removes the Script's Intermediate and Output files.
 // It tries to remove every file and returns the first error it encounters.
-func (s *Script) Nuke() (err os.Error) {
+func (s *Script) Nuke() (err error) {
 	// Reverse order so that directories get removed after the files they contain.
 	for i := len(s.Output) - 1; i >= 0; i-- {
 		if e := os.Remove(s.Output[i]); err == nil {
@@ -214,7 +215,7 @@
 }
 
 // Run executes the Cmd.
-func (c *Cmd) Run() os.Error {
+func (c *Cmd) Run() error {
 	if c.Args[0] == "mkdir" {
 		for _, p := range c.Output {
 			if err := os.MkdirAll(p, 0777); err != nil {
@@ -245,7 +246,7 @@
 
 // ArchChar returns the architecture character for the given goarch.
 // For example, ArchChar("amd64") returns "6".
-func ArchChar(goarch string) (string, os.Error) {
+func ArchChar(goarch string) (string, error) {
 	switch goarch {
 	case "386":
 		return "8", nil
@@ -254,7 +255,7 @@
 	case "arm":
 		return "5", nil
 	}
-	return "", os.NewError("unsupported GOARCH " + goarch)
+	return "", errors.New("unsupported GOARCH " + goarch)
 }
 
 type build struct {
diff --git a/src/pkg/go/build/dir.go b/src/pkg/go/build/dir.go
index b67f999..0d175c7 100644
--- a/src/pkg/go/build/dir.go
+++ b/src/pkg/go/build/dir.go
@@ -6,6 +6,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"go/ast"
 	"go/doc"
@@ -41,7 +42,7 @@
 	// describing the content of the named directory.
 	// The dir argument is the argument to ScanDir.
 	// If ReadDir is nil, ScanDir uses io.ReadDir.
-	ReadDir func(dir string) (fi []*os.FileInfo, err os.Error)
+	ReadDir func(dir string) (fi []*os.FileInfo, err error)
 
 	// ReadFile returns the content of the file named file
 	// in the directory named dir.  The dir argument is the
@@ -52,17 +53,17 @@
 	//
 	// If ReadFile is nil, ScanDir uses filepath.Join(dir, file)
 	// as the path and ioutil.ReadFile to read the data.
-	ReadFile func(dir, file string) (path string, content []byte, err os.Error)
+	ReadFile func(dir, file string) (path string, content []byte, err error)
 }
 
-func (ctxt *Context) readDir(dir string) ([]*os.FileInfo, os.Error) {
+func (ctxt *Context) readDir(dir string) ([]*os.FileInfo, error) {
 	if f := ctxt.ReadDir; f != nil {
 		return f(dir)
 	}
 	return ioutil.ReadDir(dir)
 }
 
-func (ctxt *Context) readFile(dir, file string) (string, []byte, os.Error) {
+func (ctxt *Context) readFile(dir, file string) (string, []byte, error) {
 	if f := ctxt.ReadFile; f != nil {
 		return f(dir, file)
 	}
@@ -116,7 +117,7 @@
 }
 
 // ScanDir calls DefaultContext.ScanDir.
-func ScanDir(dir string) (info *DirInfo, err os.Error) {
+func ScanDir(dir string) (info *DirInfo, err error) {
 	return DefaultContext.ScanDir(dir)
 }
 
@@ -128,7 +129,7 @@
 //	- files ending in _test.go
 //	- files starting with _ or .
 //
-func (ctxt *Context) ScanDir(dir string) (info *DirInfo, err os.Error) {
+func (ctxt *Context) ScanDir(dir string) (info *DirInfo, err error) {
 	dirs, err := ctxt.readDir(dir)
 	if err != nil {
 		return nil, err
@@ -364,7 +365,7 @@
 //
 // TODO(rsc): This duplicates code in cgo.
 // Once the dust settles, remove this code from cgo.
-func (ctxt *Context) saveCgo(filename string, di *DirInfo, cg *ast.CommentGroup) os.Error {
+func (ctxt *Context) saveCgo(filename string, di *DirInfo, cg *ast.CommentGroup) error {
 	text := doc.CommentText(cg)
 	for _, line := range strings.Split(text, "\n") {
 		orig := line
@@ -459,7 +460,7 @@
 //
 //     []string{"a", "b:c d", "ef", `g"`}
 //
-func splitQuoted(s string) (r []string, err os.Error) {
+func splitQuoted(s string) (r []string, err error) {
 	var args []string
 	arg := make([]rune, len(s))
 	escaped := false
@@ -497,9 +498,9 @@
 		args = append(args, string(arg[:i]))
 	}
 	if quote != 0 {
-		err = os.NewError("unclosed quote")
+		err = errors.New("unclosed quote")
 	} else if escaped {
-		err = os.NewError("unfinished escaping")
+		err = errors.New("unfinished escaping")
 	}
 	return args, err
 }
diff --git a/src/pkg/go/build/path.go b/src/pkg/go/build/path.go
index e39b5f8..7ccb129 100644
--- a/src/pkg/go/build/path.go
+++ b/src/pkg/go/build/path.go
@@ -5,6 +5,7 @@
 package build
 
 import (
+	"errors"
 	"fmt"
 	"log"
 	"os"
@@ -21,9 +22,9 @@
 	Goroot bool
 }
 
-func newTree(p string) (*Tree, os.Error) {
+func newTree(p string) (*Tree, error) {
 	if !filepath.IsAbs(p) {
-		return nil, os.NewError("must be absolute")
+		return nil, errors.New("must be absolute")
 	}
 	ep, err := filepath.EvalSymlinks(p)
 	if err != nil {
@@ -84,13 +85,13 @@
 }
 
 var (
-	ErrNotFound     = os.NewError("go/build: package could not be found locally")
-	ErrTreeNotFound = os.NewError("go/build: no valid GOROOT or GOPATH could be found")
+	ErrNotFound     = errors.New("go/build: package could not be found locally")
+	ErrTreeNotFound = errors.New("go/build: no valid GOROOT or GOPATH could be found")
 )
 
 // FindTree takes an import or filesystem path and returns the
 // tree where the package source should be and the package import path.
-func FindTree(path string) (tree *Tree, pkg string, err os.Error) {
+func FindTree(path string) (tree *Tree, pkg string, err error) {
 	if isLocalPath(path) {
 		if path, err = filepath.Abs(path); err != nil {
 			return
diff --git a/src/pkg/go/parser/interface.go b/src/pkg/go/parser/interface.go
index 4f980fc..d3bab31 100644
--- a/src/pkg/go/parser/interface.go
+++ b/src/pkg/go/parser/interface.go
@@ -8,6 +8,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"go/ast"
 	"go/scanner"
 	"go/token"
@@ -21,7 +22,7 @@
 // otherwise it returns an error. If src == nil, readSource returns
 // the result of reading the file specified by filename.
 //
-func readSource(filename string, src interface{}) ([]byte, os.Error) {
+func readSource(filename string, src interface{}) ([]byte, error) {
 	if src != nil {
 		switch s := src.(type) {
 		case string:
@@ -41,14 +42,14 @@
 			}
 			return buf.Bytes(), nil
 		default:
-			return nil, os.NewError("invalid source")
+			return nil, errors.New("invalid source")
 		}
 	}
 
 	return ioutil.ReadFile(filename)
 }
 
-func (p *parser) errors() os.Error {
+func (p *parser) errors() error {
 	mode := scanner.Sorted
 	if p.mode&SpuriousErrors == 0 {
 		mode = scanner.NoMultiples
@@ -61,7 +62,7 @@
 // as for ParseFile. If there is an error, the result expression
 // may be nil or contain a partial AST.
 //
-func ParseExpr(fset *token.FileSet, filename string, src interface{}) (ast.Expr, os.Error) {
+func ParseExpr(fset *token.FileSet, filename string, src interface{}) (ast.Expr, error) {
 	data, err := readSource(filename, src)
 	if err != nil {
 		return nil, err
@@ -83,7 +84,7 @@
 // interpretation as for ParseFile. If there is an error, the node
 // list may be nil or contain partial ASTs.
 //
-func ParseStmtList(fset *token.FileSet, filename string, src interface{}) ([]ast.Stmt, os.Error) {
+func ParseStmtList(fset *token.FileSet, filename string, src interface{}) ([]ast.Stmt, error) {
 	data, err := readSource(filename, src)
 	if err != nil {
 		return nil, err
@@ -102,7 +103,7 @@
 // interpretation as for ParseFile. If there is an error, the node
 // list may be nil or contain partial ASTs.
 //
-func ParseDeclList(fset *token.FileSet, filename string, src interface{}) ([]ast.Decl, os.Error) {
+func ParseDeclList(fset *token.FileSet, filename string, src interface{}) ([]ast.Decl, error) {
 	data, err := readSource(filename, src)
 	if err != nil {
 		return nil, err
@@ -136,7 +137,7 @@
 // representing the fragments of erroneous source code). Multiple errors
 // are returned via a scanner.ErrorList which is sorted by file position.
 //
-func ParseFile(fset *token.FileSet, filename string, src interface{}, mode uint) (*ast.File, os.Error) {
+func ParseFile(fset *token.FileSet, filename string, src interface{}, mode uint) (*ast.File, error) {
 	data, err := readSource(filename, src)
 	if err != nil {
 		return nil, err
@@ -158,7 +159,7 @@
 // be incomplete (missing packages and/or incomplete packages) and the first
 // error encountered is returned.
 //
-func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[string]*ast.Package, first os.Error) {
+func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[string]*ast.Package, first error) {
 	pkgs = make(map[string]*ast.Package)
 	for _, filename := range filenames {
 		if src, err := ParseFile(fset, filename, nil, mode); err == nil {
@@ -187,7 +188,7 @@
 // returned. If a parse error occurred, a non-nil but incomplete map and the
 // error are returned.
 //
-func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool, mode uint) (map[string]*ast.Package, os.Error) {
+func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool, mode uint) (map[string]*ast.Package, error) {
 	fd, err := os.Open(path)
 	if err != nil {
 		return nil, err
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index bfabd74..2a1445d 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -57,7 +57,7 @@
 // local error wrapper so we can distinguish os.Errors we want to return
 // as errors from genuine panics (which we don't want to return as errors)
 type osError struct {
-	err os.Error
+	err error
 }
 
 type printer struct {
@@ -837,7 +837,7 @@
 //              However, this would mess up any formatting done by
 //              the tabwriter.
 
-func (p *trimmer) Write(data []byte) (n int, err os.Error) {
+func (p *trimmer) Write(data []byte) (n int, err error) {
 	// invariants:
 	// p.state == inSpace:
 	//	p.space is unwritten
@@ -925,7 +925,7 @@
 }
 
 // fprint implements Fprint and takes a nodesSizes map for setting up the printer state.
-func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (written int, err os.Error) {
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (written int, err error) {
 	// redirect output through a trimmer to eliminate trailing whitespace
 	// (Input to a tabwriter must be untrimmed since trailing tabs provide
 	// formatting information. The tabwriter could provide trimming
@@ -1004,14 +1004,14 @@
 // The node type must be *ast.File, or assignment-compatible to ast.Expr,
 // ast.Decl, ast.Spec, or ast.Stmt.
 //
-func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) {
+func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, error) {
 	return cfg.fprint(output, fset, node, make(map[ast.Node]int))
 }
 
 // Fprint "pretty-prints" an AST node to output.
 // It calls Config.Fprint with default settings.
 //
-func Fprint(output io.Writer, fset *token.FileSet, node interface{}) os.Error {
+func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
 	_, err := (&Config{Tabwidth: 8}).Fprint(output, fset, node) // don't care about number of bytes written
 	return err
 }
diff --git a/src/pkg/go/scanner/errors.go b/src/pkg/go/scanner/errors.go
index df2a46b..7621cf5 100644
--- a/src/pkg/go/scanner/errors.go
+++ b/src/pkg/go/scanner/errors.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"go/token"
 	"io"
-	"os"
 	"sort"
 )
 
@@ -49,7 +48,7 @@
 	Msg string
 }
 
-func (e *Error) String() string {
+func (e *Error) Error() string {
 	if e.Pos.Filename != "" || e.Pos.IsValid() {
 		// don't print "<unknown position>"
 		// TODO(gri) reconsider the semantics of Position.IsValid
@@ -85,12 +84,12 @@
 	return false
 }
 
-func (p ErrorList) String() string {
+func (p ErrorList) Error() string {
 	switch len(p) {
 	case 0:
 		return "unspecified error"
 	case 1:
-		return p[0].String()
+		return p[0].Error()
 	}
 	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
 }
@@ -140,7 +139,7 @@
 // so that a nil result can be assigned to an os.Error variable and
 // remains nil.
 //
-func (h *ErrorVector) GetError(mode int) os.Error {
+func (h *ErrorVector) GetError(mode int) error {
 	if len(h.errors) == 0 {
 		return nil
 	}
@@ -157,7 +156,7 @@
 // one error per line, if the err parameter is an ErrorList. Otherwise
 // it prints the err string.
 //
-func PrintError(w io.Writer, err os.Error) {
+func PrintError(w io.Writer, err error) {
 	if list, ok := err.(ErrorList); ok {
 		for _, e := range list {
 			fmt.Fprintf(w, "%s\n", e)
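
In go/scanner, both *Error and ErrorList now satisfy error directly through Error() string, so GetError can hand them back as plain error values. A hedged sketch with an invented slice type, illustrating why a slice can carry the Error method and why GetError must return a literal nil on success:

package main

import "fmt"

// errList mimics scanner.ErrorList: a slice type whose Error method
// summarizes the collected errors.
type errList []string

func (p errList) Error() string {
	switch len(p) {
	case 0:
		return "unspecified error"
	case 1:
		return p[0]
	}
	return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}

// check returns nil on success. Returning a nil errList instead would
// produce a non-nil error interface value (typed nil), which is the
// pitfall the GetError doc comment above warns about.
func check(inputs []string) error {
	var list errList
	for _, in := range inputs {
		if in == "" {
			list = append(list, "empty input")
		}
	}
	if len(list) == 0 {
		return nil
	}
	return list
}

func main() {
	fmt.Println(check([]string{"a", "b"}))   // <nil>
	fmt.Println(check([]string{"", "", ""})) // empty input (and 2 more errors)
}
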
diff --git a/src/pkg/go/token/serialize.go b/src/pkg/go/token/serialize.go
index 5fb47f7..611b1b3 100644
--- a/src/pkg/go/token/serialize.go
+++ b/src/pkg/go/token/serialize.go
@@ -7,7 +7,6 @@
 import (
 	"gob"
 	"io"
-	"os"
 )
 
 type serializedFile struct {
@@ -24,17 +23,17 @@
 	Files []serializedFile
 }
 
-func (s *serializedFileSet) Read(r io.Reader) os.Error {
+func (s *serializedFileSet) Read(r io.Reader) error {
 	return gob.NewDecoder(r).Decode(s)
 }
 
-func (s *serializedFileSet) Write(w io.Writer) os.Error {
+func (s *serializedFileSet) Write(w io.Writer) error {
 	return gob.NewEncoder(w).Encode(s)
 }
 
 // Read reads the fileset from r into s; s must not be nil.
 // If r does not also implement io.ByteReader, it will be wrapped in a bufio.Reader.
-func (s *FileSet) Read(r io.Reader) os.Error {
+func (s *FileSet) Read(r io.Reader) error {
 	var ss serializedFileSet
 	if err := ss.Read(r); err != nil {
 		return err
@@ -55,7 +54,7 @@
 }
 
 // Write writes the fileset s to w.
-func (s *FileSet) Write(w io.Writer) os.Error {
+func (s *FileSet) Write(w io.Writer) error {
 	var ss serializedFileSet
 
 	s.mutex.Lock()
diff --git a/src/pkg/go/token/serialize_test.go b/src/pkg/go/token/serialize_test.go
index 24e419a..a8ce30ab 100644
--- a/src/pkg/go/token/serialize_test.go
+++ b/src/pkg/go/token/serialize_test.go
@@ -7,13 +7,12 @@
 import (
 	"bytes"
 	"fmt"
-	"os"
 	"testing"
 )
 
 // equal returns nil if p and q describe the same file set;
 // otherwise it returns an error describing the discrepancy.
-func equal(p, q *FileSet) os.Error {
+func equal(p, q *FileSet) error {
 	if p == q {
 		// avoid deadlock if p == q
 		return nil
diff --git a/src/pkg/gob/codec_test.go b/src/pkg/gob/codec_test.go
index 5306354..dc0e007 100644
--- a/src/pkg/gob/codec_test.go
+++ b/src/pkg/gob/codec_test.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"errors"
 	"math"
-	"os"
 	"reflect"
 	"strings"
 	"testing"
@@ -330,7 +330,7 @@
 // Test instruction execution for decoding.
 // Do not run the machine yet; instead do individual instructions crafted by hand.
 func TestScalarDecInstructions(t *testing.T) {
-	ovfl := os.NewError("overflow")
+	ovfl := errors.New("overflow")
 
 	// bool
 	{
@@ -633,7 +633,7 @@
 		Minc complex128
 	}
 	var it inputT
-	var err os.Error
+	var err error
 	b := new(bytes.Buffer)
 	enc := NewEncoder(b)
 	dec := NewDecoder(b)
@@ -650,7 +650,7 @@
 	var o1 outi8
 	enc.Encode(it)
 	err = dec.Decode(&o1)
-	if err == nil || err.String() != `value for "Maxi" out of range` {
+	if err == nil || err.Error() != `value for "Maxi" out of range` {
 		t.Error("wrong overflow error for int8:", err)
 	}
 	it = inputT{
@@ -659,7 +659,7 @@
 	b.Reset()
 	enc.Encode(it)
 	err = dec.Decode(&o1)
-	if err == nil || err.String() != `value for "Mini" out of range` {
+	if err == nil || err.Error() != `value for "Mini" out of range` {
 		t.Error("wrong underflow error for int8:", err)
 	}
 
@@ -675,7 +675,7 @@
 	var o2 outi16
 	enc.Encode(it)
 	err = dec.Decode(&o2)
-	if err == nil || err.String() != `value for "Maxi" out of range` {
+	if err == nil || err.Error() != `value for "Maxi" out of range` {
 		t.Error("wrong overflow error for int16:", err)
 	}
 	it = inputT{
@@ -684,7 +684,7 @@
 	b.Reset()
 	enc.Encode(it)
 	err = dec.Decode(&o2)
-	if err == nil || err.String() != `value for "Mini" out of range` {
+	if err == nil || err.Error() != `value for "Mini" out of range` {
 		t.Error("wrong underflow error for int16:", err)
 	}
 
@@ -700,7 +700,7 @@
 	var o3 outi32
 	enc.Encode(it)
 	err = dec.Decode(&o3)
-	if err == nil || err.String() != `value for "Maxi" out of range` {
+	if err == nil || err.Error() != `value for "Maxi" out of range` {
 		t.Error("wrong overflow error for int32:", err)
 	}
 	it = inputT{
@@ -709,7 +709,7 @@
 	b.Reset()
 	enc.Encode(it)
 	err = dec.Decode(&o3)
-	if err == nil || err.String() != `value for "Mini" out of range` {
+	if err == nil || err.Error() != `value for "Mini" out of range` {
 		t.Error("wrong underflow error for int32:", err)
 	}
 
@@ -724,7 +724,7 @@
 	var o4 outu8
 	enc.Encode(it)
 	err = dec.Decode(&o4)
-	if err == nil || err.String() != `value for "Maxu" out of range` {
+	if err == nil || err.Error() != `value for "Maxu" out of range` {
 		t.Error("wrong overflow error for uint8:", err)
 	}
 
@@ -739,7 +739,7 @@
 	var o5 outu16
 	enc.Encode(it)
 	err = dec.Decode(&o5)
-	if err == nil || err.String() != `value for "Maxu" out of range` {
+	if err == nil || err.Error() != `value for "Maxu" out of range` {
 		t.Error("wrong overflow error for uint16:", err)
 	}
 
@@ -754,7 +754,7 @@
 	var o6 outu32
 	enc.Encode(it)
 	err = dec.Decode(&o6)
-	if err == nil || err.String() != `value for "Maxu" out of range` {
+	if err == nil || err.Error() != `value for "Maxu" out of range` {
 		t.Error("wrong overflow error for uint32:", err)
 	}
 
@@ -770,7 +770,7 @@
 	var o7 outf32
 	enc.Encode(it)
 	err = dec.Decode(&o7)
-	if err == nil || err.String() != `value for "Maxf" out of range` {
+	if err == nil || err.Error() != `value for "Maxf" out of range` {
 		t.Error("wrong overflow error for float32:", err)
 	}
 
@@ -786,7 +786,7 @@
 	var o8 outc64
 	enc.Encode(it)
 	err = dec.Decode(&o8)
-	if err == nil || err.String() != `value for "Maxc" out of range` {
+	if err == nil || err.Error() != `value for "Maxc" out of range` {
 		t.Error("wrong overflow error for complex64:", err)
 	}
 }
@@ -995,7 +995,7 @@
 	err := NewEncoder(b).Encode(&rec)
 	if err == nil {
 		t.Error("expected error; got none")
-	} else if strings.Index(err.String(), "recursive") < 0 {
+	} else if strings.Index(err.Error(), "recursive") < 0 {
 		t.Error("expected recursive type error; got", err)
 	}
 	// Can't test decode easily because we can't encode one, so we can't pass one to a Decoder.
@@ -1014,7 +1014,7 @@
 	dummyEncoder.encode(b, reflect.ValueOf(&bad0), userType(reflect.TypeOf(&bad0)))
 	if err := dummyEncoder.err; err == nil {
 		t.Error("expected error; got none")
-	} else if strings.Index(err.String(), "type") < 0 {
+	} else if strings.Index(err.Error(), "type") < 0 {
 		t.Error("expected type error; got", err)
 	}
 }
diff --git a/src/pkg/gob/debug.go b/src/pkg/gob/debug.go
index 16c2194..b21c7fa 100644
--- a/src/pkg/gob/debug.go
+++ b/src/pkg/gob/debug.go
@@ -56,7 +56,7 @@
 }
 
 // Read is the usual method. It will first take data that has been read ahead.
-func (p *peekReader) Read(b []byte) (n int, err os.Error) {
+func (p *peekReader) Read(b []byte) (n int, err error) {
 	if len(p.data) == 0 {
 		return p.r.Read(b)
 	}
@@ -70,7 +70,7 @@
 
 // peek returns as many bytes as possible from the unread
 // portion of the stream, up to the length of b.
-func (p *peekReader) peek(b []byte) (n int, err os.Error) {
+func (p *peekReader) peek(b []byte) (n int, err error) {
 	if len(p.data) > 0 {
 		n = copy(b, p.data)
 		if n == len(b) {
@@ -92,7 +92,7 @@
 		if n > 0 {
 			e = nil
 		} else {
-			e = os.EOF
+			e = io.EOF
 		}
 	}
 	return n, e
@@ -164,7 +164,7 @@
 
 // debug implements Debug, but catches panics and returns
 // them as errors to be printed by Debug.
-func debug(r io.Reader) (err os.Error) {
+func debug(r io.Reader) (err error) {
 	defer catchError(&err)
 	fmt.Fprintln(os.Stderr, "Start of debugging")
 	deb := &debugger{
@@ -238,7 +238,7 @@
 func (deb *debugger) loadBlock(eofOK bool) int {
 	n64, w, err := decodeUintReader(deb.r, deb.tmp) // deb.uint64 will error at EOF
 	if err != nil {
-		if eofOK && err == os.EOF {
+		if eofOK && err == io.EOF {
 			return -1
 		}
 		errorf("debug: unexpected error: %s", err)
diff --git a/src/pkg/gob/decode.go b/src/pkg/gob/decode.go
index d027d3f..1515d12 100644
--- a/src/pkg/gob/decode.go
+++ b/src/pkg/gob/decode.go
@@ -9,17 +9,17 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"math"
-	"os"
 	"reflect"
 	"unsafe"
 )
 
 var (
-	errBadUint = os.NewError("gob: encoded unsigned integer out of range")
-	errBadType = os.NewError("gob: unknown type id or corrupted data")
-	errRange   = os.NewError("gob: bad data: field numbers out of bounds")
+	errBadUint = errors.New("gob: encoded unsigned integer out of range")
+	errBadType = errors.New("gob: unknown type id or corrupted data")
+	errRange   = errors.New("gob: bad data: field numbers out of bounds")
 )
 
 // decoderState is the execution state of an instance of the decoder. A new state
@@ -54,13 +54,13 @@
 	dec.freeList = d
 }
 
-func overflow(name string) os.Error {
-	return os.NewError(`value for "` + name + `" out of range`)
+func overflow(name string) error {
+	return errors.New(`value for "` + name + `" out of range`)
 }
 
 // decodeUintReader reads an encoded unsigned integer from an io.Reader.
 // Used only by the Decoder to read the message length.
-func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err os.Error) {
+func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err error) {
 	width = 1
 	_, err = r.Read(buf[0:width])
 	if err != nil {
@@ -77,7 +77,7 @@
 	}
 	width, err = io.ReadFull(r, buf[0:n])
 	if err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return
@@ -95,18 +95,18 @@
 func (state *decoderState) decodeUint() (x uint64) {
 	b, err := state.b.ReadByte()
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	if b <= 0x7f {
 		return uint64(b)
 	}
 	n := -int(int8(b))
 	if n > uint64Size {
-		error(errBadUint)
+		error_(errBadUint)
 	}
 	width, err := state.b.Read(state.buf[0:n])
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	// Don't need to check error; it's safe to loop regardless.
 	// Could check that the high byte is zero but it's not worth it.
@@ -132,10 +132,10 @@
 // The 'instructions' of the decoding machine
 type decInstr struct {
 	op     decOp
-	field  int      // field number of the wire type
-	indir  int      // how many pointer indirections to reach the value in the struct
-	offset uintptr  // offset in the structure of the field to encode
-	ovfl   os.Error // error message for overflow/underflow (for arrays, of the elements)
+	field  int     // field number of the wire type
+	indir  int     // how many pointer indirections to reach the value in the struct
+	offset uintptr // offset in the structure of the field to encode
+	ovfl   error   // error message for overflow/underflow (for arrays, of the elements)
 }
 
 // Since the encoder writes no zeros, if we arrive at a decoder we have
@@ -190,7 +190,7 @@
 	}
 	v := state.decodeInt()
 	if v < math.MinInt8 || math.MaxInt8 < v {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*int8)(p) = int8(v)
 	}
@@ -206,7 +206,7 @@
 	}
 	v := state.decodeUint()
 	if math.MaxUint8 < v {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*uint8)(p) = uint8(v)
 	}
@@ -222,7 +222,7 @@
 	}
 	v := state.decodeInt()
 	if v < math.MinInt16 || math.MaxInt16 < v {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*int16)(p) = int16(v)
 	}
@@ -238,7 +238,7 @@
 	}
 	v := state.decodeUint()
 	if math.MaxUint16 < v {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*uint16)(p) = uint16(v)
 	}
@@ -254,7 +254,7 @@
 	}
 	v := state.decodeInt()
 	if v < math.MinInt32 || math.MaxInt32 < v {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*int32)(p) = int32(v)
 	}
@@ -270,7 +270,7 @@
 	}
 	v := state.decodeUint()
 	if math.MaxUint32 < v {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*uint32)(p) = uint32(v)
 	}
@@ -323,7 +323,7 @@
 	}
 	// +Inf is OK in both 32- and 64-bit floats.  Underflow is always OK.
 	if math.MaxFloat32 < av && av <= math.MaxFloat64 {
-		error(i.ovfl)
+		error_(i.ovfl)
 	} else {
 		*(*float32)(p) = float32(v)
 	}
@@ -464,7 +464,7 @@
 // decodeSingle decodes a top-level value that is not a struct and stores it through p.
 // Such values are preceded by a zero, making them have the memory layout of a
 // struct field (although with an illegal field number).
-func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) (err os.Error) {
+func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) (err error) {
 	state := dec.newDecoderState(&dec.buf)
 	state.fieldnum = singletonField
 	delta := int(state.decodeUint())
@@ -473,7 +473,7 @@
 	}
 	instr := &engine.instr[singletonField]
 	if instr.indir != ut.indir {
-		return os.NewError("gob: internal error: inconsistent indirection")
+		return errors.New("gob: internal error: inconsistent indirection")
 	}
 	ptr := unsafe.Pointer(basep) // offset will be zero
 	if instr.indir > 1 {
@@ -504,7 +504,7 @@
 		}
 		fieldnum := state.fieldnum + delta
 		if fieldnum >= len(engine.instr) {
-			error(errRange)
+			error_(errRange)
 			break
 		}
 		instr := &engine.instr[fieldnum]
@@ -532,7 +532,7 @@
 		}
 		fieldnum := state.fieldnum + delta
 		if fieldnum >= len(engine.instr) {
-			error(errRange)
+			error_(errRange)
 		}
 		instr := &engine.instr[fieldnum]
 		instr.op(instr, state, unsafe.Pointer(nil))
@@ -556,7 +556,7 @@
 }
 
 // decodeArrayHelper does the work for decoding arrays and slices.
-func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl os.Error) {
+func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl error) {
 	instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl}
 	for i := 0; i < length; i++ {
 		up := unsafe.Pointer(p)
@@ -571,7 +571,7 @@
 // decodeArray decodes an array and stores it through p, that is, p points to the zeroth element.
 // The length is an unsigned integer preceding the elements.  Even though the length is redundant
 // (it's part of the type), it's a useful check and is included in the encoding.
-func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl os.Error) {
+func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl error) {
 	if indir > 0 {
 		p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect
 	}
@@ -583,7 +583,7 @@
 
 // decodeIntoValue is a helper for map decoding.  Since maps are decoded using reflection,
 // unlike the other items we can't use a pointer directly.
-func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl os.Error) reflect.Value {
+func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl error) reflect.Value {
 	instr := &decInstr{op, 0, indir, 0, ovfl}
 	up := unsafe.Pointer(unsafeAddr(v))
 	if indir > 1 {
@@ -597,7 +597,7 @@
 // Maps are encoded as a length followed by key:value pairs.
 // Because the internals of maps are not visible to us, we must
 // use reflection rather than pointer magic.
-func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl os.Error) {
+func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl error) {
 	if indir > 0 {
 		p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect
 	}
@@ -620,7 +620,7 @@
 
 // ignoreArrayHelper does the work for discarding arrays and slices.
 func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) {
-	instr := &decInstr{elemOp, 0, 0, 0, os.NewError("no error")}
+	instr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
 	for i := 0; i < length; i++ {
 		elemOp(instr, state, nil)
 	}
@@ -637,8 +637,8 @@
 // ignoreMap discards the data for a map value with no destination.
 func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
 	n := int(state.decodeUint())
-	keyInstr := &decInstr{keyOp, 0, 0, 0, os.NewError("no error")}
-	elemInstr := &decInstr{elemOp, 0, 0, 0, os.NewError("no error")}
+	keyInstr := &decInstr{keyOp, 0, 0, 0, errors.New("no error")}
+	elemInstr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
 	for i := 0; i < n; i++ {
 		keyOp(keyInstr, state, nil)
 		elemOp(elemInstr, state, nil)
@@ -647,7 +647,7 @@
 
 // decodeSlice decodes a slice and stores the slice header through p.
 // Slices are encoded as an unsigned length followed by the elements.
-func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl os.Error) {
+func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl error) {
 	n := int(uintptr(state.decodeUint()))
 	if indir > 0 {
 		up := unsafe.Pointer(p)
@@ -707,7 +707,7 @@
 	// Read the type id of the concrete value.
 	concreteId := dec.decodeTypeSequence(true)
 	if concreteId < 0 {
-		error(dec.err)
+		error_(dec.err)
 	}
 	// Byte count of value is next; we don't care what it is (it's there
 	// in case we want to ignore the value by skipping it completely).
@@ -716,7 +716,7 @@
 	value := allocValue(typ)
 	dec.decodeValue(concreteId, value)
 	if dec.err != nil {
-		error(dec.err)
+		error_(dec.err)
 	}
 	// Allocate the destination interface value.
 	if indir > 0 {
@@ -736,11 +736,11 @@
 	b := make([]byte, state.decodeUint())
 	_, err := state.b.Read(b)
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	id := dec.decodeTypeSequence(true)
 	if id < 0 {
-		error(dec.err)
+		error_(dec.err)
 	}
 	// At this point, the decoder buffer contains a delimited value. Just toss it.
 	state.b.Next(int(state.decodeUint()))
@@ -753,12 +753,12 @@
 	b := make([]byte, state.decodeUint())
 	_, err := state.b.Read(b)
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	// We know it's a GobDecoder, so just call the method directly.
 	err = v.Interface().(GobDecoder).GobDecode(b)
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 }
 
@@ -768,7 +768,7 @@
 	b := make([]byte, state.decodeUint())
 	_, err := state.b.Read(b)
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 }
 
@@ -868,7 +868,7 @@
 			// Generate a closure that calls out to the engine for the nested type.
 			enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ))
 			if err != nil {
-				error(err)
+				error_(err)
 			}
 			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
 				// indirect through enginePtr to delay evaluation for recursive structs.
@@ -930,7 +930,7 @@
 			// Generate a closure that calls out to the engine for the nested type.
 			enginePtr, err := dec.getIgnoreEnginePtr(wireId)
 			if err != nil {
-				error(err)
+				error_(err)
 			}
 			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
 				// indirect through enginePtr to delay evaluation for recursive structs
@@ -1062,23 +1062,23 @@
 
 // compileSingle compiles the decoder engine for a non-struct top-level value, including
 // GobDecoders.
-func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err os.Error) {
+func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
 	rt := ut.user
 	engine = new(decEngine)
 	engine.instr = make([]decInstr, 1) // one item
 	name := rt.String()                // best we can do
 	if !dec.compatibleType(rt, remoteId, make(map[reflect.Type]typeId)) {
-		return nil, os.NewError("gob: wrong type received for local value " + name + ": " + dec.typeString(remoteId))
+		return nil, errors.New("gob: wrong type received for local value " + name + ": " + dec.typeString(remoteId))
 	}
 	op, indir := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp))
-	ovfl := os.NewError(`value for "` + name + `" out of range`)
+	ovfl := errors.New(`value for "` + name + `" out of range`)
 	engine.instr[singletonField] = decInstr{*op, singletonField, indir, 0, ovfl}
 	engine.numInstr = 1
 	return
 }
 
 // compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded.
-func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err os.Error) {
+func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err error) {
 	engine = new(decEngine)
 	engine.instr = make([]decInstr, 1) // one item
 	op := dec.decIgnoreOpFor(remoteId)
@@ -1090,7 +1090,7 @@
 
 // compileDec compiles the decoder engine for a value.  If the value is not a struct,
 // it calls out to compileSingle.
-func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err os.Error) {
+func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
 	rt := ut.base
 	srt := rt
 	if srt.Kind() != reflect.Struct ||
@@ -1105,7 +1105,7 @@
 	} else {
 		wire := dec.wireType[remoteId]
 		if wire == nil {
-			error(errBadType)
+			error_(errBadType)
 		}
 		wireStruct = wire.StructT
 	}
@@ -1141,7 +1141,7 @@
 }
 
 // getDecEnginePtr returns the engine for the specified type.
-func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err os.Error) {
+func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err error) {
 	rt := ut.base
 	decoderMap, ok := dec.decoderCache[rt]
 	if !ok {
@@ -1166,7 +1166,7 @@
 var emptyStructType = reflect.TypeOf(emptyStruct{})
 
 // getDecEnginePtr returns the engine for the specified type when the value is to be discarded.
-func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err os.Error) {
+func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err error) {
 	var ok bool
 	if enginePtr, ok = dec.ignorerCache[wireId]; !ok {
 		// To handle recursive types, mark this engine as underway before compiling.
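// Note on decodeUintReader above: a clean io.EOF is only legitimate before the
// first byte of a message; running dry mid-message is converted to
// io.ErrUnexpectedEOF. A sketch of that convention with a hypothetical
// one-byte length prefix (gob itself uses a variable-length count):
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//		"io"
//	)
//
//	// readFrame reads a one-byte length followed by that many payload bytes.
//	func readFrame(r io.Reader) ([]byte, error) {
//		var length [1]byte
//		if _, err := r.Read(length[:]); err != nil {
//			return nil, err // may be io.EOF: no message at all
//		}
//		payload := make([]byte, length[0])
//		if _, err := io.ReadFull(r, payload); err != nil {
//			if err == io.EOF {
//				err = io.ErrUnexpectedEOF // EOF in mid-message is an error
//			}
//			return nil, err
//		}
//		return payload, nil
//	}
//
//	func main() {
//		_, err := readFrame(bytes.NewReader([]byte{5, 'h', 'i'})) // truncated input
//		fmt.Println(err)                                          // unexpected EOF
//	}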
diff --git a/src/pkg/gob/decoder.go b/src/pkg/gob/decoder.go
index 1d526e3..5e684d3 100644
--- a/src/pkg/gob/decoder.go
+++ b/src/pkg/gob/decoder.go
@@ -7,8 +7,8 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"io"
-	"os"
 	"reflect"
 	"sync"
 )
@@ -25,7 +25,7 @@
 	freeList     *decoderState                           // list of free decoderStates; avoids reallocation
 	countBuf     []byte                                  // used for decoding integers while parsing messages
 	tmp          []byte                                  // temporary storage for i/o; saves reallocating
-	err          os.Error
+	err          error
 }
 
 // NewDecoder returns a new decoder that reads from the io.Reader.
@@ -50,7 +50,7 @@
 func (dec *Decoder) recvType(id typeId) {
 	// Have we already seen this type?  That's an error
 	if id < firstUserId || dec.wireType[id] != nil {
-		dec.err = os.NewError("gob: duplicate type received")
+		dec.err = errors.New("gob: duplicate type received")
 		return
 	}
 
@@ -64,7 +64,7 @@
 	dec.wireType[id] = wire
 }
 
-var errBadCount = os.NewError("invalid message length")
+var errBadCount = errors.New("invalid message length")
 
 // recvMessage reads the next count-delimited item from the input. It is the converse
 // of Encoder.writeMessage. It returns false on EOF or other error reading the message.
@@ -94,7 +94,7 @@
 	// Read the data
 	_, dec.err = io.ReadFull(dec.r, dec.tmp)
 	if dec.err != nil {
-		if dec.err == os.EOF {
+		if dec.err == io.EOF {
 			dec.err = io.ErrUnexpectedEOF
 		}
 		return
@@ -155,7 +155,7 @@
 		// will be absorbed by recvMessage.)
 		if dec.buf.Len() > 0 {
 			if !isInterface {
-				dec.err = os.NewError("extra data in buffer")
+				dec.err = errors.New("extra data in buffer")
 				break
 			}
 			dec.nextUint()
@@ -169,7 +169,7 @@
 // If e is nil, the value will be discarded. Otherwise,
 // the value underlying e must be a pointer to the
 // correct type for the next data item received.
-func (dec *Decoder) Decode(e interface{}) os.Error {
+func (dec *Decoder) Decode(e interface{}) error {
 	if e == nil {
 		return dec.DecodeValue(reflect.Value{})
 	}
@@ -177,7 +177,7 @@
 	// If e represents a value as opposed to a pointer, the answer won't
 	// get back to the caller.  Make sure it's a pointer.
 	if value.Type().Kind() != reflect.Ptr {
-		dec.err = os.NewError("gob: attempt to decode into a non-pointer")
+		dec.err = errors.New("gob: attempt to decode into a non-pointer")
 		return dec.err
 	}
 	return dec.DecodeValue(value)
@@ -187,12 +187,12 @@
 // If v is the zero reflect.Value (v.Kind() == Invalid), DecodeValue discards the value.
 // Otherwise, it stores the value into v.  In that case, v must represent
 // a non-nil pointer to data or be an assignable reflect.Value (v.CanSet())
-func (dec *Decoder) DecodeValue(v reflect.Value) os.Error {
+func (dec *Decoder) DecodeValue(v reflect.Value) error {
 	if v.IsValid() {
 		if v.Kind() == reflect.Ptr && !v.IsNil() {
 			// That's okay, we'll store through the pointer.
 		} else if !v.CanSet() {
-			return os.NewError("gob: DecodeValue of unassignable value")
+			return errors.New("gob: DecodeValue of unassignable value")
 		}
 	}
 	// Make sure we're single-threaded through here.
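// Usage note for the updated signatures: Encode and Decode now return the
// built-in error type, and Decode still requires a pointer destination. A
// minimal round trip (import path "gob" as in this tree; "encoding/gob" after
// the later package reorganization):
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//		"gob"
//		"log"
//	)
//
//	type point struct{ X, Y int }
//
//	func main() {
//		var buf bytes.Buffer
//		if err := gob.NewEncoder(&buf).Encode(point{1, 2}); err != nil {
//			log.Fatal(err)
//		}
//		var p point
//		if err := gob.NewDecoder(&buf).Decode(&p); err != nil { // non-pointer would error
//			log.Fatal(err)
//		}
//		fmt.Println(p)
//	}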
diff --git a/src/pkg/gob/dump.go b/src/pkg/gob/dump.go
index 1555f0f..c4d4331 100644
--- a/src/pkg/gob/dump.go
+++ b/src/pkg/gob/dump.go
@@ -9,7 +9,7 @@
 )
 
 func main() {
-	var err os.Error
+	var err error
 	file := os.Stdin
 	if len(os.Args) > 1 {
 		file, err = os.Open(os.Args[1])
diff --git a/src/pkg/gob/encode.go b/src/pkg/gob/encode.go
index c164435..c7e4823 100644
--- a/src/pkg/gob/encode.go
+++ b/src/pkg/gob/encode.go
@@ -55,7 +55,7 @@
 	if x <= 0x7F {
 		err := state.b.WriteByte(uint8(x))
 		if err != nil {
-			error(err)
+			error_(err)
 		}
 		return
 	}
@@ -68,7 +68,7 @@
 	state.buf[i] = uint8(i - uint64Size) // = loop count, negated
 	_, err := state.b.Write(state.buf[i : uint64Size+1])
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 }
 
@@ -443,7 +443,7 @@
 	state.encodeUint(uint64(len(name)))
 	_, err := state.b.WriteString(name)
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	// Define the type id if necessary.
 	enc.sendTypeDescriptor(enc.writer(), state, ut)
@@ -456,12 +456,12 @@
 	data.Write(spaceForLength)
 	enc.encode(data, iv.Elem(), ut)
 	if enc.err != nil {
-		error(enc.err)
+		error_(enc.err)
 	}
 	enc.popWriter()
 	enc.writeMessage(b, data)
 	if enc.err != nil {
-		error(err)
+		error_(err)
 	}
 	enc.freeEncoderState(state)
 }
@@ -494,7 +494,7 @@
 	// We know it's a GobEncoder, so just call the method directly.
 	data, err := v.Interface().(GobEncoder).GobEncode()
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	state := enc.newEncoderState(b)
 	state.fieldnum = -1
@@ -681,7 +681,7 @@
 func (enc *Encoder) getEncEngine(ut *userTypeInfo) *encEngine {
 	info, err1 := getTypeInfo(ut)
 	if err1 != nil {
-		error(err1)
+		error_(err1)
 	}
 	if info.encoder == nil {
 		// mark this engine as underway before compiling to handle recursive types.
diff --git a/src/pkg/gob/encoder.go b/src/pkg/gob/encoder.go
index 878d082..e4a48df 100644
--- a/src/pkg/gob/encoder.go
+++ b/src/pkg/gob/encoder.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
-	"os"
 	"reflect"
 	"sync"
 )
@@ -21,7 +21,7 @@
 	countState *encoderState           // stage for writing counts
 	freeList   *encoderState           // list of free encoderStates; avoids reallocation
 	byteBuf    bytes.Buffer            // buffer for top-level encoderState
-	err        os.Error
+	err        error
 }
 
 // Before we encode a message, we reserve space at the head of the
@@ -55,10 +55,10 @@
 }
 
 func (enc *Encoder) badType(rt reflect.Type) {
-	enc.setError(os.NewError("gob: can't encode type " + rt.String()))
+	enc.setError(errors.New("gob: can't encode type " + rt.String()))
 }
 
-func (enc *Encoder) setError(err os.Error) {
+func (enc *Encoder) setError(err error) {
 	if enc.err == nil { // remember the first.
 		enc.err = err
 	}
@@ -171,7 +171,7 @@
 
 // Encode transmits the data item represented by the empty interface value,
 // guaranteeing that all necessary type information has been transmitted first.
-func (enc *Encoder) Encode(e interface{}) os.Error {
+func (enc *Encoder) Encode(e interface{}) error {
 	return enc.EncodeValue(reflect.ValueOf(e))
 }
 
@@ -215,7 +215,7 @@
 
 // EncodeValue transmits the data item represented by the reflection value,
 // guaranteeing that all necessary type information has been transmitted first.
-func (enc *Encoder) EncodeValue(value reflect.Value) os.Error {
+func (enc *Encoder) EncodeValue(value reflect.Value) error {
 	// Make sure we're single-threaded through here, so multiple
 	// goroutines can share an encoder.
 	enc.mutex.Lock()
diff --git a/src/pkg/gob/encoder_test.go b/src/pkg/gob/encoder_test.go
index 98c0c97..bc5af12 100644
--- a/src/pkg/gob/encoder_test.go
+++ b/src/pkg/gob/encoder_test.go
@@ -8,7 +8,6 @@
 	"bytes"
 	"fmt"
 	"io"
-	"os"
 	"reflect"
 	"strings"
 	"testing"
@@ -116,7 +115,7 @@
 	badTypeCheck(new(ET4), true, "different type of field", t)
 }
 
-func corruptDataCheck(s string, err os.Error, t *testing.T) {
+func corruptDataCheck(s string, err error, t *testing.T) {
 	b := bytes.NewBufferString(s)
 	dec := NewDecoder(b)
 	err1 := dec.Decode(new(ET2))
@@ -127,7 +126,7 @@
 
 // Check that we survive bad data.
 func TestBadData(t *testing.T) {
-	corruptDataCheck("", os.EOF, t)
+	corruptDataCheck("", io.EOF, t)
 	corruptDataCheck("\x7Fhi", io.ErrUnexpectedEOF, t)
 	corruptDataCheck("\x03now is the time for all good men", errBadType, t)
 }
@@ -149,7 +148,7 @@
 	}
 }
 
-func encAndDec(in, out interface{}) os.Error {
+func encAndDec(in, out interface{}) error {
 	b := new(bytes.Buffer)
 	enc := NewEncoder(b)
 	err := enc.Encode(in)
@@ -225,7 +224,7 @@
 	}
 	t4p := &Type4{3}
 	var t4 Type4 // note: not a pointer.
-	if err := encAndDec(t4p, t4); err == nil || strings.Index(err.String(), "pointer") < 0 {
+	if err := encAndDec(t4p, t4); err == nil || strings.Index(err.Error(), "pointer") < 0 {
 		t.Error("expected error about pointer; got", err)
 	}
 }
@@ -333,7 +332,7 @@
 			t.Errorf("expected error decoding %v: %s", test.in, test.err)
 			continue
 		case err != nil && test.err != "":
-			if strings.Index(err.String(), test.err) < 0 {
+			if strings.Index(err.Error(), test.err) < 0 {
 				t.Errorf("wrong error decoding %v: wanted %s, got %v", test.in, test.err, err)
 			}
 			continue
@@ -359,7 +358,7 @@
 	var ns NonStruct
 	if err := encAndDec(s, &ns); err == nil {
 		t.Error("should get error for struct/non-struct")
-	} else if strings.Index(err.String(), "type") < 0 {
+	} else if strings.Index(err.Error(), "type") < 0 {
 		t.Error("for struct/non-struct expected type error; got", err)
 	}
 	// Now try the other way
@@ -369,7 +368,7 @@
 	}
 	if err := encAndDec(ns, &s); err == nil {
 		t.Error("should get error for non-struct/struct")
-	} else if strings.Index(err.String(), "type") < 0 {
+	} else if strings.Index(err.Error(), "type") < 0 {
 		t.Error("for non-struct/struct expected type error; got", err)
 	}
 }
@@ -524,7 +523,7 @@
 
 type Bug1StructMap map[string]Bug1Elem
 
-func bug1EncDec(in Bug1StructMap, out *Bug1StructMap) os.Error {
+func bug1EncDec(in Bug1StructMap, out *Bug1StructMap) error {
 	return nil
 }
 
@@ -634,7 +633,7 @@
 	b := []byte{0xfb, 0xa5, 0x82, 0x2f, 0xca, 0x1}
 	if err := NewDecoder(bytes.NewBuffer(b)).Decode(nil); err == nil {
 		t.Error("expected error from bad count")
-	} else if err.String() != errBadCount.String() {
+	} else if err.Error() != errBadCount.Error() {
 		t.Error("expected bad count error; got", err)
 	}
 }
diff --git a/src/pkg/gob/error.go b/src/pkg/gob/error.go
index 106543d..b0c4008 100644
--- a/src/pkg/gob/error.go
+++ b/src/pkg/gob/error.go
@@ -4,10 +4,7 @@
 
 package gob
 
-import (
-	"fmt"
-	"os"
-)
+import "fmt"
 
 // Errors in decoding and encoding are handled using panic and recover.
 // Panics caused by user error (that is, everything except run-time panics
@@ -18,23 +15,23 @@
 
 // A gobError wraps an os.Error and is used to distinguish errors (panics) generated in this package.
 type gobError struct {
-	err os.Error
+	err error
 }
 
 // errorf is like error but takes Printf-style arguments to construct an os.Error.
 // It always prefixes the message with "gob: ".
 func errorf(format string, args ...interface{}) {
-	error(fmt.Errorf("gob: "+format, args...))
+	error_(fmt.Errorf("gob: "+format, args...))
 }
 
 // error wraps the argument error and uses it as the argument to panic.
-func error(err os.Error) {
+func error_(err error) {
 	panic(gobError{err})
 }
 
 // catchError is meant to be used as a deferred function to turn a panic(gobError) into a
 // plain os.Error.  It overwrites the error return of the function that deferred its call.
-func catchError(err *os.Error) {
+func catchError(err *error) {
 	if e := recover(); e != nil {
 		*err = e.(gobError).err // Will re-panic if not one of our errors, such as a runtime error.
 	}
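// The helper loses its old name because "error" is now the predeclared
// interface type. A stripped-down sketch of the same panic/recover scheme
// used in the file above (gobError, error_, catchError):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//	)
//
//	type gobError struct{ err error }
//
//	func error_(err error) { panic(gobError{err}) }
//
//	func catchError(err *error) {
//		if e := recover(); e != nil {
//			*err = e.(gobError).err // re-panics on panics that aren't ours
//		}
//	}
//
//	func decodeSomething(corrupt bool) (err error) {
//		defer catchError(&err)
//		if corrupt {
//			error_(errors.New("gob: corrupted data"))
//		}
//		return nil
//	}
//
//	func main() {
//		fmt.Println(decodeSomething(true)) // gob: corrupted data
//	}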
diff --git a/src/pkg/gob/gobencdec_test.go b/src/pkg/gob/gobencdec_test.go
index 01addbe..eacfd84 100644
--- a/src/pkg/gob/gobencdec_test.go
+++ b/src/pkg/gob/gobencdec_test.go
@@ -8,8 +8,9 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
-	"os"
+	"io"
 	"strings"
 	"testing"
 )
@@ -34,7 +35,7 @@
 
 // The relevant methods
 
-func (g *ByteStruct) GobEncode() ([]byte, os.Error) {
+func (g *ByteStruct) GobEncode() ([]byte, error) {
 	b := make([]byte, 3)
 	b[0] = g.a
 	b[1] = g.a + 1
@@ -42,68 +43,68 @@
 	return b, nil
 }
 
-func (g *ByteStruct) GobDecode(data []byte) os.Error {
+func (g *ByteStruct) GobDecode(data []byte) error {
 	if g == nil {
-		return os.NewError("NIL RECEIVER")
+		return errors.New("NIL RECEIVER")
 	}
 	// Expect N sequential-valued bytes.
 	if len(data) == 0 {
-		return os.EOF
+		return io.EOF
 	}
 	g.a = data[0]
 	for i, c := range data {
 		if c != g.a+byte(i) {
-			return os.NewError("invalid data sequence")
+			return errors.New("invalid data sequence")
 		}
 	}
 	return nil
 }
 
-func (g *StringStruct) GobEncode() ([]byte, os.Error) {
+func (g *StringStruct) GobEncode() ([]byte, error) {
 	return []byte(g.s), nil
 }
 
-func (g *StringStruct) GobDecode(data []byte) os.Error {
+func (g *StringStruct) GobDecode(data []byte) error {
 	// Expect N sequential-valued bytes.
 	if len(data) == 0 {
-		return os.EOF
+		return io.EOF
 	}
 	a := data[0]
 	for i, c := range data {
 		if c != a+byte(i) {
-			return os.NewError("invalid data sequence")
+			return errors.New("invalid data sequence")
 		}
 	}
 	g.s = string(data)
 	return nil
 }
 
-func (a *ArrayStruct) GobEncode() ([]byte, os.Error) {
+func (a *ArrayStruct) GobEncode() ([]byte, error) {
 	return a.a[:], nil
 }
 
-func (a *ArrayStruct) GobDecode(data []byte) os.Error {
+func (a *ArrayStruct) GobDecode(data []byte) error {
 	if len(data) != len(a.a) {
-		return os.NewError("wrong length in array decode")
+		return errors.New("wrong length in array decode")
 	}
 	copy(a.a[:], data)
 	return nil
 }
 
-func (g *Gobber) GobEncode() ([]byte, os.Error) {
+func (g *Gobber) GobEncode() ([]byte, error) {
 	return []byte(fmt.Sprintf("VALUE=%d", *g)), nil
 }
 
-func (g *Gobber) GobDecode(data []byte) os.Error {
+func (g *Gobber) GobDecode(data []byte) error {
 	_, err := fmt.Sscanf(string(data), "VALUE=%d", (*int)(g))
 	return err
 }
 
-func (v ValueGobber) GobEncode() ([]byte, os.Error) {
+func (v ValueGobber) GobEncode() ([]byte, error) {
 	return []byte(fmt.Sprintf("VALUE=%s", v)), nil
 }
 
-func (v *ValueGobber) GobDecode(data []byte) os.Error {
+func (v *ValueGobber) GobDecode(data []byte) error {
 	_, err := fmt.Sscanf(string(data), "VALUE=%s", (*string)(v))
 	return err
 }
@@ -372,7 +373,7 @@
 	if err == nil {
 		t.Fatal("expected decode error for mismatched fields (encoder to non-decoder)")
 	}
-	if strings.Index(err.String(), "type") < 0 {
+	if strings.Index(err.Error(), "type") < 0 {
 		t.Fatal("expected type error; got", err)
 	}
 	// Non-encoder to GobDecoder: error
@@ -386,7 +387,7 @@
 	if err == nil {
 		t.Fatal("expected decode error for mismatched fields (non-encoder to decoder)")
 	}
-	if strings.Index(err.String(), "type") < 0 {
+	if strings.Index(err.Error(), "type") < 0 {
 		t.Fatal("expected type error; got", err)
 	}
 }
@@ -497,11 +498,11 @@
 	return br.foo + "-" + br.bar
 }
 
-func (br *gobDecoderBug0) GobEncode() ([]byte, os.Error) {
+func (br *gobDecoderBug0) GobEncode() ([]byte, error) {
 	return []byte(br.String()), nil
 }
 
-func (br *gobDecoderBug0) GobDecode(b []byte) os.Error {
+func (br *gobDecoderBug0) GobDecode(b []byte) error {
 	br.foo = "foo"
 	br.bar = "bar"
 	return nil
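// A type satisfying the updated GobEncoder/GobDecoder interfaces, whose
// methods now return error rather than os.Error. This is a sketch modeled on
// the Gobber type above, not taken from the test file:
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//		"gob" // import path as in this tree
//		"log"
//	)
//
//	type Gauge int
//
//	func (g Gauge) GobEncode() ([]byte, error) {
//		return []byte(fmt.Sprintf("GAUGE=%d", int(g))), nil
//	}
//
//	func (g *Gauge) GobDecode(data []byte) error {
//		_, err := fmt.Sscanf(string(data), "GAUGE=%d", (*int)(g))
//		return err
//	}
//
//	func main() {
//		var buf bytes.Buffer
//		if err := gob.NewEncoder(&buf).Encode(Gauge(42)); err != nil {
//			log.Fatal(err)
//		}
//		var g Gauge
//		if err := gob.NewDecoder(&buf).Decode(&g); err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(g) // 42
//	}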
diff --git a/src/pkg/gob/timing_test.go b/src/pkg/gob/timing_test.go
index 2a2be73..47437a6 100644
--- a/src/pkg/gob/timing_test.go
+++ b/src/pkg/gob/timing_test.go
@@ -39,7 +39,7 @@
 func BenchmarkEndToEndPipe(b *testing.B) {
 	r, w, err := os.Pipe()
 	if err != nil {
-		panic("can't get pipe:" + err.String())
+		panic("can't get pipe:" + err.Error())
 	}
 	benchmarkEndToEnd(r, w, b)
 }
diff --git a/src/pkg/gob/type.go b/src/pkg/gob/type.go
index 870101e..c3bc7c7 100644
--- a/src/pkg/gob/type.go
+++ b/src/pkg/gob/type.go
@@ -5,6 +5,7 @@
 package gob
 
 import (
+	"errors"
 	"fmt"
 	"os"
 	"reflect"
@@ -36,7 +37,7 @@
 // validType returns, and saves, the information associated with user-provided type rt.
 // If the user type is not valid, err will be non-nil.  To be used when the error handler
 // is not set up.
-func validUserType(rt reflect.Type) (ut *userTypeInfo, err os.Error) {
+func validUserType(rt reflect.Type) (ut *userTypeInfo, err error) {
 	userTypeLock.RLock()
 	ut = userTypeCache[rt]
 	userTypeLock.RUnlock()
@@ -67,7 +68,7 @@
 		ut.base = pt.Elem()
 		if ut.base == slowpoke { // ut.base lapped slowpoke
 			// recursive pointer type.
-			return nil, os.NewError("can't represent recursive pointer type " + ut.base.String())
+			return nil, errors.New("can't represent recursive pointer type " + ut.base.String())
 		}
 		if ut.indir%2 == 0 {
 			slowpoke = slowpoke.Elem()
@@ -125,7 +126,7 @@
 func userType(rt reflect.Type) *userTypeInfo {
 	ut, err := validUserType(rt)
 	if err != nil {
-		error(err)
+		error_(err)
 	}
 	return ut
 }
@@ -396,12 +397,12 @@
 // of ut.
 // This is only called from the encoding side. The decoding side
 // works through typeIds and userTypeInfos alone.
-func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) {
+func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
 	// Does this type implement GobEncoder?
 	if ut.isGobEncoder {
 		return newGobEncoderType(name), nil
 	}
-	var err os.Error
+	var err error
 	var type0, type1 gobType
 	defer func() {
 		if err != nil {
@@ -503,7 +504,7 @@
 		return st, nil
 
 	default:
-		return nil, os.NewError("gob NewTypeObject can't handle type: " + rt.String())
+		return nil, errors.New("gob NewTypeObject can't handle type: " + rt.String())
 	}
 	return nil, nil
 }
@@ -516,7 +517,7 @@
 
 // getBaseType returns the Gob type describing the given reflect.Type's base type.
 // typeLock must be held.
-func getBaseType(name string, rt reflect.Type) (gobType, os.Error) {
+func getBaseType(name string, rt reflect.Type) (gobType, error) {
 	ut := userType(rt)
 	return getType(name, ut, ut.base)
 }
@@ -526,7 +527,7 @@
 // which may be pointers.  All other types are handled through the
 // base type, never a pointer.
 // typeLock must be held.
-func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) {
+func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, error) {
 	typ, present := types[rt]
 	if present {
 		return typ, nil
@@ -609,7 +610,7 @@
 var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock
 
 // typeLock must be held.
-func getTypeInfo(ut *userTypeInfo) (*typeInfo, os.Error) {
+func getTypeInfo(ut *userTypeInfo) (*typeInfo, error) {
 	rt := ut.base
 	if ut.isGobEncoder {
 		// We want the user type, not the base type.
@@ -658,7 +659,7 @@
 func mustGetTypeInfo(rt reflect.Type) *typeInfo {
 	t, err := getTypeInfo(userType(rt))
 	if err != nil {
-		panic("getTypeInfo: " + err.String())
+		panic("getTypeInfo: " + err.Error())
 	}
 	return t
 }
@@ -678,7 +679,7 @@
 	// GobEncode returns a byte slice representing the encoding of the
 	// receiver for transmission to a GobDecoder, usually of the same
 	// concrete type.
-	GobEncode() ([]byte, os.Error)
+	GobEncode() ([]byte, error)
 }
 
 // GobDecoder is the interface describing data that provides its own
@@ -687,7 +688,7 @@
 	// GobDecode overwrites the receiver, which must be a pointer,
 	// with the value represented by the byte slice, which was written
 	// by GobEncode, usually for the same concrete type.
-	GobDecode([]byte) os.Error
+	GobDecode([]byte) error
 }
 
 var (
diff --git a/src/pkg/gob/type_test.go b/src/pkg/gob/type_test.go
index 411ffb7..a6ac9c4 100644
--- a/src/pkg/gob/type_test.go
+++ b/src/pkg/gob/type_test.go
@@ -28,7 +28,7 @@
 	defer typeLock.Unlock()
 	t, err := getBaseType(name, rt)
 	if err != nil {
-		panic("getTypeUnlocked: " + err.String())
+		panic("getTypeUnlocked: " + err.Error())
 	}
 	return t
 }
diff --git a/src/pkg/hash/adler32/adler32.go b/src/pkg/hash/adler32/adler32.go
index 84943d9..10bed2f 100644
--- a/src/pkg/hash/adler32/adler32.go
+++ b/src/pkg/hash/adler32/adler32.go
@@ -11,10 +11,7 @@
 //	significant-byte first (network) order.
 package adler32
 
-import (
-	"hash"
-	"os"
-)
+import "hash"
 
 const (
 	mod = 65521
@@ -67,7 +64,7 @@
 	return b<<16 | a
 }
 
-func (d *digest) Write(p []byte) (nn int, err os.Error) {
+func (d *digest) Write(p []byte) (nn int, err error) {
 	d.a, d.b = update(d.a, d.b, p)
 	return len(p), nil
 }
diff --git a/src/pkg/hash/crc32/crc32.go b/src/pkg/hash/crc32/crc32.go
index 0245b1e..5980ec0 100644
--- a/src/pkg/hash/crc32/crc32.go
+++ b/src/pkg/hash/crc32/crc32.go
@@ -9,7 +9,6 @@
 
 import (
 	"hash"
-	"os"
 	"sync"
 )
 
@@ -113,7 +112,7 @@
 	return update(crc, tab, p)
 }
 
-func (d *digest) Write(p []byte) (n int, err os.Error) {
+func (d *digest) Write(p []byte) (n int, err error) {
 	d.crc = Update(d.crc, d.tab, p)
 	return len(p), nil
 }
diff --git a/src/pkg/hash/crc64/crc64.go b/src/pkg/hash/crc64/crc64.go
index ae37e78..42e53c3 100644
--- a/src/pkg/hash/crc64/crc64.go
+++ b/src/pkg/hash/crc64/crc64.go
@@ -7,10 +7,7 @@
 // information.
 package crc64
 
-import (
-	"hash"
-	"os"
-)
+import "hash"
 
 // The size of a CRC-64 checksum in bytes.
 const Size = 8
@@ -71,7 +68,7 @@
 	return update(crc, tab, p)
 }
 
-func (d *digest) Write(p []byte) (n int, err os.Error) {
+func (d *digest) Write(p []byte) (n int, err error) {
 	d.crc = update(d.crc, d.tab, p)
 	return len(p), nil
 }
diff --git a/src/pkg/hash/fnv/fnv.go b/src/pkg/hash/fnv/fnv.go
index 3ff7d7c..ce3ed0d 100644
--- a/src/pkg/hash/fnv/fnv.go
+++ b/src/pkg/hash/fnv/fnv.go
@@ -10,7 +10,6 @@
 import (
 	"encoding/binary"
 	"hash"
-	"os"
 )
 
 type (
@@ -61,7 +60,7 @@
 func (s *sum64) Sum64() uint64  { return uint64(*s) }
 func (s *sum64a) Sum64() uint64 { return uint64(*s) }
 
-func (s *sum32) Write(data []byte) (int, os.Error) {
+func (s *sum32) Write(data []byte) (int, error) {
 	hash := *s
 	for _, c := range data {
 		hash *= prime32
@@ -71,7 +70,7 @@
 	return len(data), nil
 }
 
-func (s *sum32a) Write(data []byte) (int, os.Error) {
+func (s *sum32a) Write(data []byte) (int, error) {
 	hash := *s
 	for _, c := range data {
 		hash ^= sum32a(c)
@@ -81,7 +80,7 @@
 	return len(data), nil
 }
 
-func (s *sum64) Write(data []byte) (int, os.Error) {
+func (s *sum64) Write(data []byte) (int, error) {
 	hash := *s
 	for _, c := range data {
 		hash *= prime64
@@ -91,7 +90,7 @@
 	return len(data), nil
 }
 
-func (s *sum64a) Write(data []byte) (int, os.Error) {
+func (s *sum64a) Write(data []byte) (int, error) {
 	hash := *s
 	for _, c := range data {
 		hash ^= sum64a(c)
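// The checksum digests above keep satisfying io.Writer after the rewrite;
// their Write never fails, it simply returns (len(p), nil). Typical use,
// shown here with hash/crc32:
//
//	package main
//
//	import (
//		"fmt"
//		"hash/crc32"
//		"io"
//	)
//
//	func main() {
//		h := crc32.NewIEEE()
//		if _, err := io.WriteString(h, "hello, world"); err != nil {
//			panic(err) // unreachable for these digests, but the signature uses error
//		}
//		fmt.Printf("%08x\n", h.Sum32())
//	}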
diff --git a/src/pkg/html/escape.go b/src/pkg/html/escape.go
index 69e0028..b8e6571 100644
--- a/src/pkg/html/escape.go
+++ b/src/pkg/html/escape.go
@@ -6,7 +6,6 @@
 
 import (
 	"bytes"
-	"os"
 	"strings"
 	"utf8"
 )
@@ -195,7 +194,7 @@
 
 const escapedChars = `&'<>"`
 
-func escape(w writer, s string) os.Error {
+func escape(w writer, s string) error {
 	i := strings.IndexAny(s, escapedChars)
 	for i != -1 {
 		if _, err := w.WriteString(s[:i]); err != nil {
diff --git a/src/pkg/html/parse.go b/src/pkg/html/parse.go
index 54f7e2e..c9f0165 100644
--- a/src/pkg/html/parse.go
+++ b/src/pkg/html/parse.go
@@ -6,7 +6,6 @@
 
 import (
 	"io"
-	"os"
 	"strings"
 )
 
@@ -240,7 +239,7 @@
 
 // read reads the next token. This is usually from the tokenizer, but it may
 // be the synthesized end tag implied by a self-closing tag.
-func (p *parser) read() os.Error {
+func (p *parser) read() error {
 	if p.hasSelfClosingToken {
 		p.hasSelfClosingToken = false
 		p.tok.Type = EndTagToken
@@ -1136,7 +1135,7 @@
 
 // Parse returns the parse tree for the HTML from the given Reader.
 // The input is assumed to be UTF-8 encoded.
-func Parse(r io.Reader) (*Node, os.Error) {
+func Parse(r io.Reader) (*Node, error) {
 	p := &parser{
 		tokenizer: NewTokenizer(r),
 		doc: &Node{
@@ -1150,7 +1149,7 @@
 	for {
 		if consumed {
 			if err := p.read(); err != nil {
-				if err == os.EOF {
+				if err == io.EOF {
 					break
 				}
 				return nil, err
diff --git a/src/pkg/html/parse_test.go b/src/pkg/html/parse_test.go
index b9572fa..3fa4037 100644
--- a/src/pkg/html/parse_test.go
+++ b/src/pkg/html/parse_test.go
@@ -7,6 +7,7 @@
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -15,7 +16,7 @@
 	"testing"
 )
 
-func pipeErr(err os.Error) io.Reader {
+func pipeErr(err error) io.Reader {
 	pr, pw := io.Pipe()
 	pw.CloseWithError(err)
 	return pr
@@ -76,13 +77,13 @@
 	}
 }
 
-func dumpLevel(w io.Writer, n *Node, level int) os.Error {
+func dumpLevel(w io.Writer, n *Node, level int) error {
 	dumpIndent(w, level)
 	switch n.Type {
 	case ErrorNode:
-		return os.NewError("unexpected ErrorNode")
+		return errors.New("unexpected ErrorNode")
 	case DocumentNode:
-		return os.NewError("unexpected DocumentNode")
+		return errors.New("unexpected DocumentNode")
 	case ElementNode:
 		fmt.Fprintf(w, "<%s>", n.Data)
 		for _, a := range n.Attr {
@@ -97,9 +98,9 @@
 	case DoctypeNode:
 		fmt.Fprintf(w, "<!DOCTYPE %s>", n.Data)
 	case scopeMarkerNode:
-		return os.NewError("unexpected scopeMarkerNode")
+		return errors.New("unexpected scopeMarkerNode")
 	default:
-		return os.NewError("unknown node type")
+		return errors.New("unknown node type")
 	}
 	io.WriteString(w, "\n")
 	for _, c := range n.Child {
@@ -110,7 +111,7 @@
 	return nil
 }
 
-func dump(n *Node) (string, os.Error) {
+func dump(n *Node) (string, error) {
 	if n == nil || len(n.Child) == 0 {
 		return "", nil
 	}
diff --git a/src/pkg/html/render.go b/src/pkg/html/render.go
index 0522b6e..c815f35 100644
--- a/src/pkg/html/render.go
+++ b/src/pkg/html/render.go
@@ -6,15 +6,15 @@
 
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
-	"os"
 )
 
 type writer interface {
 	io.Writer
-	WriteByte(byte) os.Error
-	WriteString(string) (int, os.Error)
+	WriteByte(byte) error
+	WriteString(string) (int, error)
 }
 
 // Render renders the parse tree n to the given writer.
@@ -41,7 +41,7 @@
 // text node would become a tree containing <html>, <head> and <body> elements.
 // Another example is that the programmatic equivalent of "a<head>b</head>c"
 // becomes "<html><head><head/><body>abc</body></html>".
-func Render(w io.Writer, n *Node) os.Error {
+func Render(w io.Writer, n *Node) error {
 	if x, ok := w.(writer); ok {
 		return render(x, n)
 	}
@@ -52,11 +52,11 @@
 	return buf.Flush()
 }
 
-func render(w writer, n *Node) os.Error {
+func render(w writer, n *Node) error {
 	// Render non-element nodes; these are the easy cases.
 	switch n.Type {
 	case ErrorNode:
-		return os.NewError("html: cannot render an ErrorNode node")
+		return errors.New("html: cannot render an ErrorNode node")
 	case TextNode:
 		return escape(w, n.Data)
 	case DocumentNode:
@@ -88,7 +88,7 @@
 		}
 		return w.WriteByte('>')
 	default:
-		return os.NewError("html: unknown node type")
+		return errors.New("html: unknown node type")
 	}
 
 	// Render the <xxx> opening tag.
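// Render's writer interface (WriteByte(byte) error, WriteString(string)
// (int, error)) is exactly the shape *bufio.Writer provides, which is why the
// fallback path wraps the destination in bufio and flushes at the end. A
// sketch of that shape, independent of the html package:
//
//	package main
//
//	import (
//		"bufio"
//		"fmt"
//		"os"
//	)
//
//	type writer interface {
//		WriteByte(byte) error
//		WriteString(string) (int, error)
//	}
//
//	func emit(w writer) error {
//		if err := w.WriteByte('<'); err != nil {
//			return err
//		}
//		_, err := w.WriteString("html>")
//		return err
//	}
//
//	func main() {
//		buf := bufio.NewWriter(os.Stdout) // *bufio.Writer satisfies writer
//		if err := emit(buf); err != nil {
//			fmt.Fprintln(os.Stderr, err)
//			return
//		}
//		buf.Flush()
//	}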
diff --git a/src/pkg/html/token.go b/src/pkg/html/token.go
index 952d174..c5b8a1c 100644
--- a/src/pkg/html/token.go
+++ b/src/pkg/html/token.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"strconv"
 	"strings"
 )
@@ -127,7 +126,7 @@
 	// Next call would set z.err to os.EOF but return a TextToken, and all
 	// subsequent Next calls would return an ErrorToken.
 	// err is never reset. Once it becomes non-nil, it stays non-nil.
-	err os.Error
+	err error
 	// buf[raw.start:raw.end] holds the raw bytes of the current token.
 	// buf[raw.end:] is buffered input that will yield future tokens.
 	raw span
@@ -152,7 +151,7 @@
 
 // Error returns the error associated with the most recent ErrorToken token.
 // This is typically os.EOF, meaning the end of tokenization.
-func (z *Tokenizer) Error() os.Error {
+func (z *Tokenizer) Error() error {
 	if z.tt != ErrorToken {
 		return nil
 	}
diff --git a/src/pkg/html/token_test.go b/src/pkg/html/token_test.go
index a5efdf2..76cc9f8 100644
--- a/src/pkg/html/token_test.go
+++ b/src/pkg/html/token_test.go
@@ -6,7 +6,7 @@
 
 import (
 	"bytes"
-	"os"
+	"io"
 	"strings"
 	"testing"
 )
@@ -438,7 +438,7 @@
 			}
 		}
 		z.Next()
-		if z.Error() != os.EOF {
+		if z.Error() != io.EOF {
 			t.Errorf("%s: want EOF got %q", tt.desc, z.Error())
 		}
 	}
@@ -543,7 +543,7 @@
 		tt := z.Next()
 		switch tt {
 		case ErrorToken:
-			if z.Error() != os.EOF {
+			if z.Error() != io.EOF {
 				t.Error(z.Error())
 			}
 			break loop
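// The tests above treat io.EOF from Tokenizer.Error as the normal end of
// input rather than a failure. The same convention applies to any read loop;
// a generic sketch:
//
//	package main
//
//	import (
//		"bufio"
//		"fmt"
//		"io"
//		"strings"
//	)
//
//	func main() {
//		r := bufio.NewReader(strings.NewReader("a\nb\n"))
//		for {
//			line, err := r.ReadString('\n')
//			fmt.Print(line)
//			if err == io.EOF {
//				break // normal termination, not a failure
//			}
//			if err != nil {
//				fmt.Println("read error:", err)
//				break
//			}
//		}
//	}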
diff --git a/src/pkg/http/cgi/child.go b/src/pkg/http/cgi/child.go
index bf14c04..1618268 100644
--- a/src/pkg/http/cgi/child.go
+++ b/src/pkg/http/cgi/child.go
@@ -10,6 +10,7 @@
 import (
 	"bufio"
 	"crypto/tls"
+	"errors"
 	"fmt"
 	"http"
 	"io"
@@ -25,7 +26,7 @@
 // environment. This assumes the current program is being run
 // by a web server in a CGI environment.
 // The returned Request's Body is populated, if applicable.
-func Request() (*http.Request, os.Error) {
+func Request() (*http.Request, error) {
 	r, err := RequestFromMap(envMap(os.Environ()))
 	if err != nil {
 		return nil, err
@@ -48,18 +49,18 @@
 
 // RequestFromMap creates an http.Request from CGI variables.
 // The returned Request's Body field is not populated.
-func RequestFromMap(params map[string]string) (*http.Request, os.Error) {
+func RequestFromMap(params map[string]string) (*http.Request, error) {
 	r := new(http.Request)
 	r.Method = params["REQUEST_METHOD"]
 	if r.Method == "" {
-		return nil, os.NewError("cgi: no REQUEST_METHOD in environment")
+		return nil, errors.New("cgi: no REQUEST_METHOD in environment")
 	}
 
 	r.Proto = params["SERVER_PROTOCOL"]
 	var ok bool
 	r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto)
 	if !ok {
-		return nil, os.NewError("cgi: invalid SERVER_PROTOCOL version")
+		return nil, errors.New("cgi: invalid SERVER_PROTOCOL version")
 	}
 
 	r.Close = true
@@ -71,7 +72,7 @@
 	if lenstr := params["CONTENT_LENGTH"]; lenstr != "" {
 		clen, err := strconv.Atoi64(lenstr)
 		if err != nil {
-			return nil, os.NewError("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
+			return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr)
 		}
 		r.ContentLength = clen
 	}
@@ -96,7 +97,7 @@
 		rawurl := "http://" + r.Host + params["REQUEST_URI"]
 		url, err := url.Parse(rawurl)
 		if err != nil {
-			return nil, os.NewError("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
+			return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl)
 		}
 		r.URL = url
 	}
@@ -106,7 +107,7 @@
 		uriStr := params["REQUEST_URI"]
 		url, err := url.Parse(uriStr)
 		if err != nil {
-			return nil, os.NewError("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
+			return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr)
 		}
 		r.URL = url
 	}
@@ -129,7 +130,7 @@
 // request, if any. If there's no current CGI environment
 // an error is returned. The provided handler may be nil to use
 // http.DefaultServeMux.
-func Serve(handler http.Handler) os.Error {
+func Serve(handler http.Handler) error {
 	req, err := Request()
 	if err != nil {
 		return err
@@ -164,7 +165,7 @@
 	return r.header
 }
 
-func (r *response) Write(p []byte) (n int, err os.Error) {
+func (r *response) Write(p []byte) (n int, err error) {
 	if !r.headerSent {
 		r.WriteHeader(http.StatusOK)
 	}
diff --git a/src/pkg/http/cgi/host.go b/src/pkg/http/cgi/host.go
index 365a712..8c999c0 100644
--- a/src/pkg/http/cgi/host.go
+++ b/src/pkg/http/cgi/host.go
@@ -188,7 +188,7 @@
 		cwd = "."
 	}
 
-	internalError := func(err os.Error) {
+	internalError := func(err error) {
 		rw.WriteHeader(http.StatusInternalServerError)
 		h.printf("CGI error: %v", err)
 	}
@@ -227,7 +227,7 @@
 			h.printf("cgi: long header line from subprocess.")
 			return
 		}
-		if err == os.EOF {
+		if err == io.EOF {
 			break
 		}
 		if err != nil {
diff --git a/src/pkg/http/cgi/host_test.go b/src/pkg/http/cgi/host_test.go
index 8111ba1..2bfe18b 100644
--- a/src/pkg/http/cgi/host_test.go
+++ b/src/pkg/http/cgi/host_test.go
@@ -45,7 +45,7 @@
 	for {
 		line, err := rw.Body.ReadString('\n')
 		switch {
-		case err == os.EOF:
+		case err == io.EOF:
 			break readlines
 		case err != nil:
 			t.Fatalf("unexpected error reading from CGI: %v", err)
@@ -410,7 +410,7 @@
 	cgifile, _ := filepath.Abs("testdata/test.cgi")
 
 	var perl string
-	var err os.Error
+	var err error
 	perl, err = exec.LookPath("perl")
 	if err != nil {
 		return
@@ -452,7 +452,7 @@
 	cgifile, _ := filepath.Abs("testdata/test.cgi")
 
 	var perl string
-	var err os.Error
+	var err error
 	perl, err = exec.LookPath("perl")
 	if err != nil {
 		return
diff --git a/src/pkg/http/chunked.go b/src/pkg/http/chunked.go
index eff9ae2..157e1c4 100644
--- a/src/pkg/http/chunked.go
+++ b/src/pkg/http/chunked.go
@@ -8,7 +8,6 @@
 	"bufio"
 	"io"
 	"log"
-	"os"
 	"strconv"
 )
 
@@ -37,7 +36,7 @@
 // Write the contents of data as one chunk to Wire.
 // NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has
 // a bug since it does not check for success of io.WriteString
-func (cw *chunkedWriter) Write(data []byte) (n int, err os.Error) {
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
 
 	// Don't send 0-length data. It looks like EOF for chunked encoding.
 	if len(data) == 0 {
@@ -61,7 +60,7 @@
 	return
 }
 
-func (cw *chunkedWriter) Close() os.Error {
+func (cw *chunkedWriter) Close() error {
 	_, err := io.WriteString(cw.Wire, "0\r\n")
 	return err
 }
diff --git a/src/pkg/http/client.go b/src/pkg/http/client.go
index e939b96..503cc89 100644
--- a/src/pkg/http/client.go
+++ b/src/pkg/http/client.go
@@ -11,9 +11,9 @@
 
 import (
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
-	"os"
 	"strings"
 	"url"
 )
@@ -37,7 +37,7 @@
 	//
 	// If CheckRedirect is nil, the Client uses its default policy,
 	// which is to stop after 10 consecutive requests.
-	CheckRedirect func(req *Request, via []*Request) os.Error
+	CheckRedirect func(req *Request, via []*Request) error
 }
 
 // DefaultClient is the default Client and is used by Get, Head, and Post.
@@ -62,7 +62,7 @@
 	// RoundTrip should not modify the request, except for
 	// consuming the Body.  The request's URL and Header fields
 	// are guaranteed to be initialized.
-	RoundTrip(*Request) (*Response, os.Error)
+	RoundTrip(*Request) (*Response, error)
 }
 
 // Given a string of the form "host", "host:port", or "[ipv6::address]:port",
@@ -88,7 +88,7 @@
 // connection to the server for a subsequent "keep-alive" request.
 //
 // Generally Get, Post, or PostForm will be used instead of Do.
-func (c *Client) Do(req *Request) (resp *Response, err os.Error) {
+func (c *Client) Do(req *Request) (resp *Response, err error) {
 	if req.Method == "GET" || req.Method == "HEAD" {
 		return c.doFollowingRedirects(req)
 	}
@@ -96,17 +96,17 @@
 }
 
 // send issues an HTTP request.  Caller should close resp.Body when done reading from it.
-func send(req *Request, t RoundTripper) (resp *Response, err os.Error) {
+func send(req *Request, t RoundTripper) (resp *Response, err error) {
 	if t == nil {
 		t = DefaultTransport
 		if t == nil {
-			err = os.NewError("http: no Client.Transport or DefaultTransport")
+			err = errors.New("http: no Client.Transport or DefaultTransport")
 			return
 		}
 	}
 
 	if req.URL == nil {
-		return nil, os.NewError("http: nil Request.URL")
+		return nil, errors.New("http: nil Request.URL")
 	}
 
 	// Most the callers of send (Get, Post, et al) don't need
@@ -144,7 +144,7 @@
 // Caller should close r.Body when done reading from it.
 //
 // Get is a convenience wrapper around DefaultClient.Get.
-func Get(url string) (r *Response, err os.Error) {
+func Get(url string) (r *Response, err error) {
 	return DefaultClient.Get(url)
 }
 
@@ -158,7 +158,7 @@
 //    307 (Temporary Redirect)
 //
 // Caller should close r.Body when done reading from it.
-func (c *Client) Get(url string) (r *Response, err os.Error) {
+func (c *Client) Get(url string) (r *Response, err error) {
 	req, err := NewRequest("GET", url, nil)
 	if err != nil {
 		return nil, err
@@ -166,7 +166,7 @@
 	return c.doFollowingRedirects(req)
 }
 
-func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err os.Error) {
+func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err error) {
 	// TODO: if/when we add cookie support, the redirected request shouldn't
 	// necessarily supply the same cookies as the original.
 	var base *url.URL
@@ -177,7 +177,7 @@
 	var via []*Request
 
 	if ireq.URL == nil {
-		return nil, os.NewError("http: nil Request.URL")
+		return nil, errors.New("http: nil Request.URL")
 	}
 
 	req := ireq
@@ -212,7 +212,7 @@
 		if shouldRedirect(r.StatusCode) {
 			r.Body.Close()
 			if urlStr = r.Header.Get("Location"); urlStr == "" {
-				err = os.NewError(fmt.Sprintf("%d response missing Location header", r.StatusCode))
+				err = errors.New(fmt.Sprintf("%d response missing Location header", r.StatusCode))
 				break
 			}
 			base = req.URL
@@ -227,9 +227,9 @@
 	return
 }
 
-func defaultCheckRedirect(req *Request, via []*Request) os.Error {
+func defaultCheckRedirect(req *Request, via []*Request) error {
 	if len(via) >= 10 {
-		return os.NewError("stopped after 10 redirects")
+		return errors.New("stopped after 10 redirects")
 	}
 	return nil
 }
@@ -239,14 +239,14 @@
 // Caller should close r.Body when done reading from it.
 //
 // Post is a wrapper around DefaultClient.Post
-func Post(url string, bodyType string, body io.Reader) (r *Response, err os.Error) {
+func Post(url string, bodyType string, body io.Reader) (r *Response, err error) {
 	return DefaultClient.Post(url, bodyType, body)
 }
 
 // Post issues a POST to the specified URL.
 //
 // Caller should close r.Body when done reading from it.
-func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, err os.Error) {
+func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, err error) {
 	req, err := NewRequest("POST", url, body)
 	if err != nil {
 		return nil, err
@@ -261,7 +261,7 @@
 // Caller should close r.Body when done reading from it.
 //
 // PostForm is a wrapper around DefaultClient.PostForm
-func PostForm(url string, data url.Values) (r *Response, err os.Error) {
+func PostForm(url string, data url.Values) (r *Response, err error) {
 	return DefaultClient.PostForm(url, data)
 }
 
@@ -269,7 +269,7 @@
 // with data's keys and values urlencoded as the request body.
 //
 // Caller should close r.Body when done reading from it.
-func (c *Client) PostForm(url string, data url.Values) (r *Response, err os.Error) {
+func (c *Client) PostForm(url string, data url.Values) (r *Response, err error) {
 	return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
 }
 
@@ -283,7 +283,7 @@
 //    307 (Temporary Redirect)
 //
 // Head is a wrapper around DefaultClient.Head
-func Head(url string) (r *Response, err os.Error) {
+func Head(url string) (r *Response, err error) {
 	return DefaultClient.Head(url)
 }
 
@@ -295,7 +295,7 @@
 //    302 (Found)
 //    303 (See Other)
 //    307 (Temporary Redirect)
-func (c *Client) Head(url string) (r *Response, err os.Error) {
+func (c *Client) Head(url string) (r *Response, err error) {
 	req, err := NewRequest("HEAD", url, nil)
 	if err != nil {
 		return nil, err
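// Client.CheckRedirect now returns error. A sketch of a client that refuses
// to follow more than three redirects (the package is imported as "http" in
// this tree, "net/http" in later releases; the URL is a placeholder):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//		"http"
//	)
//
//	func main() {
//		client := &http.Client{
//			CheckRedirect: func(req *http.Request, via []*http.Request) error {
//				if len(via) >= 3 {
//					return errors.New("stopped after 3 redirects")
//				}
//				return nil
//			},
//		}
//		resp, err := client.Get("http://example.com/")
//		if err != nil {
//			fmt.Println("get failed:", err)
//			return
//		}
//		resp.Body.Close()
//	}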
diff --git a/src/pkg/http/client_test.go b/src/pkg/http/client_test.go
index 8f61286..fdad2cd 100644
--- a/src/pkg/http/client_test.go
+++ b/src/pkg/http/client_test.go
@@ -8,13 +8,13 @@
 
 import (
 	"crypto/tls"
+	"errors"
 	"fmt"
 	. "http"
 	"http/httptest"
 	"io"
 	"io/ioutil"
 	"net"
-	"os"
 	"strconv"
 	"strings"
 	"testing"
@@ -60,9 +60,9 @@
 	req *Request
 }
 
-func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err os.Error) {
+func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err error) {
 	t.req = req
-	return nil, os.NewError("dummy impl")
+	return nil, errors.New("dummy impl")
 }
 
 func TestGetRequestFormat(t *testing.T) {
@@ -185,9 +185,9 @@
 		t.Errorf("with default client Do, expected error %q, got %q", e, g)
 	}
 
-	var checkErr os.Error
+	var checkErr error
 	var lastVia []*Request
-	c = &Client{CheckRedirect: func(_ *Request, via []*Request) os.Error {
+	c = &Client{CheckRedirect: func(_ *Request, via []*Request) error {
 		lastVia = via
 		return checkErr
 	}}
@@ -203,7 +203,7 @@
 		t.Errorf("expected lastVia to have contained %d elements; got %d", e, g)
 	}
 
-	checkErr = os.NewError("no redirects allowed")
+	checkErr = errors.New("no redirects allowed")
 	res, err = c.Get(ts.URL)
 	finalUrl = res.Request.URL.String()
 	if e, g := "Get /?n=1: no redirects allowed", fmt.Sprintf("%v", err); e != g {
@@ -244,7 +244,7 @@
 	}
 	close(say)
 	_, err = io.ReadFull(res.Body, buf[0:1])
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Fatalf("at end expected EOF, got %v", err)
 	}
 }
@@ -254,7 +254,7 @@
 	count *int
 }
 
-func (c *writeCountingConn) Write(p []byte) (int, os.Error) {
+func (c *writeCountingConn) Write(p []byte) (int, error) {
 	*c.count++
 	return c.Conn.Write(p)
 }
@@ -267,7 +267,7 @@
 	defer ts.Close()
 
 	writes := 0
-	dialer := func(netz string, addr string) (net.Conn, os.Error) {
+	dialer := func(netz string, addr string) (net.Conn, error) {
 		c, err := net.Dial(netz, addr)
 		if err == nil {
 			c = &writeCountingConn{c, &writes}
diff --git a/src/pkg/http/cookie_test.go b/src/pkg/http/cookie_test.go
index 5de6aab..9a537f9 100644
--- a/src/pkg/http/cookie_test.go
+++ b/src/pkg/http/cookie_test.go
@@ -7,7 +7,6 @@
 import (
 	"fmt"
 	"json"
-	"os"
 	"reflect"
 	"testing"
 	"time"
@@ -50,7 +49,7 @@
 	return Header(ho)
 }
 
-func (ho headerOnlyResponseWriter) Write([]byte) (int, os.Error) {
+func (ho headerOnlyResponseWriter) Write([]byte) (int, error) {
 	panic("NOIMPL")
 }
 
diff --git a/src/pkg/http/dump.go b/src/pkg/http/dump.go
index f78df5771..b85feea 100644
--- a/src/pkg/http/dump.go
+++ b/src/pkg/http/dump.go
@@ -8,14 +8,13 @@
 	"bytes"
 	"io"
 	"io/ioutil"
-	"os"
 )
 
 // One of the copies, say from b to r2, could be avoided by using a more
 // elaborate trick where the other copy is made during Request/Response.Write.
 // This would complicate things too much, given that these functions are for
 // debugging only.
-func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err os.Error) {
+func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
 	var buf bytes.Buffer
 	if _, err = buf.ReadFrom(b); err != nil {
 		return nil, nil, err
@@ -33,7 +32,7 @@
 // changes req.Body to refer to the in-memory copy.
 // The documentation for Request.Write details which fields
 // of req are used.
-func DumpRequest(req *Request, body bool) (dump []byte, err os.Error) {
+func DumpRequest(req *Request, body bool) (dump []byte, err error) {
 	var b bytes.Buffer
 	save := req.Body
 	if !body || req.Body == nil {
@@ -54,7 +53,7 @@
 }
 
 // DumpResponse is like DumpRequest but dumps a response.
-func DumpResponse(resp *Response, body bool) (dump []byte, err os.Error) {
+func DumpResponse(resp *Response, body bool) (dump []byte, err error) {
 	var b bytes.Buffer
 	save := resp.Body
 	savecl := resp.ContentLength
diff --git a/src/pkg/http/fcgi/child.go b/src/pkg/http/fcgi/child.go
index 61dd3fb..f6591e0 100644
--- a/src/pkg/http/fcgi/child.go
+++ b/src/pkg/http/fcgi/child.go
@@ -80,7 +80,7 @@
 	return r.header
 }
 
-func (r *response) Write(data []byte) (int, os.Error) {
+func (r *response) Write(data []byte) (int, error) {
 	if !r.wroteHeader {
 		r.WriteHeader(http.StatusOK)
 	}
@@ -117,7 +117,7 @@
 	r.w.Flush()
 }
 
-func (r *response) Close() os.Error {
+func (r *response) Close() error {
 	r.Flush()
 	return r.w.Close()
 }
@@ -214,7 +214,7 @@
 	if err != nil {
 		// there was an error reading the request
 		r.WriteHeader(http.StatusInternalServerError)
-		c.conn.writeRecord(typeStderr, req.reqId, []byte(err.String()))
+		c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
 	} else {
 		httpReq.Body = body
 		c.handler.ServeHTTP(r, httpReq)
@@ -234,9 +234,9 @@
 // to reply to them.
 // If l is nil, Serve accepts connections on stdin.
 // If handler is nil, http.DefaultServeMux is used.
-func Serve(l net.Listener, handler http.Handler) os.Error {
+func Serve(l net.Listener, handler http.Handler) error {
 	if l == nil {
-		var err os.Error
+		var err error
 		l, err = net.FileListener(os.Stdin)
 		if err != nil {
 			return err
diff --git a/src/pkg/http/fcgi/fcgi.go b/src/pkg/http/fcgi/fcgi.go
index 8e2e1cd..70cf781 100644
--- a/src/pkg/http/fcgi/fcgi.go
+++ b/src/pkg/http/fcgi/fcgi.go
@@ -14,8 +14,8 @@
 	"bufio"
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"io"
-	"os"
 	"sync"
 )
 
@@ -72,9 +72,9 @@
 	reserved [5]uint8
 }
 
-func (br *beginRequest) read(content []byte) os.Error {
+func (br *beginRequest) read(content []byte) error {
 	if len(content) != 8 {
-		return os.NewError("fcgi: invalid begin request record")
+		return errors.New("fcgi: invalid begin request record")
 	}
 	br.role = binary.BigEndian.Uint16(content)
 	br.flags = content[2]
@@ -107,7 +107,7 @@
 	return &conn{rwc: rwc}
 }
 
-func (c *conn) Close() os.Error {
+func (c *conn) Close() error {
 	c.mutex.Lock()
 	defer c.mutex.Unlock()
 	return c.rwc.Close()
@@ -118,12 +118,12 @@
 	buf [maxWrite + maxPad]byte
 }
 
-func (rec *record) read(r io.Reader) (err os.Error) {
+func (rec *record) read(r io.Reader) (err error) {
 	if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
 		return err
 	}
 	if rec.h.Version != 1 {
-		return os.NewError("fcgi: invalid header version")
+		return errors.New("fcgi: invalid header version")
 	}
 	n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
 	if _, err = io.ReadFull(r, rec.buf[:n]); err != nil {
@@ -137,7 +137,7 @@
 }
 
 // writeRecord writes and sends a single record.
-func (c *conn) writeRecord(recType uint8, reqId uint16, b []byte) os.Error {
+func (c *conn) writeRecord(recType uint8, reqId uint16, b []byte) error {
 	c.mutex.Lock()
 	defer c.mutex.Unlock()
 	c.buf.Reset()
@@ -155,19 +155,19 @@
 	return err
 }
 
-func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) os.Error {
+func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error {
 	b := [8]byte{byte(role >> 8), byte(role), flags}
 	return c.writeRecord(typeBeginRequest, reqId, b[:])
 }
 
-func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) os.Error {
+func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error {
 	b := make([]byte, 8)
 	binary.BigEndian.PutUint32(b, uint32(appStatus))
 	b[4] = protocolStatus
 	return c.writeRecord(typeEndRequest, reqId, b)
 }
 
-func (c *conn) writePairs(recType uint8, reqId uint16, pairs map[string]string) os.Error {
+func (c *conn) writePairs(recType uint8, reqId uint16, pairs map[string]string) error {
 	w := newWriter(c, recType, reqId)
 	b := make([]byte, 8)
 	for k, v := range pairs {
@@ -227,7 +227,7 @@
 	*bufio.Writer
 }
 
-func (w *bufWriter) Close() os.Error {
+func (w *bufWriter) Close() error {
 	if err := w.Writer.Flush(); err != nil {
 		w.closer.Close()
 		return err
@@ -249,7 +249,7 @@
 	reqId   uint16
 }
 
-func (w *streamWriter) Write(p []byte) (int, os.Error) {
+func (w *streamWriter) Write(p []byte) (int, error) {
 	nn := 0
 	for len(p) > 0 {
 		n := len(p)
@@ -265,7 +265,7 @@
 	return nn, nil
 }
 
-func (w *streamWriter) Close() os.Error {
+func (w *streamWriter) Close() error {
 	// send empty record to close the stream
 	return w.c.writeRecord(w.recType, w.reqId, nil)
 }
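Aside on the pattern in the fcgi hunks above: os.NewError becomes errors.New, and callers keep comparing the returned sentinel values by identity. A minimal, self-contained sketch of that idiom under Go 1 follows; errInvalidRecord and check are made-up names for illustration, not identifiers from this package.

	package main

	import (
		"errors"
		"fmt"
	)

	// errInvalidRecord is a hypothetical sentinel error in the style of the
	// errors.New calls introduced above (formerly os.NewError).
	var errInvalidRecord = errors.New("fcgi: invalid record")

	// check returns the sentinel when the record length is wrong.
	func check(n int) error {
		if n != 8 {
			return errInvalidRecord
		}
		return nil
	}

	func main() {
		if err := check(3); err == errInvalidRecord {
			fmt.Println("got:", err)
		}
	}

Because errors.New returns a distinct value each call, the identity comparison only works against the package-level variable, which is why the diff keeps these sentinels exported or stored rather than recreating them inline.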
diff --git a/src/pkg/http/fcgi/fcgi_test.go b/src/pkg/http/fcgi/fcgi_test.go
index 5c8e46b..e42f8ef 100644
--- a/src/pkg/http/fcgi/fcgi_test.go
+++ b/src/pkg/http/fcgi/fcgi_test.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"io"
-	"os"
 	"testing"
 )
 
@@ -69,7 +68,7 @@
 	io.ReadWriter
 }
 
-func (c *nilCloser) Close() os.Error { return nil }
+func (c *nilCloser) Close() error { return nil }
 
 func TestStreams(t *testing.T) {
 	var rec record
diff --git a/src/pkg/http/filetransport.go b/src/pkg/http/filetransport.go
index 78f3aa2..821787e 100644
--- a/src/pkg/http/filetransport.go
+++ b/src/pkg/http/filetransport.go
@@ -7,7 +7,6 @@
 import (
 	"fmt"
 	"io"
-	"os"
 )
 
 // fileTransport implements RoundTripper for the 'file' protocol.
@@ -32,7 +31,7 @@
 	return fileTransport{fileHandler{fs}}
 }
 
-func (t fileTransport) RoundTrip(req *Request) (resp *Response, err os.Error) {
+func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) {
 	// We start ServeHTTP in a goroutine, which may take a long
 	// time if the file is large.  The newPopulateResponseWriter
 	// call returns a channel which either ServeHTTP or finish()
@@ -112,7 +111,7 @@
 	pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code))
 }
 
-func (pr *populateResponse) Write(p []byte) (n int, err os.Error) {
+func (pr *populateResponse) Write(p []byte) (n int, err error) {
 	if !pr.wroteHeader {
 		pr.WriteHeader(StatusOK)
 	}
diff --git a/src/pkg/http/filetransport_test.go b/src/pkg/http/filetransport_test.go
index 2634243..aaee73e 100644
--- a/src/pkg/http/filetransport_test.go
+++ b/src/pkg/http/filetransport_test.go
@@ -8,12 +8,11 @@
 	"http"
 	"io/ioutil"
 	"path/filepath"
-	"os"
 	"testing"
 )
 
-func checker(t *testing.T) func(string, os.Error) {
-	return func(call string, err os.Error) {
+func checker(t *testing.T) func(string, error) {
+	return func(call string, err error) {
 		if err == nil {
 			return
 		}
diff --git a/src/pkg/http/fs.go b/src/pkg/http/fs.go
index 6d71665..eb0c67d 100644
--- a/src/pkg/http/fs.go
+++ b/src/pkg/http/fs.go
@@ -7,6 +7,7 @@
 package http
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"mime"
@@ -23,9 +24,9 @@
 // system restricted to a specific directory tree.
 type Dir string
 
-func (d Dir) Open(name string) (File, os.Error) {
+func (d Dir) Open(name string) (File, error) {
 	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 {
-		return nil, os.NewError("http: invalid character in file path")
+		return nil, errors.New("http: invalid character in file path")
 	}
 	f, err := os.Open(filepath.Join(string(d), filepath.FromSlash(path.Clean("/"+name))))
 	if err != nil {
@@ -38,17 +39,17 @@
 // The elements in a file path are separated by slash ('/', U+002F)
 // characters, regardless of host operating system convention.
 type FileSystem interface {
-	Open(name string) (File, os.Error)
+	Open(name string) (File, error)
 }
 
 // A File is returned by a FileSystem's Open method and can be
 // served by the FileServer implementation.
 type File interface {
-	Close() os.Error
-	Stat() (*os.FileInfo, os.Error)
-	Readdir(count int) ([]os.FileInfo, os.Error)
-	Read([]byte) (int, os.Error)
-	Seek(offset int64, whence int) (int64, os.Error)
+	Close() error
+	Stat() (*os.FileInfo, error)
+	Readdir(count int) ([]os.FileInfo, error)
+	Read([]byte) (int, error)
+	Seek(offset int64, whence int) (int64, error)
 }
 
 // Heuristic: b is text if it is valid UTF-8 and doesn't
@@ -194,16 +195,16 @@
 	// TODO(adg): handle multiple ranges
 	ranges, err := parseRange(r.Header.Get("Range"), size)
 	if err == nil && len(ranges) > 1 {
-		err = os.NewError("multiple ranges not supported")
+		err = errors.New("multiple ranges not supported")
 	}
 	if err != nil {
-		Error(w, err.String(), StatusRequestedRangeNotSatisfiable)
+		Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
 		return
 	}
 	if len(ranges) == 1 {
 		ra := ranges[0]
 		if _, err := f.Seek(ra.start, os.SEEK_SET); err != nil {
-			Error(w, err.String(), StatusRequestedRangeNotSatisfiable)
+			Error(w, err.Error(), StatusRequestedRangeNotSatisfiable)
 			return
 		}
 		size = ra.length
@@ -269,19 +270,19 @@
 }
 
 // parseRange parses a Range header string as per RFC 2616.
-func parseRange(s string, size int64) ([]httpRange, os.Error) {
+func parseRange(s string, size int64) ([]httpRange, error) {
 	if s == "" {
 		return nil, nil // header not present
 	}
 	const b = "bytes="
 	if !strings.HasPrefix(s, b) {
-		return nil, os.NewError("invalid range")
+		return nil, errors.New("invalid range")
 	}
 	var ranges []httpRange
 	for _, ra := range strings.Split(s[len(b):], ",") {
 		i := strings.Index(ra, "-")
 		if i < 0 {
-			return nil, os.NewError("invalid range")
+			return nil, errors.New("invalid range")
 		}
 		start, end := ra[:i], ra[i+1:]
 		var r httpRange
@@ -290,7 +291,7 @@
 			// range start relative to the end of the file.
 			i, err := strconv.Atoi64(end)
 			if err != nil {
-				return nil, os.NewError("invalid range")
+				return nil, errors.New("invalid range")
 			}
 			if i > size {
 				i = size
@@ -300,7 +301,7 @@
 		} else {
 			i, err := strconv.Atoi64(start)
 			if err != nil || i > size || i < 0 {
-				return nil, os.NewError("invalid range")
+				return nil, errors.New("invalid range")
 			}
 			r.start = i
 			if end == "" {
@@ -309,7 +310,7 @@
 			} else {
 				i, err := strconv.Atoi64(end)
 				if err != nil || r.start > i {
-					return nil, os.NewError("invalid range")
+					return nil, errors.New("invalid range")
 				}
 				if i >= size {
 					i = size - 1
diff --git a/src/pkg/http/fs_test.go b/src/pkg/http/fs_test.go
index bb6d015..76312e8 100644
--- a/src/pkg/http/fs_test.go
+++ b/src/pkg/http/fs_test.go
@@ -40,7 +40,7 @@
 	}))
 	defer ts.Close()
 
-	var err os.Error
+	var err error
 
 	file, err := ioutil.ReadFile(testFile)
 	if err != nil {
@@ -113,16 +113,16 @@
 }
 
 type testFileSystem struct {
-	open func(name string) (File, os.Error)
+	open func(name string) (File, error)
 }
 
-func (fs *testFileSystem) Open(name string) (File, os.Error) {
+func (fs *testFileSystem) Open(name string) (File, error) {
 	return fs.open(name)
 }
 
 func TestFileServerCleans(t *testing.T) {
 	ch := make(chan string, 1)
-	fs := FileServer(&testFileSystem{func(name string) (File, os.Error) {
+	fs := FileServer(&testFileSystem{func(name string) (File, error) {
 		ch <- name
 		return nil, os.ENOENT
 	}})
diff --git a/src/pkg/http/header.go b/src/pkg/http/header.go
index aaaa92a..6be6016 100644
--- a/src/pkg/http/header.go
+++ b/src/pkg/http/header.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"io"
 	"net/textproto"
-	"os"
 	"sort"
 	"strings"
 )
@@ -43,7 +42,7 @@
 }
 
 // Write writes a header in wire format.
-func (h Header) Write(w io.Writer) os.Error {
+func (h Header) Write(w io.Writer) error {
 	return h.WriteSubset(w, nil)
 }
 
@@ -51,7 +50,7 @@
 
 // WriteSubset writes a header in wire format.
 // If exclude is not nil, keys where exclude[key] == true are not written.
-func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) os.Error {
+func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
 	keys := make([]string, 0, len(h))
 	for k := range h {
 		if exclude == nil || !exclude[k] {
diff --git a/src/pkg/http/httptest/recorder.go b/src/pkg/http/httptest/recorder.go
index f2fedef..f69279f 100644
--- a/src/pkg/http/httptest/recorder.go
+++ b/src/pkg/http/httptest/recorder.go
@@ -8,7 +8,6 @@
 import (
 	"bytes"
 	"http"
-	"os"
 )
 
 // ResponseRecorder is an implementation of http.ResponseWriter that
@@ -38,7 +37,7 @@
 }
 
 // Write always succeeds and writes to rw.Body, if not nil.
-func (rw *ResponseRecorder) Write(buf []byte) (int, os.Error) {
+func (rw *ResponseRecorder) Write(buf []byte) (int, error) {
 	if rw.Body != nil {
 		rw.Body.Write(buf)
 	}
diff --git a/src/pkg/http/httptest/server.go b/src/pkg/http/httptest/server.go
index 43a48eb..ea719cf 100644
--- a/src/pkg/http/httptest/server.go
+++ b/src/pkg/http/httptest/server.go
@@ -36,7 +36,7 @@
 	history []net.Conn
 }
 
-func (hs *historyListener) Accept() (c net.Conn, err os.Error) {
+func (hs *historyListener) Accept() (c net.Conn, err error) {
 	c, err = hs.Listener.Accept()
 	if err == nil {
 		hs.history = append(hs.history, c)
diff --git a/src/pkg/http/persist.go b/src/pkg/http/persist.go
index f73e6c6..7d84e96 100644
--- a/src/pkg/http/persist.go
+++ b/src/pkg/http/persist.go
@@ -6,6 +6,7 @@
 
 import (
 	"bufio"
+	"errors"
 	"io"
 	"net"
 	"net/textproto"
@@ -31,7 +32,7 @@
 	lk              sync.Mutex // read-write protects the following fields
 	c               net.Conn
 	r               *bufio.Reader
-	re, we          os.Error // read/write errors
+	re, we          error // read/write errors
 	lastbody        io.ReadCloser
 	nread, nwritten int
 	pipereq         map[*Request]uint
@@ -63,7 +64,7 @@
 }
 
 // Close calls Hijack and then also closes the underlying connection
-func (sc *ServerConn) Close() os.Error {
+func (sc *ServerConn) Close() error {
 	c, _ := sc.Hijack()
 	if c != nil {
 		return c.Close()
@@ -75,7 +76,7 @@
 // it is gracefully determined that there are no more requests (e.g. after the
 // first request on an HTTP/1.0 connection, or after a Connection:close on a
 // HTTP/1.1 connection).
-func (sc *ServerConn) Read() (req *Request, err os.Error) {
+func (sc *ServerConn) Read() (req *Request, err error) {
 
 	// Ensure ordered execution of Reads and Writes
 	id := sc.pipe.Next()
@@ -160,7 +161,7 @@
 // Write writes resp in response to req. To close the connection gracefully, set the
 // Response.Close field to true. Write should be considered operational until
 // it returns an error, regardless of any errors returned on the Read side.
-func (sc *ServerConn) Write(req *Request, resp *Response) os.Error {
+func (sc *ServerConn) Write(req *Request, resp *Response) error {
 
 	// Retrieve the pipeline ID of this request/response pair
 	sc.lk.Lock()
@@ -188,7 +189,7 @@
 	c := sc.c
 	if sc.nread <= sc.nwritten {
 		defer sc.lk.Unlock()
-		return os.NewError("persist server pipe count")
+		return errors.New("persist server pipe count")
 	}
 	if resp.Close {
 		// After signaling a keep-alive close, any pipelined unread
@@ -221,13 +222,13 @@
 	lk              sync.Mutex // read-write protects the following fields
 	c               net.Conn
 	r               *bufio.Reader
-	re, we          os.Error // read/write errors
+	re, we          error // read/write errors
 	lastbody        io.ReadCloser
 	nread, nwritten int
 	pipereq         map[*Request]uint
 
 	pipe     textproto.Pipeline
-	writeReq func(*Request, io.Writer) os.Error
+	writeReq func(*Request, io.Writer) error
 }
 
 // NewClientConn returns a new ClientConn reading and writing c.  If r is not
@@ -267,7 +268,7 @@
 }
 
 // Close calls Hijack and then also closes the underlying connection
-func (cc *ClientConn) Close() os.Error {
+func (cc *ClientConn) Close() error {
 	c, _ := cc.Hijack()
 	if c != nil {
 		return c.Close()
@@ -280,7 +281,7 @@
 // keepalive connection is logically closed after this request and the opposing
 // server is informed. An ErrUnexpectedEOF indicates the remote closed the
 // underlying TCP connection, which is usually considered as graceful close.
-func (cc *ClientConn) Write(req *Request) (err os.Error) {
+func (cc *ClientConn) Write(req *Request) (err error) {
 
 	// Ensure ordered execution of Writes
 	id := cc.pipe.Next()
@@ -343,13 +344,13 @@
 // returned together with an ErrPersistEOF, which means that the remote
 // requested that this be the last request serviced. Read can be called
 // concurrently with Write, but not with another Read.
-func (cc *ClientConn) Read(req *Request) (*Response, os.Error) {
+func (cc *ClientConn) Read(req *Request) (*Response, error) {
 	return cc.readUsing(req, ReadResponse)
 }
 
 // readUsing is the implementation of Read with a replaceable
 // ReadResponse-like function, used by the Transport.
-func (cc *ClientConn) readUsing(req *Request, readRes func(*bufio.Reader, *Request) (*Response, os.Error)) (resp *Response, err os.Error) {
+func (cc *ClientConn) readUsing(req *Request, readRes func(*bufio.Reader, *Request) (*Response, error)) (resp *Response, err error) {
 	// Retrieve the pipeline ID of this request/response pair
 	cc.lk.Lock()
 	id, ok := cc.pipereq[req]
@@ -411,7 +412,7 @@
 }
 
 // Do is convenience method that writes a request and reads a response.
-func (cc *ClientConn) Do(req *Request) (resp *Response, err os.Error) {
+func (cc *ClientConn) Do(req *Request) (resp *Response, err error) {
 	err = cc.Write(req)
 	if err != nil {
 		return
diff --git a/src/pkg/http/pprof/pprof.go b/src/pkg/http/pprof/pprof.go
index 917c7f8..a118a25 100644
--- a/src/pkg/http/pprof/pprof.go
+++ b/src/pkg/http/pprof/pprof.go
@@ -29,6 +29,7 @@
 	"bytes"
 	"fmt"
 	"http"
+	"io"
 	"os"
 	"runtime"
 	"runtime/pprof"
@@ -121,7 +122,7 @@
 		// Wait until here to check for err; the last
 		// symbol will have an err because it doesn't end in +.
 		if err != nil {
-			if err != os.EOF {
+			if err != io.EOF {
 				fmt.Fprintf(&buf, "reading request: %v\n", err)
 			}
 			break
diff --git a/src/pkg/http/readrequest_test.go b/src/pkg/http/readrequest_test.go
index 6d9042a..d62133d 100644
--- a/src/pkg/http/readrequest_test.go
+++ b/src/pkg/http/readrequest_test.go
@@ -159,8 +159,8 @@
 		braw.WriteString(tt.Raw)
 		req, err := ReadRequest(bufio.NewReader(&braw))
 		if err != nil {
-			if err.String() != tt.Error {
-				t.Errorf("#%d: error %q, want error %q", i, err.String(), tt.Error)
+			if err.Error() != tt.Error {
+				t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error)
 			}
 			continue
 		}
diff --git a/src/pkg/http/request.go b/src/pkg/http/request.go
index 78e07ec..d9a04ef 100644
--- a/src/pkg/http/request.go
+++ b/src/pkg/http/request.go
@@ -11,13 +11,13 @@
 	"bytes"
 	"crypto/tls"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"mime"
 	"mime/multipart"
 	"net/textproto"
-	"os"
 	"strconv"
 	"strings"
 	"url"
@@ -33,14 +33,14 @@
 
 // ErrMissingFile is returned by FormFile when the provided file field name
 // is either not present in the request or not a file field.
-var ErrMissingFile = os.NewError("http: no such file")
+var ErrMissingFile = errors.New("http: no such file")
 
 // HTTP request parsing errors.
 type ProtocolError struct {
 	ErrorString string
 }
 
-func (err *ProtocolError) String() string { return err.ErrorString }
+func (err *ProtocolError) Error() string { return err.ErrorString }
 
 var (
 	ErrLineTooLong          = &ProtocolError{"header line too long"}
@@ -58,7 +58,7 @@
 	str  string
 }
 
-func (e *badStringError) String() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
 
 // Headers that Request.Write handles itself and should be skipped.
 var reqWriteExcludeHeader = map[string]bool{
@@ -174,11 +174,11 @@
 	return readCookies(r.Header, "")
 }
 
-var ErrNoCookie = os.NewError("http: named cookied not present")
+var ErrNoCookie = errors.New("http: named cookied not present")
 
 // Cookie returns the named cookie provided in the request or
 // ErrNoCookie if not found.
-func (r *Request) Cookie(name string) (*Cookie, os.Error) {
+func (r *Request) Cookie(name string) (*Cookie, error) {
 	for _, c := range readCookies(r.Header, name) {
 		return c, nil
 	}
@@ -222,18 +222,18 @@
 // multipart/form-data POST request, else returns nil and an error.
 // Use this function instead of ParseMultipartForm to
 // process the request body as a stream.
-func (r *Request) MultipartReader() (*multipart.Reader, os.Error) {
+func (r *Request) MultipartReader() (*multipart.Reader, error) {
 	if r.MultipartForm == multipartByReader {
-		return nil, os.NewError("http: MultipartReader called twice")
+		return nil, errors.New("http: MultipartReader called twice")
 	}
 	if r.MultipartForm != nil {
-		return nil, os.NewError("http: multipart handled by ParseMultipartForm")
+		return nil, errors.New("http: multipart handled by ParseMultipartForm")
 	}
 	r.MultipartForm = multipartByReader
 	return r.multipartReader()
 }
 
-func (r *Request) multipartReader() (*multipart.Reader, os.Error) {
+func (r *Request) multipartReader() (*multipart.Reader, error) {
 	v := r.Header.Get("Content-Type")
 	if v == "" {
 		return nil, ErrNotMultipart
@@ -272,7 +272,7 @@
 // If Body is present, Content-Length is <= 0 and TransferEncoding
 // hasn't been set to "identity", Write adds "Transfer-Encoding:
 // chunked" to the header. Body is closed after it is sent.
-func (req *Request) Write(w io.Writer) os.Error {
+func (req *Request) Write(w io.Writer) error {
 	return req.write(w, false, nil)
 }
 
@@ -282,11 +282,11 @@
 // section 5.1.2 of RFC 2616, including the scheme and host. In
 // either case, WriteProxy also writes a Host header, using either
 // req.Host or req.URL.Host.
-func (req *Request) WriteProxy(w io.Writer) os.Error {
+func (req *Request) WriteProxy(w io.Writer) error {
 	return req.write(w, true, nil)
 }
 
-func (req *Request) dumpWrite(w io.Writer) os.Error {
+func (req *Request) dumpWrite(w io.Writer) error {
 	// TODO(bradfitz): RawPath here?
 	urlStr := valueOrDefault(req.URL.EncodedPath(), "/")
 	if req.URL.RawQuery != "" {
@@ -332,11 +332,11 @@
 }
 
 // extraHeaders may be nil
-func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) os.Error {
+func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {
 	host := req.Host
 	if host == "" {
 		if req.URL == nil {
-			return os.NewError("http: Request.Write on Request with no Host or URL set")
+			return errors.New("http: Request.Write on Request with no Host or URL set")
 		}
 		host = req.URL.Host
 	}
@@ -415,11 +415,11 @@
 // Give up if the line exceeds maxLineLength.
 // The returned bytes are a pointer into storage in
 // the bufio, so they are only valid until the next bufio read.
-func readLineBytes(b *bufio.Reader) (p []byte, err os.Error) {
+func readLineBytes(b *bufio.Reader) (p []byte, err error) {
 	if p, err = b.ReadSlice('\n'); err != nil {
 		// We always know when EOF is coming.
 		// If the caller asked for a line, there should be a line.
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		} else if err == bufio.ErrBufferFull {
 			err = ErrLineTooLong
@@ -441,7 +441,7 @@
 }
 
 // readLineBytes, but convert the bytes into a string.
-func readLine(b *bufio.Reader) (s string, err os.Error) {
+func readLine(b *bufio.Reader) (s string, err error) {
 	p, e := readLineBytes(b)
 	if e != nil {
 		return "", e
@@ -487,7 +487,7 @@
 type chunkedReader struct {
 	r   *bufio.Reader
 	n   uint64 // unread bytes in chunk
-	err os.Error
+	err error
 }
 
 func (cr *chunkedReader) beginChunk() {
@@ -512,11 +512,11 @@
 				break
 			}
 		}
-		cr.err = os.EOF
+		cr.err = io.EOF
 	}
 }
 
-func (cr *chunkedReader) Read(b []uint8) (n int, err os.Error) {
+func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
 	if cr.err != nil {
 		return 0, cr.err
 	}
@@ -536,7 +536,7 @@
 		b := make([]byte, 2)
 		if _, cr.err = io.ReadFull(cr.r, b); cr.err == nil {
 			if b[0] != '\r' || b[1] != '\n' {
-				cr.err = os.NewError("malformed chunked encoding")
+				cr.err = errors.New("malformed chunked encoding")
 			}
 		}
 	}
@@ -544,7 +544,7 @@
 }
 
 // NewRequest returns a new Request given a method, URL, and optional body.
-func NewRequest(method, urlStr string, body io.Reader) (*Request, os.Error) {
+func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
 	u, err := url.Parse(urlStr)
 	if err != nil {
 		return nil, err
@@ -586,7 +586,7 @@
 }
 
 // ReadRequest reads and parses a request from b.
-func ReadRequest(b *bufio.Reader) (req *Request, err os.Error) {
+func ReadRequest(b *bufio.Reader) (req *Request, err error) {
 
 	tp := textproto.NewReader(b)
 	req = new(Request)
@@ -594,7 +594,7 @@
 	// First line: GET /index.html HTTP/1.0
 	var s string
 	if s, err = tp.ReadLine(); err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return nil, err
@@ -690,7 +690,7 @@
 	stopped bool
 }
 
-func (l *maxBytesReader) Read(p []byte) (n int, err os.Error) {
+func (l *maxBytesReader) Read(p []byte) (n int, err error) {
 	if l.n <= 0 {
 		if !l.stopped {
 			l.stopped = true
@@ -698,7 +698,7 @@
 				res.requestTooLarge()
 			}
 		}
-		return 0, os.NewError("http: request body too large")
+		return 0, errors.New("http: request body too large")
 	}
 	if int64(len(p)) > l.n {
 		p = p[:l.n]
@@ -708,7 +708,7 @@
 	return
 }
 
-func (l *maxBytesReader) Close() os.Error {
+func (l *maxBytesReader) Close() error {
 	return l.r.Close()
 }
 
@@ -720,7 +720,7 @@
 //
 // ParseMultipartForm calls ParseForm automatically.
 // It is idempotent.
-func (r *Request) ParseForm() (err os.Error) {
+func (r *Request) ParseForm() (err error) {
 	if r.Form != nil {
 		return
 	}
@@ -729,7 +729,7 @@
 	}
 	if r.Method == "POST" || r.Method == "PUT" {
 		if r.Body == nil {
-			return os.NewError("missing form body")
+			return errors.New("missing form body")
 		}
 		ct := r.Header.Get("Content-Type")
 		ct, _, err := mime.ParseMediaType(ct)
@@ -749,7 +749,7 @@
 				break
 			}
 			if int64(len(b)) > maxFormSize {
-				return os.NewError("http: POST too large")
+				return errors.New("http: POST too large")
 			}
 			var newValues url.Values
 			newValues, e = url.ParseQuery(string(b))
@@ -785,9 +785,9 @@
 // disk in temporary files.
 // ParseMultipartForm calls ParseForm if necessary.
 // After one call to ParseMultipartForm, subsequent calls have no effect.
-func (r *Request) ParseMultipartForm(maxMemory int64) os.Error {
+func (r *Request) ParseMultipartForm(maxMemory int64) error {
 	if r.MultipartForm == multipartByReader {
-		return os.NewError("http: multipart handled by MultipartReader")
+		return errors.New("http: multipart handled by MultipartReader")
 	}
 	if r.Form == nil {
 		err := r.ParseForm()
@@ -832,9 +832,9 @@
 
 // FormFile returns the first file for the provided form key.
 // FormFile calls ParseMultipartForm and ParseForm if necessary.
-func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, os.Error) {
+func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) {
 	if r.MultipartForm == multipartByReader {
-		return nil, nil, os.NewError("http: multipart handled by MultipartReader")
+		return nil, nil, errors.New("http: multipart handled by MultipartReader")
 	}
 	if r.MultipartForm == nil {
 		err := r.ParseMultipartForm(defaultMaxMemory)
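The ProtocolError hunk above is representative of the whole conversion: concrete error types now satisfy the built-in error interface by exposing Error() string, where os.Error previously required String() string. A hedged, stand-alone sketch of that contract; parseError is a hypothetical type, not one defined in this CL.

	package main

	import "fmt"

	// parseError is a hypothetical error type: under Go 1 it satisfies the
	// built-in error interface by providing Error() string, where the old
	// os.Error interface required String() string.
	type parseError struct {
		line int
		msg  string
	}

	func (e *parseError) Error() string {
		return fmt.Sprintf("line %d: %s", e.line, e.msg)
	}

	func main() {
		var err error = &parseError{line: 7, msg: "bad header"}
		fmt.Println(err) // fmt calls Error(), just as it called String() before
	}

The fmt verbs pick up Error() automatically, so call sites in the hunks above only change where the method was invoked explicitly, such as err.String() becoming err.Error().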
diff --git a/src/pkg/http/requestwrite_test.go b/src/pkg/http/requestwrite_test.go
index 194f6dd..16593e9 100644
--- a/src/pkg/http/requestwrite_test.go
+++ b/src/pkg/http/requestwrite_test.go
@@ -6,10 +6,10 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
-	"os"
 	"strings"
 	"testing"
 	"url"
@@ -24,7 +24,7 @@
 	WantProxy string // Request.WriteProxy
 	WantDump  string // DumpRequest
 
-	WantError os.Error // wanted error from Request.Write
+	WantError error // wanted error from Request.Write
 }
 
 var reqWriteTests = []reqWriteTest{
@@ -292,7 +292,7 @@
 			ContentLength: 10, // but we're going to send only 5 bytes
 		},
 		Body:      []byte("12345"),
-		WantError: os.NewError("http: Request.ContentLength=10 with Body length 5"),
+		WantError: errors.New("http: Request.ContentLength=10 with Body length 5"),
 	},
 
 	// Request with a ContentLength of 4 but an 8 byte body.
@@ -306,7 +306,7 @@
 			ContentLength: 4, // but we're going to try to send 8 bytes
 		},
 		Body:      []byte("12345678"),
-		WantError: os.NewError("http: Request.ContentLength=4 with Body length 8"),
+		WantError: errors.New("http: Request.ContentLength=4 with Body length 8"),
 	},
 
 	// Request with a 5 ContentLength and nil body.
@@ -319,7 +319,7 @@
 			ProtoMinor:    1,
 			ContentLength: 5, // but we'll omit the body
 		},
-		WantError: os.NewError("http: Request.ContentLength=5 with nil Body"),
+		WantError: errors.New("http: Request.ContentLength=5 with nil Body"),
 	},
 
 	// Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
@@ -422,7 +422,7 @@
 	closed bool
 }
 
-func (rc *closeChecker) Close() os.Error {
+func (rc *closeChecker) Close() error {
 	rc.closed = true
 	return nil
 }
diff --git a/src/pkg/http/response.go b/src/pkg/http/response.go
index 56c65b5..7be7150 100644
--- a/src/pkg/http/response.go
+++ b/src/pkg/http/response.go
@@ -8,9 +8,9 @@
 
 import (
 	"bufio"
+	"errors"
 	"io"
 	"net/textproto"
-	"os"
 	"strconv"
 	"strings"
 	"url"
@@ -78,13 +78,13 @@
 	return readSetCookies(r.Header)
 }
 
-var ErrNoLocation = os.NewError("http: no Location header in response")
+var ErrNoLocation = errors.New("http: no Location header in response")
 
 // Location returns the URL of the response's "Location" header,
 // if present.  Relative redirects are resolved relative to
 // the Response's Request.  ErrNoLocation is returned if no
 // Location header is present.
-func (r *Response) Location() (*url.URL, os.Error) {
+func (r *Response) Location() (*url.URL, error) {
 	lv := r.Header.Get("Location")
 	if lv == "" {
 		return nil, ErrNoLocation
@@ -101,7 +101,7 @@
 // reading resp.Body.  After that call, clients can inspect
 // resp.Trailer to find key/value pairs included in the response
 // trailer.
-func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err os.Error) {
+func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err error) {
 
 	tp := textproto.NewReader(r)
 	resp = new(Response)
@@ -112,7 +112,7 @@
 	// Parse the first line of the response.
 	line, err := tp.ReadLine()
 	if err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return nil, err
@@ -186,7 +186,7 @@
 //  ContentLength
 //  Header, values for non-canonical keys will have unpredictable behavior
 //
-func (resp *Response) Write(w io.Writer) os.Error {
+func (resp *Response) Write(w io.Writer) error {
 
 	// RequestMethod should be upper-case
 	if resp.Request != nil {
diff --git a/src/pkg/http/response_test.go b/src/pkg/http/response_test.go
index 86494bf..6a14179 100644
--- a/src/pkg/http/response_test.go
+++ b/src/pkg/http/response_test.go
@@ -10,7 +10,6 @@
 	"compress/gzip"
 	"crypto/rand"
 	"fmt"
-	"os"
 	"io"
 	"io/ioutil"
 	"reflect"
@@ -301,7 +300,7 @@
 			args = append([]interface{}{test.chunked, test.compressed}, args...)
 			t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...)
 		}
-		checkErr := func(err os.Error, msg string) {
+		checkErr := func(err error, msg string) {
 			if err == nil {
 				return
 			}
@@ -320,7 +319,7 @@
 		}
 		if test.compressed {
 			buf.WriteString("Content-Encoding: gzip\r\n")
-			var err os.Error
+			var err error
 			wr, err = gzip.NewWriter(wr)
 			checkErr(err, "gzip.NewWriter")
 		}
@@ -401,7 +400,7 @@
 	location string // Response's Location header or ""
 	requrl   string // Response.Request.URL or ""
 	want     string
-	wantErr  os.Error
+	wantErr  error
 }
 
 var responseLocationTests = []responseLocationTest{
@@ -417,7 +416,7 @@
 		res.Header.Set("Location", tt.location)
 		if tt.requrl != "" {
 			res.Request = &Request{}
-			var err os.Error
+			var err error
 			res.Request.URL, err = url.Parse(tt.requrl)
 			if err != nil {
 				t.Fatalf("bad test URL %q: %v", tt.requrl, err)
@@ -430,7 +429,7 @@
 				t.Errorf("%d. err=nil; want %q", i, tt.wantErr)
 				continue
 			}
-			if g, e := err.String(), tt.wantErr.String(); g != e {
+			if g, e := err.Error(), tt.wantErr.Error(); g != e {
 				t.Errorf("%d. err=%q; want %q", i, g, e)
 				continue
 			}
diff --git a/src/pkg/http/reverseproxy.go b/src/pkg/http/reverseproxy.go
index 3a63db0..9cd359f 100644
--- a/src/pkg/http/reverseproxy.go
+++ b/src/pkg/http/reverseproxy.go
@@ -10,7 +10,6 @@
 	"io"
 	"log"
 	"net"
-	"os"
 	"strings"
 	"sync"
 	"time"
@@ -141,7 +140,7 @@
 	done chan bool
 }
 
-func (m *maxLatencyWriter) Write(p []byte) (n int, err os.Error) {
+func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
 	m.lk.Lock()
 	defer m.lk.Unlock()
 	if m.done == nil {
diff --git a/src/pkg/http/serve_test.go b/src/pkg/http/serve_test.go
index 2ff66d5..98e10d4 100644
--- a/src/pkg/http/serve_test.go
+++ b/src/pkg/http/serve_test.go
@@ -31,10 +31,10 @@
 	conn net.Conn
 }
 
-func (l *oneConnListener) Accept() (c net.Conn, err os.Error) {
+func (l *oneConnListener) Accept() (c net.Conn, err error) {
 	c = l.conn
 	if c == nil {
-		err = os.EOF
+		err = io.EOF
 		return
 	}
 	err = nil
@@ -42,7 +42,7 @@
 	return
 }
 
-func (l *oneConnListener) Close() os.Error {
+func (l *oneConnListener) Close() error {
 	return nil
 }
 
@@ -63,15 +63,15 @@
 	writeBuf bytes.Buffer
 }
 
-func (c *testConn) Read(b []byte) (int, os.Error) {
+func (c *testConn) Read(b []byte) (int, error) {
 	return c.readBuf.Read(b)
 }
 
-func (c *testConn) Write(b []byte) (int, os.Error) {
+func (c *testConn) Write(b []byte) (int, error) {
 	return c.writeBuf.Write(b)
 }
 
-func (c *testConn) Close() os.Error {
+func (c *testConn) Close() error {
 	return nil
 }
 
@@ -83,15 +83,15 @@
 	return dummyAddr("remote-addr")
 }
 
-func (c *testConn) SetTimeout(nsec int64) os.Error {
+func (c *testConn) SetTimeout(nsec int64) error {
 	return nil
 }
 
-func (c *testConn) SetReadTimeout(nsec int64) os.Error {
+func (c *testConn) SetReadTimeout(nsec int64) error {
 	return nil
 }
 
-func (c *testConn) SetWriteTimeout(nsec int64) os.Error {
+func (c *testConn) SetWriteTimeout(nsec int64) error {
 	return nil
 }
 
@@ -108,7 +108,7 @@
 
 	reqNum := 0
 	ch := make(chan *Request)
-	servech := make(chan os.Error)
+	servech := make(chan error)
 	listener := &oneConnListener{conn}
 	handler := func(res ResponseWriter, req *Request) {
 		reqNum++
@@ -138,7 +138,7 @@
 			req.Method, "POST")
 	}
 
-	if serveerr := <-servech; serveerr != os.EOF {
+	if serveerr := <-servech; serveerr != io.EOF {
 		t.Errorf("Serve returned %q; expected EOF", serveerr)
 	}
 }
@@ -273,8 +273,8 @@
 	buf := make([]byte, 1)
 	n, err := conn.Read(buf)
 	latency := time.Nanoseconds() - t1
-	if n != 0 || err != os.EOF {
-		t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, os.EOF)
+	if n != 0 || err != io.EOF {
+		t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
 	}
 	if latency < second*0.20 /* fudge from 0.25 above */ {
 		t.Errorf("got EOF after %d ns, want >= %d", latency, second*0.20)
@@ -753,7 +753,7 @@
 
 func TestTimeoutHandler(t *testing.T) {
 	sendHi := make(chan bool, 1)
-	writeErrors := make(chan os.Error, 1)
+	writeErrors := make(chan error, 1)
 	sayHi := HandlerFunc(func(w ResponseWriter, r *Request) {
 		<-sendHi
 		_, werr := w.Write([]byte("hi"))
@@ -992,7 +992,7 @@
 
 type neverEnding byte
 
-func (b neverEnding) Read(p []byte) (n int, err os.Error) {
+func (b neverEnding) Read(p []byte) (n int, err error) {
 	for i := range p {
 		p[i] = byte(b)
 	}
@@ -1004,7 +1004,7 @@
 	n *int64
 }
 
-func (cr countReader) Read(p []byte) (n int, err os.Error) {
+func (cr countReader) Read(p []byte) (n int, err error) {
 	n, err = cr.r.Read(p)
 	*cr.n += int64(n)
 	return
@@ -1092,19 +1092,19 @@
 }
 
 type errorListener struct {
-	errs []os.Error
+	errs []error
 }
 
-func (l *errorListener) Accept() (c net.Conn, err os.Error) {
+func (l *errorListener) Accept() (c net.Conn, err error) {
 	if len(l.errs) == 0 {
-		return nil, os.EOF
+		return nil, io.EOF
 	}
 	err = l.errs[0]
 	l.errs = l.errs[1:]
 	return
 }
 
-func (l *errorListener) Close() os.Error {
+func (l *errorListener) Close() error {
 	return nil
 }
 
@@ -1116,13 +1116,13 @@
 	log.SetOutput(ioutil.Discard) // is noisy otherwise
 	defer log.SetOutput(os.Stderr)
 
-	ln := &errorListener{[]os.Error{
+	ln := &errorListener{[]error{
 		&net.OpError{
-			Op:    "accept",
-			Error: os.Errno(syscall.EMFILE),
+			Op:  "accept",
+			Err: os.Errno(syscall.EMFILE),
 		}}}
 	err := Serve(ln, HandlerFunc(HandlerFunc(func(ResponseWriter, *Request) {})))
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("got error %v, want EOF", err)
 	}
 }
@@ -1138,11 +1138,11 @@
 	for i := 0; i < b.N; i++ {
 		res, err := Get(ts.URL)
 		if err != nil {
-			panic("Get: " + err.String())
+			panic("Get: " + err.Error())
 		}
 		all, err := ioutil.ReadAll(res.Body)
 		if err != nil {
-			panic("ReadAll: " + err.String())
+			panic("ReadAll: " + err.Error())
 		}
 		body := string(all)
 		if body != "Hello world.\n" {
diff --git a/src/pkg/http/server.go b/src/pkg/http/server.go
index 9792c60..f2a4f01 100644
--- a/src/pkg/http/server.go
+++ b/src/pkg/http/server.go
@@ -14,12 +14,12 @@
 	"bytes"
 	"crypto/rand"
 	"crypto/tls"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"net"
-	"os"
 	"path"
 	"runtime/debug"
 	"strconv"
@@ -31,10 +31,10 @@
 
 // Errors introduced by the HTTP server.
 var (
-	ErrWriteAfterFlush = os.NewError("Conn.Write called after Flush")
-	ErrBodyNotAllowed  = os.NewError("http: response status code does not allow body")
-	ErrHijacked        = os.NewError("Conn has been hijacked")
-	ErrContentLength   = os.NewError("Conn.Write wrote more than the declared Content-Length")
+	ErrWriteAfterFlush = errors.New("Conn.Write called after Flush")
+	ErrBodyNotAllowed  = errors.New("http: response status code does not allow body")
+	ErrHijacked        = errors.New("Conn has been hijacked")
+	ErrContentLength   = errors.New("Conn.Write wrote more than the declared Content-Length")
 )
 
 // Objects implementing the Handler interface can be
@@ -60,7 +60,7 @@
 	// Write writes the data to the connection as part of an HTTP reply.
 	// If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK)
 	// before writing the data.
-	Write([]byte) (int, os.Error)
+	Write([]byte) (int, error)
 
 	// WriteHeader sends an HTTP response header with status code.
 	// If WriteHeader is not called explicitly, the first call to Write
@@ -90,7 +90,7 @@
 	// will not do anything else with the connection.
 	// It becomes the caller's responsibility to manage
 	// and close the connection.
-	Hijack() (net.Conn, *bufio.ReadWriter, os.Error)
+	Hijack() (net.Conn, *bufio.ReadWriter, error)
 }
 
 // A conn represents the server side of an HTTP connection.
@@ -148,7 +148,7 @@
 	io.Writer
 }
 
-func (w *response) ReadFrom(src io.Reader) (n int64, err os.Error) {
+func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
 	// Flush before checking w.chunking, as Flush will call
 	// WriteHeader if it hasn't been called yet, and WriteHeader
 	// is what sets w.chunking.
@@ -169,7 +169,7 @@
 const noLimit int64 = (1 << 63) - 1
 
 // Create new connection from rwc.
-func (srv *Server) newConn(rwc net.Conn) (c *conn, err os.Error) {
+func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
 	c = new(conn)
 	c.remoteAddr = rwc.RemoteAddr().String()
 	c.server = srv
@@ -202,9 +202,9 @@
 	closed     bool
 }
 
-func (ecr *expectContinueReader) Read(p []byte) (n int, err os.Error) {
+func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
 	if ecr.closed {
-		return 0, os.NewError("http: Read after Close on request Body")
+		return 0, errors.New("http: Read after Close on request Body")
 	}
 	if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked {
 		ecr.resp.wroteContinue = true
@@ -214,7 +214,7 @@
 	return ecr.readCloser.Read(p)
 }
 
-func (ecr *expectContinueReader) Close() os.Error {
+func (ecr *expectContinueReader) Close() error {
 	ecr.closed = true
 	return ecr.readCloser.Close()
 }
@@ -225,10 +225,10 @@
 // It is like time.RFC1123 but hard codes GMT as the time zone.
 const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
 
-var errTooLarge = os.NewError("http: request too large")
+var errTooLarge = errors.New("http: request too large")
 
 // Read next request from connection.
-func (c *conn) readRequest() (w *response, err os.Error) {
+func (c *conn) readRequest() (w *response, err error) {
 	if c.hijacked {
 		return nil, ErrHijacked
 	}
@@ -285,7 +285,7 @@
 	var hasCL bool
 	var contentLength int64
 	if clenStr := w.header.Get("Content-Length"); clenStr != "" {
-		var err os.Error
+		var err error
 		contentLength, err = strconv.Atoi64(clenStr)
 		if err == nil {
 			hasCL = true
@@ -439,7 +439,7 @@
 	return w.status != StatusNotModified && w.req.Method != "HEAD"
 }
 
-func (w *response) Write(data []byte) (n int, err os.Error) {
+func (w *response) Write(data []byte) (n int, err error) {
 	if w.conn.hijacked {
 		log.Print("http: response.Write on hijacked connection")
 		return 0, ErrHijacked
@@ -663,7 +663,7 @@
 
 // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter
 // and a Hijacker.
-func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err os.Error) {
+func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {
 	if w.conn.hijacked {
 		return nil, nil, ErrHijacked
 	}
@@ -943,7 +943,7 @@
 // creating a new service thread for each.  The service threads
 // read requests and then call handler to reply to them.
 // Handler is typically nil, in which case the DefaultServeMux is used.
-func Serve(l net.Listener, handler Handler) os.Error {
+func Serve(l net.Listener, handler Handler) error {
 	srv := &Server{Handler: handler}
 	return srv.Serve(l)
 }
@@ -960,7 +960,7 @@
 // ListenAndServe listens on the TCP network address srv.Addr and then
 // calls Serve to handle requests on incoming connections.  If
 // srv.Addr is blank, ":http" is used.
-func (srv *Server) ListenAndServe() os.Error {
+func (srv *Server) ListenAndServe() error {
 	addr := srv.Addr
 	if addr == "" {
 		addr = ":http"
@@ -975,7 +975,7 @@
 // Serve accepts incoming connections on the Listener l, creating a
 // new service thread for each.  The service threads read requests and
 // then call srv.Handler to reply to them.
-func (srv *Server) Serve(l net.Listener) os.Error {
+func (srv *Server) Serve(l net.Listener) error {
 	defer l.Close()
 	for {
 		rw, e := l.Accept()
@@ -1028,7 +1028,7 @@
 //			log.Fatal("ListenAndServe: ", err.String())
 //		}
 //	}
-func ListenAndServe(addr string, handler Handler) os.Error {
+func ListenAndServe(addr string, handler Handler) error {
 	server := &Server{Addr: addr, Handler: handler}
 	return server.ListenAndServe()
 }
@@ -1061,7 +1061,7 @@
 //	}
 //
 // One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem.
-func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) os.Error {
+func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error {
 	server := &Server{Addr: addr, Handler: handler}
 	return server.ListenAndServeTLS(certFile, keyFile)
 }
@@ -1075,7 +1075,7 @@
 // of the server's certificate followed by the CA's certificate.
 //
 // If srv.Addr is blank, ":https" is used.
-func (s *Server) ListenAndServeTLS(certFile, keyFile string) os.Error {
+func (s *Server) ListenAndServeTLS(certFile, keyFile string) error {
 	addr := s.Addr
 	if addr == "" {
 		addr = ":https"
@@ -1086,7 +1086,7 @@
 		NextProtos: []string{"http/1.1"},
 	}
 
-	var err os.Error
+	var err error
 	config.Certificates = make([]tls.Certificate, 1)
 	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
 	if err != nil {
@@ -1119,7 +1119,7 @@
 
 // ErrHandlerTimeout is returned on ResponseWriter Write calls
 // in handlers which have timed out.
-var ErrHandlerTimeout = os.NewError("http: Handler timeout")
+var ErrHandlerTimeout = errors.New("http: Handler timeout")
 
 type timeoutHandler struct {
 	handler Handler
@@ -1167,7 +1167,7 @@
 	return tw.w.Header()
 }
 
-func (tw *timeoutWriter) Write(p []byte) (int, os.Error) {
+func (tw *timeoutWriter) Write(p []byte) (int, error) {
 	tw.mu.Lock()
 	timedOut := tw.timedOut
 	tw.mu.Unlock()
diff --git a/src/pkg/http/transfer.go b/src/pkg/http/transfer.go
index 868a114..6cb8625 100644
--- a/src/pkg/http/transfer.go
+++ b/src/pkg/http/transfer.go
@@ -7,10 +7,10 @@
 import (
 	"bytes"
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
-	"os"
 	"strconv"
 	"strings"
 )
@@ -29,7 +29,7 @@
 	Trailer          Header
 }
 
-func newTransferWriter(r interface{}) (t *transferWriter, err os.Error) {
+func newTransferWriter(r interface{}) (t *transferWriter, err error) {
 	t = &transferWriter{}
 
 	// Extract relevant fields
@@ -133,7 +133,7 @@
 	return false
 }
 
-func (t *transferWriter) WriteHeader(w io.Writer) (err os.Error) {
+func (t *transferWriter) WriteHeader(w io.Writer) (err error) {
 	if t.Close {
 		_, err = io.WriteString(w, "Connection: close\r\n")
 		if err != nil {
@@ -181,7 +181,7 @@
 	return
 }
 
-func (t *transferWriter) WriteBody(w io.Writer) (err os.Error) {
+func (t *transferWriter) WriteBody(w io.Writer) (err error) {
 	var ncopy int64
 
 	// Write body
@@ -254,7 +254,7 @@
 }
 
 // msg is *Request or *Response.
-func readTransfer(msg interface{}, r *bufio.Reader) (err os.Error) {
+func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
 	t := &transferReader{}
 
 	// Unify input
@@ -360,7 +360,7 @@
 func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
 
 // Sanitize transfer encoding
-func fixTransferEncoding(requestMethod string, header Header) ([]string, os.Error) {
+func fixTransferEncoding(requestMethod string, header Header) ([]string, error) {
 	raw, present := header["Transfer-Encoding"]
 	if !present {
 		return nil, nil
@@ -409,7 +409,7 @@
 // Determine the expected body length, using RFC 2616 Section 4.4. This
 // function is not a method, because ultimately it should be shared by
 // ReadResponse and ReadRequest.
-func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, os.Error) {
+func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
 
 	// Logic based on response type or status
 	if noBodyExpected(requestMethod) {
@@ -482,7 +482,7 @@
 }
 
 // Parse the trailer header
-func fixTrailer(header Header, te []string) (Header, os.Error) {
+func fixTrailer(header Header, te []string) (Header, error) {
 	raw := header.Get("Trailer")
 	if raw == "" {
 		return nil, nil
@@ -526,16 +526,16 @@
 // the body has been closed. This typically happens when the body is
 // read after an HTTP Handler calls WriteHeader or Write on its
 // ResponseWriter.
-var ErrBodyReadAfterClose = os.NewError("http: invalid Read on closed request Body")
+var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed request Body")
 
-func (b *body) Read(p []byte) (n int, err os.Error) {
+func (b *body) Read(p []byte) (n int, err error) {
 	if b.closed {
 		return 0, ErrBodyReadAfterClose
 	}
 	return b.Reader.Read(p)
 }
 
-func (b *body) Close() os.Error {
+func (b *body) Close() error {
 	if b.closed {
 		return nil
 	}
diff --git a/src/pkg/http/transport.go b/src/pkg/http/transport.go
index 1d4433d..c7041cba 100644
--- a/src/pkg/http/transport.go
+++ b/src/pkg/http/transport.go
@@ -14,6 +14,7 @@
 	"compress/gzip"
 	"crypto/tls"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -52,12 +53,12 @@
 	// Request. If the function returns a non-nil error, the
 	// request is aborted with the provided error.
 	// If Proxy is nil or returns a nil *URL, no proxy is used.
-	Proxy func(*Request) (*url.URL, os.Error)
+	Proxy func(*Request) (*url.URL, error)
 
 	// Dial specifies the dial function for creating TCP
 	// connections.
 	// If Dial is nil, net.Dial is used.
-	Dial func(net, addr string) (c net.Conn, err os.Error)
+	Dial func(net, addr string) (c net.Conn, err error)
 
 	// TLSClientConfig specifies the TLS configuration to use with
 	// tls.Client. If nil, the default configuration is used.
@@ -76,7 +77,7 @@
 // given request, as indicated by the environment variables
 // $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy).
 // Either URL or an error is returned.
-func ProxyFromEnvironment(req *Request) (*url.URL, os.Error) {
+func ProxyFromEnvironment(req *Request) (*url.URL, error) {
 	proxy := getenvEitherCase("HTTP_PROXY")
 	if proxy == "" {
 		return nil, nil
@@ -86,12 +87,12 @@
 	}
 	proxyURL, err := url.ParseRequest(proxy)
 	if err != nil {
-		return nil, os.NewError("invalid proxy address")
+		return nil, errors.New("invalid proxy address")
 	}
 	if proxyURL.Host == "" {
 		proxyURL, err = url.ParseRequest("http://" + proxy)
 		if err != nil {
-			return nil, os.NewError("invalid proxy address")
+			return nil, errors.New("invalid proxy address")
 		}
 	}
 	return proxyURL, nil
@@ -99,8 +100,8 @@
 
 // ProxyURL returns a proxy function (for use in a Transport)
 // that always returns the same URL.
-func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, os.Error) {
-	return func(*Request) (*url.URL, os.Error) {
+func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
+	return func(*Request) (*url.URL, error) {
 		return fixedURL, nil
 	}
 }
@@ -120,12 +121,12 @@
 }
 
 // RoundTrip implements the RoundTripper interface.
-func (t *Transport) RoundTrip(req *Request) (resp *Response, err os.Error) {
+func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) {
 	if req.URL == nil {
-		return nil, os.NewError("http: nil Request.URL")
+		return nil, errors.New("http: nil Request.URL")
 	}
 	if req.Header == nil {
-		return nil, os.NewError("http: nil Request.Header")
+		return nil, errors.New("http: nil Request.Header")
 	}
 	if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
 		t.lk.Lock()
@@ -207,13 +208,13 @@
 	return os.Getenv(strings.ToLower(k))
 }
 
-func (t *Transport) connectMethodForRequest(treq *transportRequest) (*connectMethod, os.Error) {
+func (t *Transport) connectMethodForRequest(treq *transportRequest) (*connectMethod, error) {
 	cm := &connectMethod{
 		targetScheme: treq.URL.Scheme,
 		targetAddr:   canonicalAddr(treq.URL),
 	}
 	if t.Proxy != nil {
-		var err os.Error
+		var err error
 		cm.proxyURL, err = t.Proxy(treq.Request)
 		if err != nil {
 			return nil, err
@@ -285,7 +286,7 @@
 	return
 }
 
-func (t *Transport) dial(network, addr string) (c net.Conn, err os.Error) {
+func (t *Transport) dial(network, addr string) (c net.Conn, err error) {
 	if t.Dial != nil {
 		return t.Dial(network, addr)
 	}
@@ -296,7 +297,7 @@
 // specified in the connectMethod.  This includes doing a proxy CONNECT
 // and/or setting up TLS.  If this doesn't return an error, the persistConn
 // is ready to write requests to.
-func (t *Transport) getConn(cm *connectMethod) (*persistConn, os.Error) {
+func (t *Transport) getConn(cm *connectMethod) (*persistConn, error) {
 	if pc := t.getIdleConn(cm); pc != nil {
 		return pc, nil
 	}
@@ -352,7 +353,7 @@
 		if resp.StatusCode != 200 {
 			f := strings.SplitN(resp.Status, " ", 2)
 			conn.Close()
-			return nil, os.NewError(f[1])
+			return nil, errors.New(f[1])
 		}
 	}
 
@@ -500,10 +501,10 @@
 	return pc.numExpectedResponses > 0
 }
 
-var remoteSideClosedFunc func(os.Error) bool // or nil to use default
+var remoteSideClosedFunc func(error) bool // or nil to use default
 
-func remoteSideClosed(err os.Error) bool {
-	if err == os.EOF || err == os.EINVAL {
+func remoteSideClosed(err error) bool {
+	if err == io.EOF || err == os.EINVAL {
 		return true
 	}
 	if remoteSideClosedFunc != nil {
@@ -532,7 +533,7 @@
 		}
 
 		rc := <-pc.reqch
-		resp, err := pc.cc.readUsing(rc.req, func(buf *bufio.Reader, forReq *Request) (*Response, os.Error) {
+		resp, err := pc.cc.readUsing(rc.req, func(buf *bufio.Reader, forReq *Request) (*Response, error) {
 			resp, err := ReadResponse(buf, forReq)
 			if err != nil || resp.ContentLength == 0 {
 				return resp, err
@@ -599,7 +600,7 @@
 
 type responseAndError struct {
 	res *Response
-	err os.Error
+	err error
 }
 
 type requestAndChan struct {
@@ -612,7 +613,7 @@
 	addedGzip bool
 }
 
-func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err os.Error) {
+func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
 	if pc.mutateHeaderFunc != nil {
 		pc.mutateHeaderFunc(req.extraHeaders())
 	}
@@ -634,7 +635,7 @@
 	pc.numExpectedResponses++
 	pc.lk.Unlock()
 
-	pc.cc.writeReq = func(r *Request, w io.Writer) os.Error {
+	pc.cc.writeReq = func(r *Request, w io.Writer) error {
 		return r.write(w, pc.isProxy, req.extra)
 	}
 
@@ -691,19 +692,19 @@
 	isClosed bool
 }
 
-func (es *bodyEOFSignal) Read(p []byte) (n int, err os.Error) {
+func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
 	n, err = es.body.Read(p)
 	if es.isClosed && n > 0 {
 		panic("http: unexpected bodyEOFSignal Read after Close; see issue 1725")
 	}
-	if err == os.EOF && es.fn != nil {
+	if err == io.EOF && es.fn != nil {
 		es.fn()
 		es.fn = nil
 	}
 	return
 }
 
-func (es *bodyEOFSignal) Close() (err os.Error) {
+func (es *bodyEOFSignal) Close() (err error) {
 	if es.isClosed {
 		return nil
 	}
@@ -721,7 +722,7 @@
 	io.Closer
 }
 
-func (r *readFirstCloseBoth) Close() os.Error {
+func (r *readFirstCloseBoth) Close() error {
 	if err := r.ReadCloser.Close(); err != nil {
 		r.Closer.Close()
 		return err
@@ -737,7 +738,7 @@
 	io.ReadCloser
 }
 
-func (d *discardOnCloseReadCloser) Close() os.Error {
+func (d *discardOnCloseReadCloser) Close() error {
 	io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed
 	return d.ReadCloser.Close()
 }
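The remoteSideClosed and bodyEOFSignal hunks above replace os.EOF with io.EOF; the surrounding read loops keep the same shape. A small, assumed-standalone example of the end-of-stream check after the rename, with strings.NewReader standing in for a network body:

	package main

	import (
		"fmt"
		"io"
		"strings"
	)

	func main() {
		r := strings.NewReader("hello")
		buf := make([]byte, 2)
		for {
			n, err := r.Read(buf)
			if n > 0 {
				fmt.Printf("read %q\n", buf[:n])
			}
			if err == io.EOF { // was os.EOF before this conversion
				break
			}
			if err != nil {
				fmt.Println("read error:", err)
				break
			}
		}
	}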
diff --git a/src/pkg/http/transport_test.go b/src/pkg/http/transport_test.go
index f3162b9..b2d0eba 100644
--- a/src/pkg/http/transport_test.go
+++ b/src/pkg/http/transport_test.go
@@ -15,7 +15,6 @@
 	"http/httptest"
 	"io"
 	"io/ioutil"
-	"os"
 	"strconv"
 	"strings"
 	"testing"
@@ -77,7 +76,7 @@
 
 		fetch := func(n int) string {
 			req := new(Request)
-			var err os.Error
+			var err error
 			req.URL, err = url.Parse(ts.URL + fmt.Sprintf("/?close=%v", connectionClose))
 			if err != nil {
 				t.Fatalf("URL parse error: %v", err)
@@ -119,7 +118,7 @@
 
 		fetch := func(n int) string {
 			req := new(Request)
-			var err os.Error
+			var err error
 			req.URL, err = url.Parse(ts.URL)
 			if err != nil {
 				t.Fatalf("URL parse error: %v", err)
@@ -575,7 +574,7 @@
 
 type fooProto struct{}
 
-func (fooProto) RoundTrip(req *Request) (*Response, os.Error) {
+func (fooProto) RoundTrip(req *Request) (*Response, error) {
 	res := &Response{
 		Status:     "200 OK",
 		StatusCode: 200,
diff --git a/src/pkg/http/transport_windows.go b/src/pkg/http/transport_windows.go
index 1ae7d83..e0dc857 100644
--- a/src/pkg/http/transport_windows.go
+++ b/src/pkg/http/transport_windows.go
@@ -10,9 +10,9 @@
 )
 
 func init() {
-	remoteSideClosedFunc = func(err os.Error) (out bool) {
+	remoteSideClosedFunc = func(err error) (out bool) {
 		op, ok := err.(*net.OpError)
-		if ok && op.Op == "WSARecv" && op.Net == "tcp" && op.Error == os.Errno(10058) {
+		if ok && op.Op == "WSARecv" && op.Net == "tcp" && op.Err == os.Errno(10058) {
 			// TODO(bradfitz): find the symbol for 10058
 			return true
 		}
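The transport_windows.go and serve_test.go hunks also track a field rename on net.OpError: the wrapped failure now lives in Err rather than Error. A brief sketch of inspecting it after a type assertion; the hand-built OpError here is illustrative only, not how the transport produces one.

	package main

	import (
		"errors"
		"fmt"
		"net"
	)

	func main() {
		// A hand-built *net.OpError; the wrapped failure is read from the
		// exported Err field (renamed from Error in this conversion).
		var err error = &net.OpError{Op: "read", Net: "tcp", Err: errors.New("connection reset")}
		if op, ok := err.(*net.OpError); ok {
			fmt.Println(op.Op, op.Net, op.Err)
		}
	}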
diff --git a/src/pkg/image/bmp/reader.go b/src/pkg/image/bmp/reader.go
index 134de5b..ad56865 100644
--- a/src/pkg/image/bmp/reader.go
+++ b/src/pkg/image/bmp/reader.go
@@ -8,15 +8,15 @@
 package bmp
 
 import (
+	"errors"
 	"image/color"
 	"image"
 	"io"
-	"os"
 )
 
 // ErrUnsupported means that the input BMP image uses a valid but unsupported
 // feature.
-var ErrUnsupported = os.NewError("bmp: unsupported BMP image")
+var ErrUnsupported = errors.New("bmp: unsupported BMP image")
 
 func readUint16(b []byte) uint16 {
 	return uint16(b[0]) | uint16(b[1])<<8
@@ -27,7 +27,7 @@
 }
 
 // decodePaletted reads an 8 bit-per-pixel BMP image from r.
-func decodePaletted(r io.Reader, c image.Config) (image.Image, os.Error) {
+func decodePaletted(r io.Reader, c image.Config) (image.Image, error) {
 	var tmp [4]byte
 	paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette))
 	// BMP images are stored bottom-up rather than top-down.
@@ -49,7 +49,7 @@
 }
 
 // decodeRGBA reads a 24 bit-per-pixel BMP image from r.
-func decodeRGBA(r io.Reader, c image.Config) (image.Image, os.Error) {
+func decodeRGBA(r io.Reader, c image.Config) (image.Image, error) {
 	rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height))
 	// There are 3 bytes per pixel, and each row is 4-byte aligned.
 	b := make([]byte, (3*c.Width+3)&^3)
@@ -73,7 +73,7 @@
 
 // Decode reads a BMP image from r and returns it as an image.Image.
 // Limitation: The file must be 8 or 24 bits per pixel.
-func Decode(r io.Reader) (image.Image, os.Error) {
+func Decode(r io.Reader) (image.Image, error) {
 	c, err := DecodeConfig(r)
 	if err != nil {
 		return nil, err
@@ -87,7 +87,7 @@
 // DecodeConfig returns the color model and dimensions of a BMP image without
 // decoding the entire image.
 // Limitation: The file must be 8 or 24 bits per pixel.
-func DecodeConfig(r io.Reader) (config image.Config, err os.Error) {
+func DecodeConfig(r io.Reader) (config image.Config, err error) {
 	// We only support those BMP images that are a BITMAPFILEHEADER
 	// immediately followed by a BITMAPINFOHEADER.
 	const (
@@ -99,7 +99,7 @@
 		return
 	}
 	if string(b[:2]) != "BM" {
-		err = os.NewError("bmp: invalid format")
+		err = errors.New("bmp: invalid format")
 		return
 	}
 	offset := readUint32(b[10:14])
diff --git a/src/pkg/image/decode_test.go b/src/pkg/image/decode_test.go
index b348c1d..1b7db8b 100644
--- a/src/pkg/image/decode_test.go
+++ b/src/pkg/image/decode_test.go
@@ -41,7 +41,7 @@
 	{"testdata/video-005.gray.png", "testdata/video-005.gray.png", 0},
 }
 
-func decode(filename string) (image.Image, string, os.Error) {
+func decode(filename string) (image.Image, string, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return nil, "", err
@@ -50,7 +50,7 @@
 	return image.Decode(bufio.NewReader(f))
 }
 
-func decodeConfig(filename string) (image.Config, string, os.Error) {
+func decodeConfig(filename string) (image.Config, string, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return image.Config{}, "", err
@@ -83,7 +83,7 @@
 	for _, it := range imageTests {
 		g := golden[it.goldenFilename]
 		if g == nil {
-			var err os.Error
+			var err error
 			g, _, err = decode(it.goldenFilename)
 			if err != nil {
 				t.Errorf("%s: %v", it.goldenFilename, err)
diff --git a/src/pkg/image/format.go b/src/pkg/image/format.go
index b485932..78fc3ed 100644
--- a/src/pkg/image/format.go
+++ b/src/pkg/image/format.go
@@ -6,18 +6,18 @@
 
 import (
 	"bufio"
+	"errors"
 	"io"
-	"os"
 )
 
 // An UnknownFormatErr indicates that decoding encountered an unknown format.
-var UnknownFormatErr = os.NewError("image: unknown format")
+var UnknownFormatErr = errors.New("image: unknown format")
 
 // A format holds an image format's name, magic header and how to decode it.
 type format struct {
 	name, magic  string
-	decode       func(io.Reader) (Image, os.Error)
-	decodeConfig func(io.Reader) (Config, os.Error)
+	decode       func(io.Reader) (Image, error)
+	decodeConfig func(io.Reader) (Config, error)
 }
 
 // Formats is the list of registered formats.
@@ -29,14 +29,14 @@
 // string can contain "?" wildcards that each match any one byte.
 // Decode is the function that decodes the encoded image.
 // DecodeConfig is the function that decodes just its configuration.
-func RegisterFormat(name, magic string, decode func(io.Reader) (Image, os.Error), decodeConfig func(io.Reader) (Config, os.Error)) {
+func RegisterFormat(name, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error)) {
 	formats = append(formats, format{name, magic, decode, decodeConfig})
 }
 
 // A reader is an io.Reader that can also peek ahead.
 type reader interface {
 	io.Reader
-	Peek(int) ([]byte, os.Error)
+	Peek(int) ([]byte, error)
 }
 
 // AsReader converts an io.Reader to a reader.
@@ -75,7 +75,7 @@
 // The string returned is the format name used during format registration.
 // Format registration is typically done by the init method of the codec-
 // specific package.
-func Decode(r io.Reader) (Image, string, os.Error) {
+func Decode(r io.Reader) (Image, string, error) {
 	rr := asReader(r)
 	f := sniff(rr)
 	if f.decode == nil {
@@ -89,7 +89,7 @@
 // been encoded in a registered format. The string returned is the format name
 // used during format registration. Format registration is typically done by
 // the init method of the codec-specific package.
-func DecodeConfig(r io.Reader) (Config, string, os.Error) {
+func DecodeConfig(r io.Reader) (Config, string, error) {
 	rr := asReader(r)
 	f := sniff(rr)
 	if f.decodeConfig == nil {
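
The format registry is where the new signatures become part of the public API: the decode and decodeConfig fields, and therefore RegisterFormat, now take functions returning error. The following is a hedged sketch of how a codec package hooks in under the rewritten signatures; the "xyz" format and its stub decoders are invented for illustration.

	package xyz

	import (
		"errors"
		"image"
		"io"
	)

	// Decode and DecodeConfig match the rewritten function types expected by
	// image.RegisterFormat: (image.Image, error) and (image.Config, error).
	func Decode(r io.Reader) (image.Image, error) {
		return nil, errors.New("xyz: decoding not implemented")
	}

	func DecodeConfig(r io.Reader) (image.Config, error) {
		return image.Config{}, errors.New("xyz: decoding not implemented")
	}

	func init() {
		// "XYZ?" uses the magic-string wildcard described in RegisterFormat's doc comment.
		image.RegisterFormat("xyz", "XYZ?", Decode, DecodeConfig)
	}
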
diff --git a/src/pkg/image/gif/reader.go b/src/pkg/image/gif/reader.go
index a5a4265..24b53c5 100644
--- a/src/pkg/image/gif/reader.go
+++ b/src/pkg/image/gif/reader.go
@@ -10,11 +10,11 @@
 import (
 	"bufio"
 	"compress/lzw"
+	"errors"
 	"fmt"
 	"image"
 	"image/color"
 	"io"
-	"os"
 )
 
 // If the io.Reader does not also have ReadByte, then decode will introduce its own buffering.
@@ -97,7 +97,7 @@
 	tmp   [256]byte
 }
 
-func (b *blockReader) Read(p []byte) (int, os.Error) {
+func (b *blockReader) Read(p []byte) (int, error) {
 	if len(p) == 0 {
 		return 0, nil
 	}
@@ -107,7 +107,7 @@
 			return 0, err
 		}
 		if blockLen == 0 {
-			return 0, os.EOF
+			return 0, io.EOF
 		}
 		b.slice = b.tmp[0:blockLen]
 		if _, err = io.ReadFull(b.r, b.slice); err != nil {
@@ -120,7 +120,7 @@
 }
 
 // decode reads a GIF image from r and stores the result in d.
-func (d *decoder) decode(r io.Reader, configOnly bool) os.Error {
+func (d *decoder) decode(r io.Reader, configOnly bool) error {
 	// Add buffering if r does not provide ReadByte.
 	if rr, ok := r.(reader); ok {
 		d.r = rr
@@ -146,7 +146,7 @@
 	for err == nil {
 		var c byte
 		c, err = d.r.ReadByte()
-		if err == os.EOF {
+		if err == io.EOF {
 			break
 		}
 		switch c {
@@ -189,7 +189,7 @@
 				return err
 			}
 			if c != 0 {
-				return os.NewError("gif: extra data after image")
+				return errors.New("gif: extra data after image")
 			}
 
 			// Undo the interlacing if necessary.
@@ -217,7 +217,7 @@
 	return nil
 }
 
-func (d *decoder) readHeaderAndScreenDescriptor() os.Error {
+func (d *decoder) readHeaderAndScreenDescriptor() error {
 	_, err := io.ReadFull(d.r, d.tmp[0:13])
 	if err != nil {
 		return err
@@ -236,7 +236,7 @@
 	return nil
 }
 
-func (d *decoder) readColorMap() (color.Palette, os.Error) {
+func (d *decoder) readColorMap() (color.Palette, error) {
 	if d.pixelSize > 8 {
 		return nil, fmt.Errorf("gif: can't handle %d bits per pixel", d.pixelSize)
 	}
@@ -258,7 +258,7 @@
 	return colorMap, nil
 }
 
-func (d *decoder) readExtension() os.Error {
+func (d *decoder) readExtension() error {
 	extension, err := d.r.ReadByte()
 	if err != nil {
 		return err
@@ -307,7 +307,7 @@
 	panic("unreachable")
 }
 
-func (d *decoder) readGraphicControl() os.Error {
+func (d *decoder) readGraphicControl() error {
 	if _, err := io.ReadFull(d.r, d.tmp[0:6]); err != nil {
 		return fmt.Errorf("gif: can't read graphic control: %s", err)
 	}
@@ -326,7 +326,7 @@
 	}
 }
 
-func (d *decoder) newImageFromDescriptor() (*image.Paletted, os.Error) {
+func (d *decoder) newImageFromDescriptor() (*image.Paletted, error) {
 	if _, err := io.ReadFull(d.r, d.tmp[0:9]); err != nil {
 		return nil, fmt.Errorf("gif: can't read image descriptor: %s", err)
 	}
@@ -338,7 +338,7 @@
 	return image.NewPaletted(image.Rect(left, top, left+width, top+height), nil), nil
 }
 
-func (d *decoder) readBlock() (int, os.Error) {
+func (d *decoder) readBlock() (int, error) {
 	n, err := d.r.ReadByte()
 	if n == 0 || err != nil {
 		return 0, err
@@ -379,7 +379,7 @@
 
 // Decode reads a GIF image from r and returns the first embedded
 // image as an image.Image.
-func Decode(r io.Reader) (image.Image, os.Error) {
+func Decode(r io.Reader) (image.Image, error) {
 	var d decoder
 	if err := d.decode(r, false); err != nil {
 		return nil, err
@@ -396,7 +396,7 @@
 
 // DecodeAll reads a GIF image from r and returns the sequential frames
 // and timing information.
-func DecodeAll(r io.Reader) (*GIF, os.Error) {
+func DecodeAll(r io.Reader) (*GIF, error) {
 	var d decoder
 	if err := d.decode(r, false); err != nil {
 		return nil, err
@@ -411,7 +411,7 @@
 
 // DecodeConfig returns the global color model and dimensions of a GIF image
 // without decoding the entire image.
-func DecodeConfig(r io.Reader) (image.Config, os.Error) {
+func DecodeConfig(r io.Reader) (image.Config, error) {
 	var d decoder
 	if err := d.decode(r, true); err != nil {
 		return image.Config{}, err
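
blockReader shows the conversion from the side of a Reader implementation: end of the sub-block stream is now reported with io.EOF instead of os.EOF. Here is a toy sketch of the same contract; the chunkReader type is invented purely for illustration.

	package main

	import (
		"fmt"
		"io"
		"io/ioutil"
	)

	// chunkReader serves a fixed byte slice and then reports io.EOF, the same
	// way gif's blockReader signals that the last sub-block has been consumed.
	type chunkReader struct {
		data []byte
	}

	func (r *chunkReader) Read(p []byte) (int, error) {
		if len(r.data) == 0 {
			return 0, io.EOF
		}
		n := copy(p, r.data)
		r.data = r.data[n:]
		return n, nil
	}

	func main() {
		b, err := ioutil.ReadAll(&chunkReader{data: []byte("sub-block")})
		fmt.Printf("%q %v\n", b, err) // "sub-block" <nil>; ReadAll treats io.EOF as normal termination
	}
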
diff --git a/src/pkg/image/jpeg/huffman.go b/src/pkg/image/jpeg/huffman.go
index 0d03a73..d238249 100644
--- a/src/pkg/image/jpeg/huffman.go
+++ b/src/pkg/image/jpeg/huffman.go
@@ -4,10 +4,7 @@
 
 package jpeg
 
-import (
-	"io"
-	"os"
-)
+import "io"
 
 // Each code is at most 16 bits long.
 const maxCodeLength = 16
@@ -36,7 +33,7 @@
 }
 
 // Reads bytes from the io.Reader to ensure that bits.n is at least n.
-func (d *decoder) ensureNBits(n int) os.Error {
+func (d *decoder) ensureNBits(n int) error {
 	for d.b.n < n {
 		c, err := d.r.ReadByte()
 		if err != nil {
@@ -64,7 +61,7 @@
 }
 
 // The composition of RECEIVE and EXTEND, specified in section F.2.2.1.
-func (d *decoder) receiveExtend(t uint8) (int, os.Error) {
+func (d *decoder) receiveExtend(t uint8) (int, error) {
 	err := d.ensureNBits(int(t))
 	if err != nil {
 		return 0, err
@@ -81,7 +78,7 @@
 
 // Processes a Define Huffman Table marker, and initializes a huffman struct from its contents.
 // Specified in section B.2.4.2.
-func (d *decoder) processDHT(n int) os.Error {
+func (d *decoder) processDHT(n int) error {
 	for n > 0 {
 		if n < 17 {
 			return FormatError("DHT has wrong length")
@@ -167,7 +164,7 @@
 // Returns the next Huffman-coded value from the bit stream, decoded according to h.
 // TODO(nigeltao): This decoding algorithm is simple, but slow. A lookahead table, instead of always
 // peeling off only 1 bit at at time, ought to be faster.
-func (d *decoder) decodeHuffman(h *huffman) (uint8, os.Error) {
+func (d *decoder) decodeHuffman(h *huffman) (uint8, error) {
 	if h.length == 0 {
 		return 0, FormatError("uninitialized Huffman table")
 	}
diff --git a/src/pkg/image/jpeg/reader.go b/src/pkg/image/jpeg/reader.go
index 450355e..c1fc2d5 100644
--- a/src/pkg/image/jpeg/reader.go
+++ b/src/pkg/image/jpeg/reader.go
@@ -13,7 +13,6 @@
 	"image/color"
 	"image/ycbcr"
 	"io"
-	"os"
 )
 
 // TODO(nigeltao): fix up the doc comment style so that sentences start with
@@ -22,12 +21,12 @@
 // A FormatError reports that the input is not a valid JPEG.
 type FormatError string
 
-func (e FormatError) String() string { return "invalid JPEG format: " + string(e) }
+func (e FormatError) Error() string { return "invalid JPEG format: " + string(e) }
 
 // An UnsupportedError reports that the input uses a valid but unimplemented JPEG feature.
 type UnsupportedError string
 
-func (e UnsupportedError) String() string { return "unsupported JPEG feature: " + string(e) }
+func (e UnsupportedError) Error() string { return "unsupported JPEG feature: " + string(e) }
 
 // Component specification, specified in section B.2.2.
 type component struct {
@@ -91,7 +90,7 @@
 // If the passed in io.Reader does not also have ReadByte, then Decode will introduce its own buffering.
 type Reader interface {
 	io.Reader
-	ReadByte() (c byte, err os.Error)
+	ReadByte() (c byte, err error)
 }
 
 type decoder struct {
@@ -109,7 +108,7 @@
 }
 
 // Reads and ignores the next n bytes.
-func (d *decoder) ignore(n int) os.Error {
+func (d *decoder) ignore(n int) error {
 	for n > 0 {
 		m := len(d.tmp)
 		if m > n {
@@ -125,7 +124,7 @@
 }
 
 // Specified in section B.2.2.
-func (d *decoder) processSOF(n int) os.Error {
+func (d *decoder) processSOF(n int) error {
 	switch n {
 	case 6 + 3*nGrayComponent:
 		d.nComp = nGrayComponent
@@ -172,7 +171,7 @@
 }
 
 // Specified in section B.2.4.1.
-func (d *decoder) processDQT(n int) os.Error {
+func (d *decoder) processDQT(n int) error {
 	const qtLength = 1 + blockSize
 	for ; n >= qtLength; n -= qtLength {
 		_, err := io.ReadFull(d.r, d.tmp[0:qtLength])
@@ -229,7 +228,7 @@
 }
 
 // Specified in section B.2.3.
-func (d *decoder) processSOS(n int) os.Error {
+func (d *decoder) processSOS(n int) error {
 	if d.nComp == 0 {
 		return FormatError("missing SOF marker")
 	}
@@ -362,7 +361,7 @@
 }
 
 // Specified in section B.2.4.4.
-func (d *decoder) processDRI(n int) os.Error {
+func (d *decoder) processDRI(n int) error {
 	if n != 2 {
 		return FormatError("DRI has wrong length")
 	}
@@ -375,7 +374,7 @@
 }
 
 // decode reads a JPEG image from r and returns it as an image.Image.
-func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, os.Error) {
+func (d *decoder) decode(r io.Reader, configOnly bool) (image.Image, error) {
 	if rr, ok := r.(Reader); ok {
 		d.r = rr
 	} else {
@@ -451,14 +450,14 @@
 }
 
 // Decode reads a JPEG image from r and returns it as an image.Image.
-func Decode(r io.Reader) (image.Image, os.Error) {
+func Decode(r io.Reader) (image.Image, error) {
 	var d decoder
 	return d.decode(r, false)
 }
 
 // DecodeConfig returns the color model and dimensions of a JPEG image without
 // decoding the entire image.
-func DecodeConfig(r io.Reader) (image.Config, os.Error) {
+func DecodeConfig(r io.Reader) (image.Config, error) {
 	var d decoder
 	if _, err := d.decode(r, true); err != nil {
 		return image.Config{}, err
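
Beyond the signature rewrites, the substantive change in this file is that FormatError and UnsupportedError satisfy the error interface through Error() string rather than String(). A small sketch of the same idiom follows; the decode stub is invented, but the error type mirrors the ones above.

	package main

	import "fmt"

	// FormatError mirrors image/jpeg's string-based error type: the method is
	// now named Error, which is what makes the type satisfy the error interface.
	type FormatError string

	func (e FormatError) Error() string { return "invalid format: " + string(e) }

	// decode is a stand-in that fails the way processSOS does when no SOF
	// marker has been seen.
	func decode(sawSOF bool) error {
		if !sawSOF {
			return FormatError("missing SOF marker")
		}
		return nil
	}

	func main() {
		err := decode(false)
		// The concrete type is unchanged, so callers can still distinguish
		// format errors from unsupported-feature errors by type assertion.
		if fe, ok := err.(FormatError); ok {
			fmt.Println("format problem:", fe)
		}
	}
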
diff --git a/src/pkg/image/jpeg/writer.go b/src/pkg/image/jpeg/writer.go
index 2bb6df5..fab0bd0 100644
--- a/src/pkg/image/jpeg/writer.go
+++ b/src/pkg/image/jpeg/writer.go
@@ -6,10 +6,10 @@
 
 import (
 	"bufio"
+	"errors"
 	"image"
 	"image/ycbcr"
 	"io"
-	"os"
 )
 
 // min returns the minimum of two integers.
@@ -207,9 +207,9 @@
 
 // writer is a buffered writer.
 type writer interface {
-	Flush() os.Error
-	Write([]byte) (int, os.Error)
-	WriteByte(byte) os.Error
+	Flush() error
+	Write([]byte) (int, error)
+	WriteByte(byte) error
 }
 
 // encoder encodes an image to the JPEG format.
@@ -217,7 +217,7 @@
 	// w is the writer to write to. err is the first error encountered during
 	// writing. All attempted writes after the first error become no-ops.
 	w   writer
-	err os.Error
+	err error
 	// buf is a scratch buffer.
 	buf [16]byte
 	// bits and nBits are accumulated bits to write to w.
@@ -487,10 +487,10 @@
 
 // Encode writes the Image m to w in JPEG 4:2:0 baseline format with the given
 // options. Default parameters are used if a nil *Options is passed.
-func Encode(w io.Writer, m image.Image, o *Options) os.Error {
+func Encode(w io.Writer, m image.Image, o *Options) error {
 	b := m.Bounds()
 	if b.Dx() >= 1<<16 || b.Dy() >= 1<<16 {
-		return os.NewError("jpeg: image is too large to encode")
+		return errors.New("jpeg: image is too large to encode")
 	}
 	var e encoder
 	if ww, ok := w.(writer); ok {
diff --git a/src/pkg/image/jpeg/writer_test.go b/src/pkg/image/jpeg/writer_test.go
index 0378252..72cec93 100644
--- a/src/pkg/image/jpeg/writer_test.go
+++ b/src/pkg/image/jpeg/writer_test.go
@@ -36,7 +36,7 @@
 	return d
 }
 
-func readPng(filename string) (image.Image, os.Error) {
+func readPng(filename string) (image.Image, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return nil, err
diff --git a/src/pkg/image/png/reader.go b/src/pkg/image/png/reader.go
index 66f1916..7eb8cea 100644
--- a/src/pkg/image/png/reader.go
+++ b/src/pkg/image/png/reader.go
@@ -16,7 +16,6 @@
 	"image"
 	"image/color"
 	"io"
-	"os"
 )
 
 // Color type, as per the PNG spec.
@@ -90,14 +89,14 @@
 // A FormatError reports that the input is not a valid PNG.
 type FormatError string
 
-func (e FormatError) String() string { return "png: invalid format: " + string(e) }
+func (e FormatError) Error() string { return "png: invalid format: " + string(e) }
 
 var chunkOrderError = FormatError("chunk out of order")
 
 // An UnsupportedError reports that the input uses a valid but unimplemented PNG feature.
 type UnsupportedError string
 
-func (e UnsupportedError) String() string { return "png: unsupported feature: " + string(e) }
+func (e UnsupportedError) Error() string { return "png: unsupported feature: " + string(e) }
 
 func abs(x int) int {
 	if x < 0 {
@@ -113,7 +112,7 @@
 	return b
 }
 
-func (d *decoder) parseIHDR(length uint32) os.Error {
+func (d *decoder) parseIHDR(length uint32) error {
 	if length != 13 {
 		return FormatError("bad IHDR length")
 	}
@@ -189,7 +188,7 @@
 	return d.verifyChecksum()
 }
 
-func (d *decoder) parsePLTE(length uint32) os.Error {
+func (d *decoder) parsePLTE(length uint32) error {
 	np := int(length / 3) // The number of palette entries.
 	if length%3 != 0 || np <= 0 || np > 256 || np > 1<<uint(d.depth) {
 		return FormatError("bad PLTE length")
@@ -214,7 +213,7 @@
 	return d.verifyChecksum()
 }
 
-func (d *decoder) parsetRNS(length uint32) os.Error {
+func (d *decoder) parsetRNS(length uint32) error {
 	if length > 256 {
 		return FormatError("bad tRNS length")
 	}
@@ -263,7 +262,7 @@
 // immediately before the first Read call is that d.r is positioned between the
 // first IDAT and xxx, and the decoder state immediately after the last Read
 // call is that d.r is positioned between yy and crc1.
-func (d *decoder) Read(p []byte) (int, os.Error) {
+func (d *decoder) Read(p []byte) (int, error) {
 	if len(p) == 0 {
 		return 0, nil
 	}
@@ -294,7 +293,7 @@
 }
 
 // decode decodes the IDAT data into an image.
-func (d *decoder) decode() (image.Image, os.Error) {
+func (d *decoder) decode() (image.Image, error) {
 	r, err := zlib.NewReader(d)
 	if err != nil {
 		return nil, err
@@ -517,8 +516,8 @@
 
 	// Check for EOF, to verify the zlib checksum.
 	n, err := r.Read(pr[:1])
-	if err != os.EOF {
-		return nil, FormatError(err.String())
+	if err != io.EOF {
+		return nil, FormatError(err.Error())
 	}
 	if n != 0 || d.idatLength != 0 {
 		return nil, FormatError("too much pixel data")
@@ -527,7 +526,7 @@
 	return img, nil
 }
 
-func (d *decoder) parseIDAT(length uint32) (err os.Error) {
+func (d *decoder) parseIDAT(length uint32) (err error) {
 	d.idatLength = length
 	d.img, err = d.decode()
 	if err != nil {
@@ -536,14 +535,14 @@
 	return d.verifyChecksum()
 }
 
-func (d *decoder) parseIEND(length uint32) os.Error {
+func (d *decoder) parseIEND(length uint32) error {
 	if length != 0 {
 		return FormatError("bad IEND length")
 	}
 	return d.verifyChecksum()
 }
 
-func (d *decoder) parseChunk() os.Error {
+func (d *decoder) parseChunk() error {
 	// Read the length and chunk type.
 	n, err := io.ReadFull(d.r, d.tmp[:8])
 	if err != nil {
@@ -598,7 +597,7 @@
 	return d.verifyChecksum()
 }
 
-func (d *decoder) verifyChecksum() os.Error {
+func (d *decoder) verifyChecksum() error {
 	if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil {
 		return err
 	}
@@ -608,7 +607,7 @@
 	return nil
 }
 
-func (d *decoder) checkHeader() os.Error {
+func (d *decoder) checkHeader() error {
 	_, err := io.ReadFull(d.r, d.tmp[:len(pngHeader)])
 	if err != nil {
 		return err
@@ -621,20 +620,20 @@
 
 // Decode reads a PNG image from r and returns it as an image.Image.
 // The type of Image returned depends on the PNG contents.
-func Decode(r io.Reader) (image.Image, os.Error) {
+func Decode(r io.Reader) (image.Image, error) {
 	d := &decoder{
 		r:   r,
 		crc: crc32.NewIEEE(),
 	}
 	if err := d.checkHeader(); err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return nil, err
 	}
 	for d.stage != dsSeenIEND {
 		if err := d.parseChunk(); err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				err = io.ErrUnexpectedEOF
 			}
 			return nil, err
@@ -645,20 +644,20 @@
 
 // DecodeConfig returns the color model and dimensions of a PNG image without
 // decoding the entire image.
-func DecodeConfig(r io.Reader) (image.Config, os.Error) {
+func DecodeConfig(r io.Reader) (image.Config, error) {
 	d := &decoder{
 		r:   r,
 		crc: crc32.NewIEEE(),
 	}
 	if err := d.checkHeader(); err != nil {
-		if err == os.EOF {
+		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return image.Config{}, err
 	}
 	for {
 		if err := d.parseChunk(); err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				err = io.ErrUnexpectedEOF
 			}
 			return image.Config{}, err
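
The png decoder also maps a bare io.EOF from checkHeader and parseChunk to io.ErrUnexpectedEOF, so a truncated file reports a useful error rather than looking like a clean end of stream. Below is a hedged, self-contained sketch of that conversion; readHeader is an invented stand-in, not the decoder's own method.

	package main

	import (
		"bytes"
		"fmt"
		"io"
	)

	// readHeader reads a fixed-size header and, like png.Decode, converts a
	// bare io.EOF into io.ErrUnexpectedEOF: running out of input mid-header is
	// always an error for the caller.
	func readHeader(r io.Reader) ([]byte, error) {
		buf := make([]byte, 8)
		if _, err := io.ReadFull(r, buf); err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return nil, err
		}
		return buf, nil
	}

	func main() {
		_, err := readHeader(bytes.NewReader(nil))
		fmt.Println(err) // unexpected EOF
	}
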
diff --git a/src/pkg/image/png/reader_test.go b/src/pkg/image/png/reader_test.go
index 48d0613..7eb1fc2 100644
--- a/src/pkg/image/png/reader_test.go
+++ b/src/pkg/image/png/reader_test.go
@@ -43,7 +43,7 @@
 	"basn6a16",
 }
 
-func readPNG(filename string) (image.Image, os.Error) {
+func readPNG(filename string) (image.Image, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return nil, err
@@ -223,7 +223,7 @@
 		for {
 			ps, perr := pb.ReadString('\n')
 			ss, serr := sb.ReadString('\n')
-			if perr == os.EOF && serr == os.EOF {
+			if perr == io.EOF && serr == io.EOF {
 				break
 			}
 			if perr != nil {
@@ -259,7 +259,7 @@
 			t.Errorf("decoding %s: missing error", tt.file)
 			continue
 		}
-		if !strings.Contains(err.String(), tt.err) {
+		if !strings.Contains(err.Error(), tt.err) {
 			t.Errorf("decoding %s: %s, want %s", tt.file, err, tt.err)
 		}
 		if img != nil {
diff --git a/src/pkg/image/png/writer.go b/src/pkg/image/png/writer.go
index b6103c6..48089ff 100644
--- a/src/pkg/image/png/writer.go
+++ b/src/pkg/image/png/writer.go
@@ -11,7 +11,6 @@
 	"image"
 	"image/color"
 	"io"
-	"os"
 	"strconv"
 )
 
@@ -19,7 +18,7 @@
 	w      io.Writer
 	m      image.Image
 	cb     int
-	err    os.Error
+	err    error
 	header [8]byte
 	footer [4]byte
 	tmp    [3 * 256]byte
@@ -161,7 +160,7 @@
 //
 // This method should only be called from writeIDATs (via writeImage).
 // No other code should treat an encoder as an io.Writer.
-func (e *encoder) Write(b []byte) (int, os.Error) {
+func (e *encoder) Write(b []byte) (int, error) {
 	e.writeChunk(b, "IDAT")
 	if e.err != nil {
 		return 0, e.err
@@ -263,7 +262,7 @@
 	return filter
 }
 
-func writeImage(w io.Writer, m image.Image, cb int) os.Error {
+func writeImage(w io.Writer, m image.Image, cb int) error {
 	zw, err := zlib.NewWriter(w)
 	if err != nil {
 		return err
@@ -424,7 +423,7 @@
 
 // Encode writes the Image m to w in PNG format. Any Image may be encoded, but
 // images that are not image.NRGBA might be encoded lossily.
-func Encode(w io.Writer, m image.Image) os.Error {
+func Encode(w io.Writer, m image.Image) error {
 	// Obviously, negative widths and heights are invalid. Furthermore, the PNG
 	// spec section 11.2.2 says that zero is invalid. Excessively large images are
 	// also rejected.
diff --git a/src/pkg/image/png/writer_test.go b/src/pkg/image/png/writer_test.go
index e517173..1757e14 100644
--- a/src/pkg/image/png/writer_test.go
+++ b/src/pkg/image/png/writer_test.go
@@ -10,11 +10,10 @@
 	"image"
 	"image/color"
 	"io/ioutil"
-	"os"
 	"testing"
 )
 
-func diff(m0, m1 image.Image) os.Error {
+func diff(m0, m1 image.Image) error {
 	b0, b1 := m0.Bounds(), m1.Bounds()
 	if !b0.Size().Eq(b1.Size()) {
 		return fmt.Errorf("dimensions differ: %v vs %v", b0, b1)
@@ -35,7 +34,7 @@
 	return nil
 }
 
-func encodeDecode(m image.Image) (image.Image, os.Error) {
+func encodeDecode(m image.Image) (image.Image, error) {
 	b := bytes.NewBuffer(nil)
 	err := Encode(b, m)
 	if err != nil {
diff --git a/src/pkg/image/tiff/buffer.go b/src/pkg/image/tiff/buffer.go
index 7c07142..ce35073 100644
--- a/src/pkg/image/tiff/buffer.go
+++ b/src/pkg/image/tiff/buffer.go
@@ -15,7 +15,7 @@
 	buf []byte
 }
 
-func (b *buffer) ReadAt(p []byte, off int64) (int, os.Error) {
+func (b *buffer) ReadAt(p []byte, off int64) (int, error) {
 	o := int(off)
 	end := o + len(p)
 	if int64(end) != off+int64(len(p)) {
diff --git a/src/pkg/image/tiff/buffer_test.go b/src/pkg/image/tiff/buffer_test.go
index 4f3e68e..e13afb3 100644
--- a/src/pkg/image/tiff/buffer_test.go
+++ b/src/pkg/image/tiff/buffer_test.go
@@ -5,7 +5,7 @@
 package tiff
 
 import (
-	"os"
+	"io"
 	"strings"
 	"testing"
 )
@@ -14,13 +14,13 @@
 	n   int
 	off int64
 	s   string
-	err os.Error
+	err error
 }{
 	{2, 0, "ab", nil},
 	{6, 0, "abcdef", nil},
 	{3, 3, "def", nil},
-	{3, 5, "f", os.EOF},
-	{3, 6, "", os.EOF},
+	{3, 5, "f", io.EOF},
+	{3, 6, "", io.EOF},
 }
 
 func TestReadAt(t *testing.T) {
diff --git a/src/pkg/image/tiff/compress.go b/src/pkg/image/tiff/compress.go
index e89aa6d..40c7fd8 100644
--- a/src/pkg/image/tiff/compress.go
+++ b/src/pkg/image/tiff/compress.go
@@ -7,7 +7,6 @@
 import (
 	"bufio"
 	"io"
-	"os"
 )
 
 type byteReader interface {
@@ -20,7 +19,7 @@
 //
 // The PackBits compression format is described in section 9 (p. 42)
 // of the TIFF spec.
-func unpackBits(r io.Reader) ([]byte, os.Error) {
+func unpackBits(r io.Reader) ([]byte, error) {
 	buf := make([]byte, 128)
 	dst := make([]byte, 0, 1024)
 	br, ok := r.(byteReader)
@@ -31,7 +30,7 @@
 	for {
 		b, err := br.ReadByte()
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				return dst, nil
 			}
 			return nil, err
diff --git a/src/pkg/image/tiff/reader.go b/src/pkg/image/tiff/reader.go
index c452f5d..00a51db 100644
--- a/src/pkg/image/tiff/reader.go
+++ b/src/pkg/image/tiff/reader.go
@@ -15,13 +15,12 @@
 	"image/color"
 	"io"
 	"io/ioutil"
-	"os"
 )
 
 // A FormatError reports that the input is not a valid TIFF image.
 type FormatError string
 
-func (e FormatError) String() string {
+func (e FormatError) Error() string {
 	return "tiff: invalid format: " + string(e)
 }
 
@@ -29,14 +28,14 @@
 // unimplemented feature.
 type UnsupportedError string
 
-func (e UnsupportedError) String() string {
+func (e UnsupportedError) Error() string {
 	return "tiff: unsupported feature: " + string(e)
 }
 
 // An InternalError reports that an internal error was encountered.
 type InternalError string
 
-func (e InternalError) String() string {
+func (e InternalError) Error() string {
 	return "tiff: internal error: " + string(e)
 }
 
@@ -66,7 +65,7 @@
 
 // ifdUint decodes the IFD entry in p, which must be of the Byte, Short
 // or Long type, and returns the decoded uint values.
-func (d *decoder) ifdUint(p []byte) (u []uint, err os.Error) {
+func (d *decoder) ifdUint(p []byte) (u []uint, err error) {
 	var raw []byte
 	datatype := d.byteOrder.Uint16(p[2:4])
 	count := d.byteOrder.Uint32(p[4:8])
@@ -103,7 +102,7 @@
 
 // parseIFD decides whether the the IFD entry in p is "interesting" and
 // stows away the data in the decoder.
-func (d *decoder) parseIFD(p []byte) os.Error {
+func (d *decoder) parseIFD(p []byte) error {
 	tag := d.byteOrder.Uint16(p[0:2])
 	switch tag {
 	case tBitsPerSample,
@@ -180,7 +179,7 @@
 
 // decode decodes the raw data of an image.
 // It reads from d.buf and writes the strip with ymin <= y < ymax into dst.
-func (d *decoder) decode(dst image.Image, ymin, ymax int) os.Error {
+func (d *decoder) decode(dst image.Image, ymin, ymax int) error {
 	d.off = 0
 
 	// Apply horizontal predictor if necessary.
@@ -255,7 +254,7 @@
 	return nil
 }
 
-func newDecoder(r io.Reader) (*decoder, os.Error) {
+func newDecoder(r io.Reader) (*decoder, error) {
 	d := &decoder{
 		r:        newReaderAt(r),
 		features: make(map[int][]uint),
@@ -350,7 +349,7 @@
 
 // DecodeConfig returns the color model and dimensions of a TIFF image without
 // decoding the entire image.
-func DecodeConfig(r io.Reader) (image.Config, os.Error) {
+func DecodeConfig(r io.Reader) (image.Config, error) {
 	d, err := newDecoder(r)
 	if err != nil {
 		return image.Config{}, err
@@ -360,7 +359,7 @@
 
 // Decode reads a TIFF image from r and returns it as an image.Image.
 // The type of Image returned depends on the contents of the TIFF.
-func Decode(r io.Reader) (img image.Image, err os.Error) {
+func Decode(r io.Reader) (img image.Image, err error) {
 	d, err := newDecoder(r)
 	if err != nil {
 		return
diff --git a/src/pkg/image/tiff/reader_test.go b/src/pkg/image/tiff/reader_test.go
index 86b7dc3..1a3d23b 100644
--- a/src/pkg/image/tiff/reader_test.go
+++ b/src/pkg/image/tiff/reader_test.go
@@ -13,7 +13,7 @@
 )
 
 // Read makes *buffer implements io.Reader, so that we can pass one to Decode.
-func (*buffer) Read([]byte) (int, os.Error) {
+func (*buffer) Read([]byte) (int, error) {
 	panic("unimplemented")
 }
 
diff --git a/src/pkg/index/suffixarray/suffixarray.go b/src/pkg/index/suffixarray/suffixarray.go
index 174460c..c59ae6e 100644
--- a/src/pkg/index/suffixarray/suffixarray.go
+++ b/src/pkg/index/suffixarray/suffixarray.go
@@ -20,7 +20,6 @@
 	"bytes"
 	"encoding/binary"
 	"io"
-	"os"
 	"regexp"
 	"sort"
 )
@@ -38,14 +37,14 @@
 }
 
 // writeInt writes an int x to w using buf to buffer the write.
-func writeInt(w io.Writer, buf []byte, x int) os.Error {
+func writeInt(w io.Writer, buf []byte, x int) error {
 	binary.PutVarint(buf, int64(x))
 	_, err := w.Write(buf[0:binary.MaxVarintLen64])
 	return err
 }
 
 // readInt reads an int x from r using buf to buffer the read and returns x.
-func readInt(r io.Reader, buf []byte) (int, os.Error) {
+func readInt(r io.Reader, buf []byte) (int, error) {
 	_, err := io.ReadFull(r, buf[0:binary.MaxVarintLen64]) // ok to continue with error
 	x, _ := binary.Varint(buf)
 	return int(x), err
@@ -53,7 +52,7 @@
 
 // writeSlice writes data[:n] to w and returns n.
 // It uses buf to buffer the write.
-func writeSlice(w io.Writer, buf []byte, data []int) (n int, err os.Error) {
+func writeSlice(w io.Writer, buf []byte, data []int) (n int, err error) {
 	// encode as many elements as fit into buf
 	p := binary.MaxVarintLen64
 	for ; n < len(data) && p+binary.MaxVarintLen64 <= len(buf); n++ {
@@ -70,7 +69,7 @@
 
 // readSlice reads data[:n] from r and returns n.
 // It uses buf to buffer the read.
-func readSlice(r io.Reader, buf []byte, data []int) (n int, err os.Error) {
+func readSlice(r io.Reader, buf []byte, data []int) (n int, err error) {
 	// read buffer size
 	var size int
 	size, err = readInt(r, buf)
@@ -96,7 +95,7 @@
 const bufSize = 16 << 10 // reasonable for BenchmarkSaveRestore
 
 // Read reads the index from r into x; x must not be nil.
-func (x *Index) Read(r io.Reader) os.Error {
+func (x *Index) Read(r io.Reader) error {
 	// buffer for all reads
 	buf := make([]byte, bufSize)
 
@@ -135,7 +134,7 @@
 }
 
 // Write writes the index x to w.
-func (x *Index) Write(w io.Writer) os.Error {
+func (x *Index) Write(w io.Writer) error {
 	// buffer for all writes
 	buf := make([]byte, bufSize)
 
diff --git a/src/pkg/io/ioutil/ioutil.go b/src/pkg/io/ioutil/ioutil.go
index dd50d96..f6c8cd8 100644
--- a/src/pkg/io/ioutil/ioutil.go
+++ b/src/pkg/io/ioutil/ioutil.go
@@ -14,19 +14,19 @@
 
 // readAll reads from r until an error or EOF and returns the data it read
 // from the internal buffer allocated with a specified capacity.
-func readAll(r io.Reader, capacity int64) ([]byte, os.Error) {
+func readAll(r io.Reader, capacity int64) ([]byte, error) {
 	buf := bytes.NewBuffer(make([]byte, 0, capacity))
 	_, err := buf.ReadFrom(r)
 	return buf.Bytes(), err
 }
 
 // ReadAll reads from r until an error or EOF and returns the data it read.
-func ReadAll(r io.Reader) ([]byte, os.Error) {
+func ReadAll(r io.Reader) ([]byte, error) {
 	return readAll(r, bytes.MinRead)
 }
 
 // ReadFile reads the file named by filename and returns the contents.
-func ReadFile(filename string) ([]byte, os.Error) {
+func ReadFile(filename string) ([]byte, error) {
 	f, err := os.Open(filename)
 	if err != nil {
 		return nil, err
@@ -50,7 +50,7 @@
 // WriteFile writes data to a file named by filename.
 // If the file does not exist, WriteFile creates it with permissions perm;
 // otherwise WriteFile truncates it before writing.
-func WriteFile(filename string, data []byte, perm uint32) os.Error {
+func WriteFile(filename string, data []byte, perm uint32) error {
 	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
 	if err != nil {
 		return err
@@ -72,7 +72,7 @@
 
 // ReadDir reads the directory named by dirname and returns
 // a list of sorted directory entries.
-func ReadDir(dirname string) ([]*os.FileInfo, os.Error) {
+func ReadDir(dirname string) ([]*os.FileInfo, error) {
 	f, err := os.Open(dirname)
 	if err != nil {
 		return nil, err
@@ -94,7 +94,7 @@
 	io.Reader
 }
 
-func (nopCloser) Close() os.Error { return nil }
+func (nopCloser) Close() error { return nil }
 
 // NopCloser returns a ReadCloser with a no-op Close method wrapping
 // the provided Reader r.
@@ -108,19 +108,19 @@
 // ioutil.Discard can avoid doing unnecessary work.
 var _ io.ReaderFrom = devNull(0)
 
-func (devNull) Write(p []byte) (int, os.Error) {
+func (devNull) Write(p []byte) (int, error) {
 	return len(p), nil
 }
 
 var blackHole = make([]byte, 8192)
 
-func (devNull) ReadFrom(r io.Reader) (n int64, err os.Error) {
+func (devNull) ReadFrom(r io.Reader) (n int64, err error) {
 	readSize := 0
 	for {
 		readSize, err = r.Read(blackHole)
 		n += int64(readSize)
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				return n, nil
 			}
 			return
diff --git a/src/pkg/io/ioutil/tempfile.go b/src/pkg/io/ioutil/tempfile.go
index 8e681bd..658ea78 100644
--- a/src/pkg/io/ioutil/tempfile.go
+++ b/src/pkg/io/ioutil/tempfile.go
@@ -40,7 +40,7 @@
 // will not choose the same file.  The caller can use f.Name()
 // to find the name of the file.  It is the caller's responsibility to
 // remove the file when no longer needed.
-func TempFile(dir, prefix string) (f *os.File, err os.Error) {
+func TempFile(dir, prefix string) (f *os.File, err error) {
 	if dir == "" {
 		dir = os.TempDir()
 	}
@@ -49,7 +49,7 @@
 	for i := 0; i < 10000; i++ {
 		name := filepath.Join(dir, prefix+nextSuffix())
 		f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
-		if pe, ok := err.(*os.PathError); ok && pe.Error == os.EEXIST {
+		if pe, ok := err.(*os.PathError); ok && pe.Err == os.EEXIST {
 			if nconflict++; nconflict > 10 {
 				rand = reseed()
 			}
@@ -67,7 +67,7 @@
 // Multiple programs calling TempDir simultaneously
 // will not choose the same directory.  It is the caller's responsibility
 // to remove the directory when no longer needed.
-func TempDir(dir, prefix string) (name string, err os.Error) {
+func TempDir(dir, prefix string) (name string, err error) {
 	if dir == "" {
 		dir = os.TempDir()
 	}
@@ -76,7 +76,7 @@
 	for i := 0; i < 10000; i++ {
 		try := filepath.Join(dir, prefix+nextSuffix())
 		err = os.Mkdir(try, 0700)
-		if pe, ok := err.(*os.PathError); ok && pe.Error == os.EEXIST {
+		if pe, ok := err.(*os.PathError); ok && pe.Err == os.EEXIST {
 			if nconflict++; nconflict > 10 {
 				rand = reseed()
 			}
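
The rename worth noticing here is on the struct field: pe.Error becomes pe.Err, since Error is now the name of the method that makes *os.PathError an error, and a Go type cannot have a field and a method with the same name. A minimal sketch of inspecting the renamed field follows; the os.EEXIST comparison from the code above is omitted, as the errno sentinels are platform-specific.

	package main

	import (
		"fmt"
		"os"
	)

	func main() {
		_, err := os.Open("no/such/file")
		// The wrapped cause now lives in the Err field; Error() on
		// *os.PathError formats Op, Path and Err into the usual message.
		if pe, ok := err.(*os.PathError); ok {
			fmt.Println("op:", pe.Op, "path:", pe.Path, "cause:", pe.Err)
		}
	}
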
diff --git a/src/pkg/json/decode.go b/src/pkg/json/decode.go
index 800df98..8abd7b4 100644
--- a/src/pkg/json/decode.go
+++ b/src/pkg/json/decode.go
@@ -9,7 +9,7 @@
 
 import (
 	"encoding/base64"
-	"os"
+	"errors"
 	"reflect"
 	"runtime"
 	"strconv"
@@ -50,7 +50,7 @@
 // If no more serious errors are encountered, Unmarshal returns
 // an UnmarshalTypeError describing the earliest such error.
 //
-func Unmarshal(data []byte, v interface{}) os.Error {
+func Unmarshal(data []byte, v interface{}) error {
 	d := new(decodeState).init(data)
 
 	// Quick check for well-formedness.
@@ -70,7 +70,7 @@
 // encoding.  UnmarshalJSON must copy the JSON data
 // if it wishes to retain the data after returning.
 type Unmarshaler interface {
-	UnmarshalJSON([]byte) os.Error
+	UnmarshalJSON([]byte) error
 }
 
 // An UnmarshalTypeError describes a JSON value that was
@@ -80,7 +80,7 @@
 	Type  reflect.Type // type of Go value it could not be assigned to
 }
 
-func (e *UnmarshalTypeError) String() string {
+func (e *UnmarshalTypeError) Error() string {
 	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
 }
 
@@ -92,7 +92,7 @@
 	Field reflect.StructField
 }
 
-func (e *UnmarshalFieldError) String() string {
+func (e *UnmarshalFieldError) Error() string {
 	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
 }
 
@@ -102,7 +102,7 @@
 	Type reflect.Type
 }
 
-func (e *InvalidUnmarshalError) String() string {
+func (e *InvalidUnmarshalError) Error() string {
 	if e.Type == nil {
 		return "json: Unmarshal(nil)"
 	}
@@ -113,13 +113,13 @@
 	return "json: Unmarshal(nil " + e.Type.String() + ")"
 }
 
-func (d *decodeState) unmarshal(v interface{}) (err os.Error) {
+func (d *decodeState) unmarshal(v interface{}) (err error) {
 	defer func() {
 		if r := recover(); r != nil {
 			if _, ok := r.(runtime.Error); ok {
 				panic(r)
 			}
-			err = r.(os.Error)
+			err = r.(error)
 		}
 	}()
 
@@ -142,14 +142,14 @@
 	off        int // read offset in data
 	scan       scanner
 	nextscan   scanner // for calls to nextValue
-	savedError os.Error
+	savedError error
 	tempstr    string // scratch space to avoid some allocations
 }
 
 // errPhase is used for errors that should not happen unless
 // there is a bug in the JSON decoder or something is editing
 // the data slice while the decoder executes.
-var errPhase = os.NewError("JSON decoder out of sync - data changing underfoot?")
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
 
 func (d *decodeState) init(data []byte) *decodeState {
 	d.data = data
@@ -159,13 +159,13 @@
 }
 
 // error aborts the decoding by panicking with err.
-func (d *decodeState) error(err os.Error) {
+func (d *decodeState) error(err error) {
 	panic(err)
 }
 
 // saveError saves the first err it is called with,
 // for reporting at the end of the unmarshal.
-func (d *decodeState) saveError(err os.Error) {
+func (d *decodeState) saveError(err error) {
 	if d.savedError == nil {
 		d.savedError = err
 	}
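
decodeState keeps its panic-based error propagation; the recovered value is simply asserted to error instead of os.Error, and genuine runtime errors are still re-panicked. Here is a self-contained sketch of that pattern; the unmarshal stub and its fail flag are invented for illustration.

	package main

	import (
		"errors"
		"fmt"
		"runtime"
	)

	// unmarshal shows the recover idiom used by json's decodeState: helpers
	// panic with an error value, and the exported entry point turns that panic
	// back into an ordinary return value. Runtime errors are not swallowed.
	func unmarshal(fail bool) (err error) {
		defer func() {
			if r := recover(); r != nil {
				if _, ok := r.(runtime.Error); ok {
					panic(r)
				}
				err = r.(error)
			}
		}()
		if fail {
			panic(errors.New("decode: bad input"))
		}
		return nil
	}

	func main() {
		fmt.Println(unmarshal(true))  // decode: bad input
		fmt.Println(unmarshal(false)) // <nil>
	}
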
diff --git a/src/pkg/json/decode_test.go b/src/pkg/json/decode_test.go
index d745e8d..bd4326a 100644
--- a/src/pkg/json/decode_test.go
+++ b/src/pkg/json/decode_test.go
@@ -6,7 +6,6 @@
 
 import (
 	"bytes"
-	"os"
 	"reflect"
 	"strings"
 	"testing"
@@ -30,7 +29,7 @@
 	T bool
 }
 
-func (u *unmarshaler) UnmarshalJSON(b []byte) os.Error {
+func (u *unmarshaler) UnmarshalJSON(b []byte) error {
 	*u = unmarshaler{true} // All we need to see that UnmarshalJson is called.
 	return nil
 }
@@ -52,7 +51,7 @@
 	in  string
 	ptr interface{}
 	out interface{}
-	err os.Error
+	err error
 }
 
 var unmarshalTests = []unmarshalTest{
diff --git a/src/pkg/json/encode.go b/src/pkg/json/encode.go
index ba5c15c..aac8f91 100644
--- a/src/pkg/json/encode.go
+++ b/src/pkg/json/encode.go
@@ -12,7 +12,6 @@
 import (
 	"bytes"
 	"encoding/base64"
-	"os"
 	"reflect"
 	"runtime"
 	"sort"
@@ -96,7 +95,7 @@
 // handle them.  Passing cyclic structures to Marshal will result in
 // an infinite recursion.
 //
-func Marshal(v interface{}) ([]byte, os.Error) {
+func Marshal(v interface{}) ([]byte, error) {
 	e := &encodeState{}
 	err := e.marshal(v)
 	if err != nil {
@@ -106,7 +105,7 @@
 }
 
 // MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, os.Error) {
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
 	b, err := Marshal(v)
 	if err != nil {
 		return nil, err
@@ -120,7 +119,7 @@
 }
 
 // MarshalForHTML is like Marshal but applies HTMLEscape to the output.
-func MarshalForHTML(v interface{}) ([]byte, os.Error) {
+func MarshalForHTML(v interface{}) ([]byte, error) {
 	b, err := Marshal(v)
 	if err != nil {
 		return nil, err
@@ -159,14 +158,14 @@
 // Marshaler is the interface implemented by objects that
 // can marshal themselves into valid JSON.
 type Marshaler interface {
-	MarshalJSON() ([]byte, os.Error)
+	MarshalJSON() ([]byte, error)
 }
 
 type UnsupportedTypeError struct {
 	Type reflect.Type
 }
 
-func (e *UnsupportedTypeError) String() string {
+func (e *UnsupportedTypeError) Error() string {
 	return "json: unsupported type: " + e.Type.String()
 }
 
@@ -174,17 +173,17 @@
 	S string
 }
 
-func (e *InvalidUTF8Error) String() string {
+func (e *InvalidUTF8Error) Error() string {
 	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
 }
 
 type MarshalerError struct {
-	Type  reflect.Type
-	Error os.Error
+	Type reflect.Type
+	Err  error
 }
 
-func (e *MarshalerError) String() string {
-	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Error.String()
+func (e *MarshalerError) Error() string {
+	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
 }
 
 type interfaceOrPtrValue interface {
@@ -199,20 +198,20 @@
 	bytes.Buffer // accumulated output
 }
 
-func (e *encodeState) marshal(v interface{}) (err os.Error) {
+func (e *encodeState) marshal(v interface{}) (err error) {
 	defer func() {
 		if r := recover(); r != nil {
 			if _, ok := r.(runtime.Error); ok {
 				panic(r)
 			}
-			err = r.(os.Error)
+			err = r.(error)
 		}
 	}()
 	e.reflectValue(reflect.ValueOf(v))
 	return nil
 }
 
-func (e *encodeState) error(err os.Error) {
+func (e *encodeState) error(err error) {
 	panic(err)
 }
 
@@ -423,7 +422,7 @@
 func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
 func (sv stringValues) get(i int) string   { return sv[i].String() }
 
-func (e *encodeState) string(s string) (int, os.Error) {
+func (e *encodeState) string(s string) (int, error) {
 	len0 := e.Len()
 	e.WriteByte('"')
 	start := 0
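
MarshalerError illustrates why some fields get renamed along with the methods: the type now needs an Error() string method, so its wrapped cause cannot also be a field named Error and becomes Err. A short sketch of the same shape, using an invented wrapError type:

	package main

	import (
		"errors"
		"fmt"
	)

	// wrapError mirrors json.MarshalerError after the rename: the underlying
	// cause is stored in Err, leaving the identifier Error free for the method
	// that satisfies the error interface.
	type wrapError struct {
		Context string
		Err     error
	}

	func (e *wrapError) Error() string { return e.Context + ": " + e.Err.Error() }

	func main() {
		err := &wrapError{Context: "calling MarshalJSON for type T", Err: errors.New("boom")}
		fmt.Println(err)
	}
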
diff --git a/src/pkg/json/indent.go b/src/pkg/json/indent.go
index 2a75303..5ba19b0 100644
--- a/src/pkg/json/indent.go
+++ b/src/pkg/json/indent.go
@@ -4,14 +4,11 @@
 
 package json
 
-import (
-	"bytes"
-	"os"
-)
+import "bytes"
 
 // Compact appends to dst the JSON-encoded src with
 // insignificant space characters elided.
-func Compact(dst *bytes.Buffer, src []byte) os.Error {
+func Compact(dst *bytes.Buffer, src []byte) error {
 	origLen := dst.Len()
 	var scan scanner
 	scan.reset()
@@ -52,7 +49,7 @@
 // copies of indent according to the indentation nesting.
 // The data appended to dst has no trailing newline, to make it easier
 // to embed inside other formatted JSON data.
-func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) os.Error {
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
 	origLen := dst.Len()
 	var scan scanner
 	scan.reset()
diff --git a/src/pkg/json/scanner.go b/src/pkg/json/scanner.go
index 1a39b4c..1796904 100644
--- a/src/pkg/json/scanner.go
+++ b/src/pkg/json/scanner.go
@@ -13,14 +13,11 @@
 // This file starts with two simple examples using the scanner
 // before diving into the scanner itself.
 
-import (
-	"os"
-	"strconv"
-)
+import "strconv"
 
 // checkValid verifies that data is valid JSON-encoded data.
 // scan is passed in for use by checkValid to avoid an allocation.
-func checkValid(data []byte, scan *scanner) os.Error {
+func checkValid(data []byte, scan *scanner) error {
 	scan.reset()
 	for _, c := range data {
 		scan.bytes++
@@ -37,7 +34,7 @@
 // nextValue splits data after the next whole JSON value,
 // returning that value and the bytes that follow it as separate slices.
 // scan is passed in for use by nextValue to avoid an allocation.
-func nextValue(data []byte, scan *scanner) (value, rest []byte, err os.Error) {
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
 	scan.reset()
 	for i, c := range data {
 		v := scan.step(scan, int(c))
@@ -62,7 +59,7 @@
 	Offset int64  // error occurred after reading Offset bytes
 }
 
-func (e *SyntaxError) String() string { return e.msg }
+func (e *SyntaxError) Error() string { return e.msg }
 
 // A scanner is a JSON scanning state machine.
 // Callers call scan.reset() and then pass bytes in one at a time
@@ -87,7 +84,7 @@
 	parseState []int
 
 	// Error that happened, if any.
-	err os.Error
+	err error
 
 	// 1-byte redo (see undo method)
 	redoCode  int
diff --git a/src/pkg/json/scanner_test.go b/src/pkg/json/scanner_test.go
index 40bf295..0b86cb5 100644
--- a/src/pkg/json/scanner_test.go
+++ b/src/pkg/json/scanner_test.go
@@ -7,7 +7,6 @@
 import (
 	"bytes"
 	"math"
-	"os"
 	"rand"
 	"reflect"
 	"testing"
@@ -140,7 +139,7 @@
 
 type indentErrorTest struct {
 	in  string
-	err os.Error
+	err error
 }
 
 var indentErrorTests = []indentErrorTest{
diff --git a/src/pkg/json/stream.go b/src/pkg/json/stream.go
index 98cb793..f247639 100644
--- a/src/pkg/json/stream.go
+++ b/src/pkg/json/stream.go
@@ -5,8 +5,8 @@
 package json
 
 import (
+	"errors"
 	"io"
-	"os"
 )
 
 // A Decoder reads and decodes JSON objects from an input stream.
@@ -15,7 +15,7 @@
 	buf  []byte
 	d    decodeState
 	scan scanner
-	err  os.Error
+	err  error
 }
 
 // NewDecoder returns a new decoder that reads from r.
@@ -28,7 +28,7 @@
 //
 // See the documentation for Unmarshal for details about
 // the conversion of JSON into a Go value.
-func (dec *Decoder) Decode(v interface{}) os.Error {
+func (dec *Decoder) Decode(v interface{}) error {
 	if dec.err != nil {
 		return dec.err
 	}
@@ -53,11 +53,11 @@
 
 // readValue reads a JSON value into dec.buf.
 // It returns the length of the encoding.
-func (dec *Decoder) readValue() (int, os.Error) {
+func (dec *Decoder) readValue() (int, error) {
 	dec.scan.reset()
 
 	scanp := 0
-	var err os.Error
+	var err error
 Input:
 	for {
 		// Look in the buffer for a new value.
@@ -85,7 +85,7 @@
 		// Did the last read have an error?
 		// Delayed until now to allow buffer scan.
 		if err != nil {
-			if err == os.EOF {
+			if err == io.EOF {
 				if dec.scan.step(&dec.scan, ' ') == scanEnd {
 					break Input
 				}
@@ -126,7 +126,7 @@
 type Encoder struct {
 	w   io.Writer
 	e   encodeState
-	err os.Error
+	err error
 }
 
 // NewEncoder returns a new encoder that writes to w.
@@ -138,7 +138,7 @@
 //
 // See the documentation for Marshal for details about the
 // conversion of Go values to JSON.
-func (enc *Encoder) Encode(v interface{}) os.Error {
+func (enc *Encoder) Encode(v interface{}) error {
 	if enc.err != nil {
 		return enc.err
 	}
@@ -168,14 +168,14 @@
 type RawMessage []byte
 
 // MarshalJSON returns *m as the JSON encoding of m.
-func (m *RawMessage) MarshalJSON() ([]byte, os.Error) {
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
 	return *m, nil
 }
 
 // UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) os.Error {
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
 	if m == nil {
-		return os.NewError("json.RawMessage: UnmarshalJSON on nil pointer")
+		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
 	}
 	*m = append((*m)[0:0], data...)
 	return nil
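
With os.EOF gone, stream consumers test the Decoder's result against io.EOF to detect a clean end of input. Below is a hedged sketch of that loop; note the import path: at the time of this CL the package lived at "json", and the sketch uses the later "encoding/json" location so it compiles against current trees.

	package main

	import (
		"encoding/json" // "json" at the time of this CL
		"fmt"
		"io"
		"strings"
	)

	func main() {
		dec := json.NewDecoder(strings.NewReader(`{"n": 1} {"n": 2}`))
		for {
			var v map[string]int
			if err := dec.Decode(&v); err == io.EOF {
				break // clean end of stream
			} else if err != nil {
				fmt.Println("decode:", err)
				return
			}
			fmt.Println(v)
		}
	}
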
diff --git a/src/pkg/log/log.go b/src/pkg/log/log.go
index ec09743..55b7e9e 100644
--- a/src/pkg/log/log.go
+++ b/src/pkg/log/log.go
@@ -132,7 +132,7 @@
 // already a newline.  Calldepth is used to recover the PC and is
 // provided for generality, although at the moment on all pre-defined
 // paths it will be 2.
-func (l *Logger) Output(calldepth int, s string) os.Error {
+func (l *Logger) Output(calldepth int, s string) error {
 	now := time.Nanoseconds() // get this early.
 	var file string
 	var line int
diff --git a/src/pkg/mail/message.go b/src/pkg/mail/message.go
index 29249fb..95246b2 100644
--- a/src/pkg/mail/message.go
+++ b/src/pkg/mail/message.go
@@ -19,12 +19,12 @@
 	"bufio"
 	"bytes"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"net/textproto"
-	"os"
 	"strconv"
 	"strings"
 	"time"
@@ -48,7 +48,7 @@
 
 // ReadMessage reads a message from r.
 // The headers are parsed, and the body of the message will be reading from r.
-func ReadMessage(r io.Reader) (msg *Message, err os.Error) {
+func ReadMessage(r io.Reader) (msg *Message, err error) {
 	tp := textproto.NewReader(bufio.NewReader(r))
 
 	hdr, err := tp.ReadMIMEHeader()
@@ -89,14 +89,14 @@
 	}
 }
 
-func parseDate(date string) (*time.Time, os.Error) {
+func parseDate(date string) (*time.Time, error) {
 	for _, layout := range dateLayouts {
 		t, err := time.Parse(layout, date)
 		if err == nil {
 			return t, nil
 		}
 	}
-	return nil, os.NewError("mail: header could not be parsed")
+	return nil, errors.New("mail: header could not be parsed")
 }
 
 // A Header represents the key-value pairs in a mail message header.
@@ -108,10 +108,10 @@
 	return textproto.MIMEHeader(h).Get(key)
 }
 
-var ErrHeaderNotPresent = os.NewError("mail: header not in message")
+var ErrHeaderNotPresent = errors.New("mail: header not in message")
 
 // Date parses the Date header field.
-func (h Header) Date() (*time.Time, os.Error) {
+func (h Header) Date() (*time.Time, error) {
 	hdr := h.Get("Date")
 	if hdr == "" {
 		return nil, ErrHeaderNotPresent
@@ -120,7 +120,7 @@
 }
 
 // AddressList parses the named header field as a list of addresses.
-func (h Header) AddressList(key string) ([]*Address, os.Error) {
+func (h Header) AddressList(key string) ([]*Address, error) {
 	hdr := h.Get(key)
 	if hdr == "" {
 		return nil, ErrHeaderNotPresent
@@ -189,7 +189,7 @@
 	return &p
 }
 
-func (p *addrParser) parseAddressList() ([]*Address, os.Error) {
+func (p *addrParser) parseAddressList() ([]*Address, error) {
 	var list []*Address
 	for {
 		p.skipSpace()
@@ -204,18 +204,18 @@
 			break
 		}
 		if !p.consume(',') {
-			return nil, os.NewError("mail: expected comma")
+			return nil, errors.New("mail: expected comma")
 		}
 	}
 	return list, nil
 }
 
 // parseAddress parses a single RFC 5322 address at the start of p.
-func (p *addrParser) parseAddress() (addr *Address, err os.Error) {
+func (p *addrParser) parseAddress() (addr *Address, err error) {
 	debug.Printf("parseAddress: %q", *p)
 	p.skipSpace()
 	if p.empty() {
-		return nil, os.NewError("mail: no address")
+		return nil, errors.New("mail: no address")
 	}
 
 	// address = name-addr / addr-spec
@@ -246,14 +246,14 @@
 	// angle-addr = "<" addr-spec ">"
 	p.skipSpace()
 	if !p.consume('<') {
-		return nil, os.NewError("mail: no angle-addr")
+		return nil, errors.New("mail: no angle-addr")
 	}
 	spec, err = p.consumeAddrSpec()
 	if err != nil {
 		return nil, err
 	}
 	if !p.consume('>') {
-		return nil, os.NewError("mail: unclosed angle-addr")
+		return nil, errors.New("mail: unclosed angle-addr")
 	}
 	debug.Printf("parseAddress: spec=%q", spec)
 
@@ -264,7 +264,7 @@
 }
 
 // consumeAddrSpec parses a single RFC 5322 addr-spec at the start of p.
-func (p *addrParser) consumeAddrSpec() (spec string, err os.Error) {
+func (p *addrParser) consumeAddrSpec() (spec string, err error) {
 	debug.Printf("consumeAddrSpec: %q", *p)
 
 	orig := *p
@@ -278,7 +278,7 @@
 	var localPart string
 	p.skipSpace()
 	if p.empty() {
-		return "", os.NewError("mail: no addr-spec")
+		return "", errors.New("mail: no addr-spec")
 	}
 	if p.peek() == '"' {
 		// quoted-string
@@ -295,14 +295,14 @@
 	}
 
 	if !p.consume('@') {
-		return "", os.NewError("mail: missing @ in addr-spec")
+		return "", errors.New("mail: missing @ in addr-spec")
 	}
 
 	// domain = dot-atom / domain-literal
 	var domain string
 	p.skipSpace()
 	if p.empty() {
-		return "", os.NewError("mail: no domain in addr-spec")
+		return "", errors.New("mail: no domain in addr-spec")
 	}
 	// TODO(dsymonds): Handle domain-literal
 	domain, err = p.consumeAtom(true)
@@ -314,7 +314,7 @@
 }
 
 // consumePhrase parses the RFC 5322 phrase at the start of p.
-func (p *addrParser) consumePhrase() (phrase string, err os.Error) {
+func (p *addrParser) consumePhrase() (phrase string, err error) {
 	debug.Printf("consumePhrase: [%s]", *p)
 	// phrase = 1*word
 	var words []string
@@ -323,7 +323,7 @@
 		var word string
 		p.skipSpace()
 		if p.empty() {
-			return "", os.NewError("mail: missing phrase")
+			return "", errors.New("mail: missing phrase")
 		}
 		if p.peek() == '"' {
 			// quoted-string
@@ -347,28 +347,28 @@
 	// Ignore any error if we got at least one word.
 	if err != nil && len(words) == 0 {
 		debug.Printf("consumePhrase: hit err: %v", err)
-		return "", os.NewError("mail: missing word in phrase")
+		return "", errors.New("mail: missing word in phrase")
 	}
 	phrase = strings.Join(words, " ")
 	return phrase, nil
 }
 
 // consumeQuotedString parses the quoted string at the start of p.
-func (p *addrParser) consumeQuotedString() (qs string, err os.Error) {
+func (p *addrParser) consumeQuotedString() (qs string, err error) {
 	// Assume first byte is '"'.
 	i := 1
 	qsb := make([]byte, 0, 10)
 Loop:
 	for {
 		if i >= p.len() {
-			return "", os.NewError("mail: unclosed quoted-string")
+			return "", errors.New("mail: unclosed quoted-string")
 		}
 		switch c := (*p)[i]; {
 		case c == '"':
 			break Loop
 		case c == '\\':
 			if i+1 == p.len() {
-				return "", os.NewError("mail: unclosed quoted-string")
+				return "", errors.New("mail: unclosed quoted-string")
 			}
 			qsb = append(qsb, (*p)[i+1])
 			i += 2
@@ -387,9 +387,9 @@
 
 // consumeAtom parses an RFC 5322 atom at the start of p.
 // If dot is true, consumeAtom parses an RFC 5322 dot-atom instead.
-func (p *addrParser) consumeAtom(dot bool) (atom string, err os.Error) {
+func (p *addrParser) consumeAtom(dot bool) (atom string, err error) {
 	if !isAtext(p.peek(), false) {
-		return "", os.NewError("mail: invalid string")
+		return "", errors.New("mail: invalid string")
 	}
 	i := 1
 	for ; i < p.len() && isAtext((*p)[i], dot); i++ {
@@ -424,10 +424,10 @@
 	return len(*p)
 }
 
-func decodeRFC2047Word(s string) (string, os.Error) {
+func decodeRFC2047Word(s string) (string, error) {
 	fields := strings.Split(s, "?")
 	if len(fields) != 5 || fields[0] != "=" || fields[4] != "=" {
-		return "", os.NewError("mail: address not RFC 2047 encoded")
+		return "", errors.New("mail: address not RFC 2047 encoded")
 	}
 	charset, enc := strings.ToLower(fields[1]), strings.ToLower(fields[2])
 	if charset != "iso-8859-1" && charset != "utf-8" {
@@ -468,7 +468,7 @@
 	scratch [2]byte
 }
 
-func (qd qDecoder) Read(p []byte) (n int, err os.Error) {
+func (qd qDecoder) Read(p []byte) (n int, err error) {
 	// This method writes at most one byte into p.
 	if len(p) == 0 {
 		return 0, nil
diff --git a/src/pkg/mime/mediatype.go b/src/pkg/mime/mediatype.go
index 8ad8004..2bf7978 100644
--- a/src/pkg/mime/mediatype.go
+++ b/src/pkg/mime/mediatype.go
@@ -6,8 +6,8 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
-	"os"
 	"strings"
 	"unicode"
 )
@@ -57,23 +57,23 @@
 	return b.String()
 }
 
-func checkMediaTypeDisposition(s string) os.Error {
+func checkMediaTypeDisposition(s string) error {
 	typ, rest := consumeToken(s)
 	if typ == "" {
-		return os.NewError("mime: no media type")
+		return errors.New("mime: no media type")
 	}
 	if rest == "" {
 		return nil
 	}
 	if !strings.HasPrefix(rest, "/") {
-		return os.NewError("mime: expected slash after first token")
+		return errors.New("mime: expected slash after first token")
 	}
 	subtype, rest := consumeToken(rest[1:])
 	if subtype == "" {
-		return os.NewError("mime: expected token after slash")
+		return errors.New("mime: expected token after slash")
 	}
 	if rest != "" {
-		return os.NewError("mime: unexpected content after media subtype")
+		return errors.New("mime: unexpected content after media subtype")
 	}
 	return nil
 }
@@ -85,7 +85,7 @@
 // to lowercase and trimmed of white space and a non-nil map.
 // The returned map, params, maps from the lowercase
 // attribute to the attribute value with its case preserved.
-func ParseMediaType(v string) (mediatype string, params map[string]string, err os.Error) {
+func ParseMediaType(v string) (mediatype string, params map[string]string, err error) {
 	i := strings.Index(v, ";")
 	if i == -1 {
 		i = len(v)
@@ -118,7 +118,7 @@
 				return
 			}
 			// Parse error.
-			return "", nil, os.NewError("mime: invalid media parameter")
+			return "", nil, errors.New("mime: invalid media parameter")
 		}
 
 		pmap := params
@@ -135,7 +135,7 @@
 		}
 		if _, exists := pmap[key]; exists {
 			// Duplicate parameter name is bogus.
-			return "", nil, os.NewError("mime: duplicate parameter name")
+			return "", nil, errors.New("mime: duplicate parameter name")
 		}
 		pmap[key] = value
 		v = rest
@@ -281,7 +281,7 @@
 	return param, value, rest
 }
 
-func percentHexUnescape(s string) (string, os.Error) {
+func percentHexUnescape(s string) (string, error) {
 	// Count %, check that they're well-formed.
 	percents := 0
 	for i := 0; i < len(s); {
diff --git a/src/pkg/mime/mediatype_test.go b/src/pkg/mime/mediatype_test.go
index 884573e..c06f167 100644
--- a/src/pkg/mime/mediatype_test.go
+++ b/src/pkg/mime/mediatype_test.go
@@ -249,7 +249,7 @@
 	if err == nil {
 		t.Fatalf("expected an error parsing invalid media type; got type %q, params %#v", mt, params)
 	}
-	if err.String() != "mime: invalid media parameter" {
+	if err.Error() != "mime: invalid media parameter" {
 		t.Errorf("expected invalid media parameter; got error %q", err)
 	}
 }
diff --git a/src/pkg/mime/multipart/formdata.go b/src/pkg/mime/multipart/formdata.go
index d114bfa..d9982e5 100644
--- a/src/pkg/mime/multipart/formdata.go
+++ b/src/pkg/mime/multipart/formdata.go
@@ -6,6 +6,7 @@
 
 import (
 	"bytes"
+	"errors"
 	"io"
 	"io/ioutil"
 	"net/textproto"
@@ -19,7 +20,7 @@
 // a Content-Disposition of "form-data".
 // It stores up to maxMemory bytes of the file parts in memory
 // and the remainder on disk in temporary files.
-func (r *Reader) ReadForm(maxMemory int64) (f *Form, err os.Error) {
+func (r *Reader) ReadForm(maxMemory int64) (f *Form, err error) {
 	form := &Form{make(map[string][]string), make(map[string][]*FileHeader)}
 	defer func() {
 		if err != nil {
@@ -30,7 +31,7 @@
 	maxValueBytes := int64(10 << 20) // 10 MB is a lot of text.
 	for {
 		p, err := r.NextPart()
-		if err == os.EOF {
+		if err == io.EOF {
 			break
 		}
 		if err != nil {
@@ -48,12 +49,12 @@
 		if filename == "" {
 			// value, store as string in memory
 			n, err := io.CopyN(&b, p, maxValueBytes)
-			if err != nil && err != os.EOF {
+			if err != nil && err != io.EOF {
 				return nil, err
 			}
 			maxValueBytes -= n
 			if maxValueBytes == 0 {
-				return nil, os.NewError("multipart: message too large")
+				return nil, errors.New("multipart: message too large")
 			}
 			form.Value[name] = append(form.Value[name], b.String())
 			continue
@@ -65,7 +66,7 @@
 			Header:   p.Header,
 		}
 		n, err := io.CopyN(&b, p, maxMemory+1)
-		if err != nil && err != os.EOF {
+		if err != nil && err != io.EOF {
 			return nil, err
 		}
 		if n > maxMemory {
@@ -102,8 +103,8 @@
 }
 
 // RemoveAll removes any temporary files associated with a Form.
-func (f *Form) RemoveAll() os.Error {
-	var err os.Error
+func (f *Form) RemoveAll() error {
+	var err error
 	for _, fhs := range f.File {
 		for _, fh := range fhs {
 			if fh.tmpfile != "" {
@@ -127,7 +128,7 @@
 }
 
 // Open opens and returns the FileHeader's associated File.
-func (fh *FileHeader) Open() (File, os.Error) {
+func (fh *FileHeader) Open() (File, error) {
 	if b := fh.content; b != nil {
 		r := io.NewSectionReader(sliceReaderAt(b), 0, int64(len(b)))
 		return sectionReadCloser{r}, nil
@@ -151,13 +152,13 @@
 	*io.SectionReader
 }
 
-func (rc sectionReadCloser) Close() os.Error {
+func (rc sectionReadCloser) Close() error {
 	return nil
 }
 
 type sliceReaderAt []byte
 
-func (r sliceReaderAt) ReadAt(b []byte, off int64) (int, os.Error) {
+func (r sliceReaderAt) ReadAt(b []byte, off int64) (int, error) {
 	if int(off) >= len(r) || off < 0 {
 		return 0, os.EINVAL
 	}
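
After the conversion, ReadForm and RemoveAll use the plain error type. A rough caller sketch; the boundary string and form body below are invented for illustration:

package main

import (
	"fmt"
	"mime/multipart"
	"strings"
)

func main() {
	body := "--BOUNDARY\r\n" +
		"Content-Disposition: form-data; name=\"field1\"\r\n" +
		"\r\n" +
		"value1\r\n" +
		"--BOUNDARY--\r\n"
	r := multipart.NewReader(strings.NewReader(body), "BOUNDARY")

	// ReadForm now returns (*Form, error); parts larger than maxMemory
	// (1 MB here) are spooled to temporary files on disk.
	form, err := r.ReadForm(1 << 20)
	if err != nil {
		fmt.Println("read form:", err)
		return
	}
	defer form.RemoveAll() // delete any temporary files

	fmt.Println(form.Value["field1"])
}
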
diff --git a/src/pkg/mime/multipart/multipart.go b/src/pkg/mime/multipart/multipart.go
index d36e9e9..24b0e41 100644
--- a/src/pkg/mime/multipart/multipart.go
+++ b/src/pkg/mime/multipart/multipart.go
@@ -20,7 +20,6 @@
 	"io/ioutil"
 	"mime"
 	"net/textproto"
-	"os"
 )
 
 // TODO(bradfitz): inline these once the compiler can inline them in
@@ -69,7 +68,7 @@
 
 func (p *Part) parseContentDisposition() {
 	v := p.Header.Get("Content-Disposition")
-	var err os.Error
+	var err error
 	p.disposition, p.dispositionParams, err = mime.ParseMediaType(v)
 	if err != nil {
 		p.dispositionParams = emptyParams
@@ -90,7 +89,7 @@
 	}
 }
 
-func newPart(mr *Reader) (*Part, os.Error) {
+func newPart(mr *Reader) (*Part, error) {
 	bp := &Part{
 		Header: make(map[string][]string),
 		mr:     mr,
@@ -102,7 +101,7 @@
 	return bp, nil
 }
 
-func (bp *Part) populateHeaders() os.Error {
+func (bp *Part) populateHeaders() error {
 	r := textproto.NewReader(bp.mr.bufReader)
 	header, err := r.ReadMIMEHeader()
 	if err == nil {
@@ -113,14 +112,14 @@
 
 // Read reads the body of a part, after its headers and before the
 // next part (if any) begins.
-func (bp *Part) Read(p []byte) (n int, err os.Error) {
+func (bp *Part) Read(p []byte) (n int, err error) {
 	if bp.buffer.Len() >= len(p) {
 		// Internal buffer of unconsumed data is large enough for
 		// the read request.  No need to parse more at the moment.
 		return bp.buffer.Read(p)
 	}
 	peek, err := bp.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
-	unexpectedEof := err == os.EOF
+	unexpectedEof := err == io.EOF
 	if err != nil && !unexpectedEof {
 		return 0, fmt.Errorf("multipart: Part Read: %v", err)
 	}
@@ -151,7 +150,7 @@
 		}
 	}
 	n, err = bp.buffer.Read(p)
-	if err == os.EOF && !foundBoundary {
+	if err == io.EOF && !foundBoundary {
 		// If the boundary hasn't been reached there's more to
 		// read, so don't pass through an EOF from the buffer
 		err = nil
@@ -159,7 +158,7 @@
 	return
 }
 
-func (bp *Part) Close() os.Error {
+func (bp *Part) Close() error {
 	io.Copy(ioutil.Discard, bp)
 	return nil
 }
@@ -178,7 +177,7 @@
 
 // NextPart returns the next part in the multipart or an error.
-// When there are no more parts, the error os.EOF is returned.
+// When there are no more parts, the error io.EOF is returned.
-func (mr *Reader) NextPart() (*Part, os.Error) {
+func (mr *Reader) NextPart() (*Part, error) {
 	if mr.currentPart != nil {
 		mr.currentPart.Close()
 	}
@@ -202,7 +201,7 @@
 
 		if hasPrefixThenNewline(line, mr.dashBoundaryDash) {
 			// Expected EOF
-			return nil, os.EOF
+			return nil, io.EOF
 		}
 
 		if expectNewPart {
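
For consumers of NextPart, the end-of-input sentinel is now io.EOF instead of os.EOF. A typical read loop, sketched with an invented boundary and body:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"strings"
)

func main() {
	body := "--sep\r\n" +
		"Content-Disposition: form-data; name=\"a\"\r\n" +
		"\r\n" +
		"hello\r\n" +
		"--sep--\r\n"
	mr := multipart.NewReader(strings.NewReader(body), "sep")

	for {
		part, err := mr.NextPart()
		if err == io.EOF { // previously os.EOF
			break
		}
		if err != nil {
			fmt.Println("next part:", err)
			return
		}
		data, _ := ioutil.ReadAll(part)
		fmt.Printf("%s: %q\n", part.FormName(), data)
	}
}
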
diff --git a/src/pkg/mime/multipart/multipart_test.go b/src/pkg/mime/multipart/multipart_test.go
index 38079e5..dd5d7c1 100644
--- a/src/pkg/mime/multipart/multipart_test.go
+++ b/src/pkg/mime/multipart/multipart_test.go
@@ -10,7 +10,6 @@
 	"io"
 	"io/ioutil"
 	"json"
-	"os"
 	"strings"
 	"testing"
 )
@@ -214,7 +213,7 @@
 	if part != nil {
 		t.Error("Didn't expect a fifth part.")
 	}
-	if err != os.EOF {
+	if err != io.EOF {
 		t.Errorf("On fifth part expected os.EOF; got %v", err)
 	}
 }
@@ -259,7 +258,7 @@
 		if part != nil {
 			t.Errorf("Unexpected part in test %d", testNum)
 		}
-		if err != os.EOF {
+		if err != io.EOF {
 			t.Errorf("On test %d expected os.EOF; got %v", testNum, err)
 		}
 
@@ -273,11 +272,11 @@
 
 const maxReadThreshold = 1 << 20
 
-func (mr *maliciousReader) Read(b []byte) (n int, err os.Error) {
+func (mr *maliciousReader) Read(b []byte) (n int, err error) {
 	mr.n += len(b)
 	if mr.n >= maxReadThreshold {
 		mr.t.Fatal("too much was read")
-		return 0, os.EOF
+		return 0, io.EOF
 	}
 	return len(b), nil
 }
@@ -346,7 +345,7 @@
 	r io.Reader
 }
 
-func (s *slowReader) Read(p []byte) (int, os.Error) {
+func (s *slowReader) Read(p []byte) (int, error) {
 	if len(p) == 0 {
 		return s.r.Read(p)
 	}
diff --git a/src/pkg/mime/multipart/writer.go b/src/pkg/mime/multipart/writer.go
index 1bff02f..ec70be4 100644
--- a/src/pkg/mime/multipart/writer.go
+++ b/src/pkg/mime/multipart/writer.go
@@ -7,10 +7,10 @@
 import (
 	"bytes"
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
 	"net/textproto"
-	"os"
 	"strings"
 )
 
@@ -54,7 +54,7 @@
 // header. The body of the part should be written to the returned
 // Writer. After calling CreatePart, any previous part may no longer
 // be written to.
-func (w *Writer) CreatePart(header textproto.MIMEHeader) (io.Writer, os.Error) {
+func (w *Writer) CreatePart(header textproto.MIMEHeader) (io.Writer, error) {
 	if w.lastpart != nil {
 		if err := w.lastpart.close(); err != nil {
 			return nil, err
@@ -93,7 +93,7 @@
 
 // CreateFormFile is a convenience wrapper around CreatePart. It creates
 // a new form-data header with the provided field name and file name.
-func (w *Writer) CreateFormFile(fieldname, filename string) (io.Writer, os.Error) {
+func (w *Writer) CreateFormFile(fieldname, filename string) (io.Writer, error) {
 	h := make(textproto.MIMEHeader)
 	h.Set("Content-Disposition",
 		fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
@@ -104,7 +104,7 @@
 
 // CreateFormField calls CreatePart with a header using the
 // given field name.
-func (w *Writer) CreateFormField(fieldname string) (io.Writer, os.Error) {
+func (w *Writer) CreateFormField(fieldname string) (io.Writer, error) {
 	h := make(textproto.MIMEHeader)
 	h.Set("Content-Disposition",
 		fmt.Sprintf(`form-data; name="%s"`, escapeQuotes(fieldname)))
@@ -112,7 +112,7 @@
 }
 
 // WriteField calls CreateFormField and then writes the given value.
-func (w *Writer) WriteField(fieldname, value string) os.Error {
+func (w *Writer) WriteField(fieldname, value string) error {
 	p, err := w.CreateFormField(fieldname)
 	if err != nil {
 		return err
@@ -123,7 +123,7 @@
 
 // Close finishes the multipart message and writes the trailing
 // boundary end line to the output.
-func (w *Writer) Close() os.Error {
+func (w *Writer) Close() error {
 	if w.lastpart != nil {
 		if err := w.lastpart.close(); err != nil {
 			return err
@@ -137,17 +137,17 @@
 type part struct {
 	mw     *Writer
 	closed bool
-	we     os.Error // last error that occurred writing
+	we     error // last error that occurred writing
 }
 
-func (p *part) close() os.Error {
+func (p *part) close() error {
 	p.closed = true
 	return p.we
 }
 
-func (p *part) Write(d []byte) (n int, err os.Error) {
+func (p *part) Write(d []byte) (n int, err error) {
 	if p.closed {
-		return 0, os.NewError("multipart: can't write to finished part")
+		return 0, errors.New("multipart: can't write to finished part")
 	}
 	n, err = p.mw.w.Write(d)
 	if err != nil {
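
The writer side now returns error from CreatePart, CreateFormFile, CreateFormField, WriteField, and Close. A short sketch of assembling a form body (field and file names are made up):

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	if err := w.WriteField("name", "gopher"); err != nil {
		fmt.Println("write field:", err)
		return
	}
	fw, err := w.CreateFormFile("upload", "hello.txt")
	if err != nil {
		fmt.Println("create form file:", err)
		return
	}
	fw.Write([]byte("hello, multipart"))

	// Close finishes the message with the trailing boundary line; writing to
	// fw after this point would yield "multipart: can't write to finished part".
	if err := w.Close(); err != nil {
		fmt.Println("close:", err)
		return
	}
	fmt.Println(buf.Len(), "bytes of form data")
}
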
diff --git a/src/pkg/mime/type.go b/src/pkg/mime/type.go
index 39bf40ee..ce72bb5 100644
--- a/src/pkg/mime/type.go
+++ b/src/pkg/mime/type.go
@@ -92,7 +92,7 @@
 // AddExtensionType sets the MIME type associated with
 // the extension ext to typ.  The extension should begin with
 // a leading dot, as in ".html".
-func AddExtensionType(ext, typ string) os.Error {
+func AddExtensionType(ext, typ string) error {
 	if ext == "" || ext[0] != '.' {
 		return fmt.Errorf(`mime: extension "%s" misses dot`, ext)
 	}
@@ -100,7 +100,7 @@
 	return setExtensionType(ext, typ)
 }
 
-func setExtensionType(extension, mimeType string) os.Error {
+func setExtensionType(extension, mimeType string) error {
 	full, param, err := ParseMediaType(mimeType)
 	if err != nil {
 		return err
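
AddExtensionType likewise now returns error. A caller sketch (the extension and MIME type here are examples only):

package main

import (
	"fmt"
	"mime"
)

func main() {
	// AddExtensionType returns a non-nil error for an extension without a
	// leading dot or for a MIME type that ParseMediaType rejects.
	if err := mime.AddExtensionType(".md", "text/markdown"); err != nil {
		fmt.Println("add extension:", err)
		return
	}
	fmt.Println(mime.TypeByExtension(".md"))
}
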